{"openapi":"3.0.0","info":{"version":"2026-01-22T10:14:56Z","title":"RestApi"},"paths":{"/assets":{"get":{"tags":["Assets"],"description":"List assets of a project team.\nSupports both public access (via the `Authorization` header set to `public-auth-token`) and authenticated user access (including API keys).","operationId":"GetAssets","parameters":[{"name":"updatedBefore","in":"query","description":"Filter results to only return assets updated before the specified ISO string date (exclusive). Requires the sortBy parameter to be \"updatedAt\"","required":false,"schema":{"type":"string"}},{"name":"sortDirection","in":"query","description":"Sort results in ascending (asc) or descending (desc) order","required":false,"schema":{"type":"string"}},{"name":"privacy","in":"query","description":"Filters result by asset privacy. If set to public, it will return -all- public assets from all organizations","required":false,"schema":{"type":"string"}},{"name":"inferenceId","in":"query","description":"List assets generated from a specific inference","required":false,"schema":{"type":"string"}},{"name":"modelId","in":"query","description":"List assets generated from all inferences coming from a specific model (this is not the training images)","required":false,"schema":{"type":"string"}},{"name":"updatedAfter","in":"query","description":"Filter results to only return assets updated after the specified ISO string date (exclusive). Requires the sortBy parameter to be \"updatedAt\"","required":false,"schema":{"type":"string"}},{"name":"parentAssetId","in":"query","description":"List all the children assets that were generated from a specific parent asset","required":false,"schema":{"type":"string"}},{"name":"createdBefore","in":"query","description":"Filter results to only return assets created before the specified ISO string date (exclusive). Requires the sortBy parameter to be \"createdAt\"","required":false,"schema":{"type":"string"}},{"name":"sortBy","in":"query","description":"Sort results by the createdAt or updatedAt","required":false,"schema":{"type":"string"}},{"name":"createdAfter","in":"query","description":"Filter results to only return assets created after the specified ISO string date (exclusive). Requires the sortBy parameter to be \"createdAt\"","required":false,"schema":{"type":"string"}},{"name":"authorId","in":"query","description":"List assets generated by a specific author (the user that created the asset)","required":false,"schema":{"type":"string"}},{"name":"originalAssets","in":"query","description":"If set to true, returns the original asset without transformation","required":false,"schema":{"type":"string"}},{"name":"pageSize","in":"query","description":"The number of items to return in the response. The default value is 50, maximum value is 100, minimum value is 1","required":false,"schema":{"type":"string"}},{"name":"rootAssetId","in":"query","description":"List all the children assets that were generated from a specific root asset","required":false,"schema":{"type":"string"}},{"name":"type","in":"query","description":"List all the assets of a specific type. The parameter \"type\" and \"types\" cannot be used together. Can be any of the following values: inference-txt2img, inference-txt2img-ip-adapter, inference-txt2img-texture, inference-img2img, inference-img2img-ip-adapter, inference-img2img-texture, inference-inpaint, inference-inpaint-ip-adapter, inference-reference, inference-reference-texture, inference-controlnet, inference-controlnet-ip-adapter, inference-controlnet-img2img, inference-controlnet-reference, inference-controlnet-inpaint, inference-controlnet-inpaint-ip-adapter, inference-controlnet-texture, background-removal, canvas, canvas-export, canvas-drawing, detection, patch, pixelization, upscale, upscale-texture, upscale-skybox, vectorization, segment, segmentation-image, segmentation-mask, skybox-base-360, skybox-hdri, skybox-3d, restyle, reframe, generative-fill, texture, texture-height, texture-normal, texture-smoothness, texture-metallic, texture-edge, texture-ao, texture-albedo, image-prompt-editing, unknown, img23d, txt23d, 3d23d, 3d23d-texture, 3d-texture, 3d-texture-mtl, 3d-texture-albedo, 3d-texture-normal, 3d-texture-roughness, 3d-texture-metallic, img2video, txt2audio, audio2audio, video2video, video2img, txt2img, img2img, txt2video, uploaded, uploaded-video, uploaded-audio, uploaded-3d, uploaded-avatar, upscale-video, assets with a type starting with \"inference-\" will be returned","required":false,"schema":{"type":"string","enum":["3d-texture","3d-texture-albedo","3d-texture-metallic","3d-texture-mtl","3d-texture-normal","3d-texture-roughness","3d23d","3d23d-texture","audio2audio","background-removal","canvas","canvas-drawing","canvas-export","detection","generative-fill","image-prompt-editing","img23d","img2img","img2video","inference-controlnet","inference-controlnet-img2img","inference-controlnet-inpaint","inference-controlnet-inpaint-ip-adapter","inference-controlnet-ip-adapter","inference-controlnet-reference","inference-controlnet-texture","inference-img2img","inference-img2img-ip-adapter","inference-img2img-texture","inference-inpaint","inference-inpaint-ip-adapter","inference-reference","inference-reference-texture","inference-txt2img","inference-txt2img-ip-adapter","inference-txt2img-texture","patch","pixelization","reframe","restyle","segment","segmentation-image","segmentation-mask","skybox-3d","skybox-base-360","skybox-hdri","texture","texture-albedo","texture-ao","texture-edge","texture-height","texture-metallic","texture-normal","texture-smoothness","txt23d","txt2audio","txt2img","txt2video","unknown","uploaded","uploaded-3d","uploaded-audio","uploaded-avatar","uploaded-video","upscale","upscale-skybox","upscale-texture","upscale-video","vectorization","video2img","video2video"]}},{"name":"paginationToken","in":"query","description":"A token you received in a previous request to query the next page of items","required":false,"schema":{"type":"string"}},{"name":"tags","in":"query","description":"List of tags, comma separated. Only for public assets on all teams.","required":false,"schema":{"type":"string"}},{"name":"types","in":"query","description":"List of the asset types to request. The parameter \"type\" and \"types\" cannot be used together. Can be any of the following values: inference-txt2img, inference-txt2img-ip-adapter, inference-txt2img-texture, inference-img2img, inference-img2img-ip-adapter, inference-img2img-texture, inference-inpaint, inference-inpaint-ip-adapter, inference-reference, inference-reference-texture, inference-controlnet, inference-controlnet-ip-adapter, inference-controlnet-img2img, inference-controlnet-reference, inference-controlnet-inpaint, inference-controlnet-inpaint-ip-adapter, inference-controlnet-texture, background-removal, canvas, canvas-export, canvas-drawing, detection, patch, pixelization, upscale, upscale-texture, upscale-skybox, vectorization, segment, segmentation-image, segmentation-mask, skybox-base-360, skybox-hdri, skybox-3d, restyle, reframe, generative-fill, texture, texture-height, texture-normal, texture-smoothness, texture-metallic, texture-edge, texture-ao, texture-albedo, image-prompt-editing, unknown, img23d, txt23d, 3d23d, 3d23d-texture, 3d-texture, 3d-texture-mtl, 3d-texture-albedo, 3d-texture-normal, 3d-texture-roughness, 3d-texture-metallic, img2video, txt2audio, audio2audio, video2video, video2img, txt2img, img2img, txt2video, uploaded, uploaded-video, uploaded-audio, uploaded-3d, uploaded-avatar, upscale-video","required":false,"style":"form","explode":false,"schema":{"type":"array","items":{"type":"string","enum":["3d-texture","3d-texture-albedo","3d-texture-metallic","3d-texture-mtl","3d-texture-normal","3d-texture-roughness","3d23d","3d23d-texture","audio2audio","background-removal","canvas","canvas-drawing","canvas-export","detection","generative-fill","image-prompt-editing","img23d","img2img","img2video","inference-controlnet","inference-controlnet-img2img","inference-controlnet-inpaint","inference-controlnet-inpaint-ip-adapter","inference-controlnet-ip-adapter","inference-controlnet-reference","inference-controlnet-texture","inference-img2img","inference-img2img-ip-adapter","inference-img2img-texture","inference-inpaint","inference-inpaint-ip-adapter","inference-reference","inference-reference-texture","inference-txt2img","inference-txt2img-ip-adapter","inference-txt2img-texture","patch","pixelization","reframe","restyle","segment","segmentation-image","segmentation-mask","skybox-3d","skybox-base-360","skybox-hdri","texture","texture-albedo","texture-ao","texture-edge","texture-height","texture-metallic","texture-normal","texture-smoothness","txt23d","txt2audio","txt2img","txt2video","unknown","uploaded","uploaded-3d","uploaded-audio","uploaded-avatar","uploaded-video","upscale","upscale-skybox","upscale-texture","upscale-video","vectorization","video2img","video2video"]}}},{"name":"collectionId","in":"query","required":false,"schema":{"type":"string"}}],"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/GetAssetsResponse"}}}}},"security":[{"Authorizer":[]}]},"post":{"tags":["Assets"],"description":"Upload an image or canvas","operationId":"PostAsset","parameters":[{"name":"originalAssets","in":"query","description":"If set to true, returns the original asset without transformation","required":false,"schema":{"type":"string"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/PostAssetRequest"}}},"required":true},"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/PostAssetResponse"}}}}},"security":[{"Authorizer":[]}]},"delete":{"tags":["Assets"],"description":"Delete multiple assets","operationId":"DeleteAsset","requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/DeleteAssetRequest"}}},"required":true},"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/DeleteAssetResponse"}}}}},"security":[{"Authorizer":[]}]}},"/assets/download":{"post":{"tags":["Assets"],"description":"Request a link to batch download assets (batch limited to 1000 assets)","operationId":"PostDownloadAssets","requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/PostDownloadAssetsRequest"}}},"required":true},"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/PostDownloadAssetsResponse"}}}}},"security":[{"Authorizer":[]}]}},"/assets/download/{jobId}":{"get":{"tags":["Assets"],"description":"Retrieve the status and the url of a batch download assets request","operationId":"GetDownloadAssets","parameters":[{"name":"jobId","in":"path","description":"The job ID to retrieve the download request","required":true,"schema":{"type":"string"}}],"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/GetDownloadAssetsResponse"}}}}},"security":[{"Authorizer":[]}]}},"/assets/get-bulk":{"post":{"tags":["Assets"],"description":"Get multiple assets by their IDs","operationId":"PostAssetGetBulk","parameters":[{"name":"originalAssets","in":"query","description":"If set to true, returns the original asset without transformation","required":false,"schema":{"type":"string"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/PostAssetGetBulkRequest"}}},"required":true},"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/GetAssetBulkResponse"}}}}},"security":[{"Authorizer":[]}]}},"/assets/public":{"get":{"tags":["Assets"],"description":"List all public assets. @deprecated Use GET /assets with Authorization: public-auth-token header instead.","operationId":"GetPublicAssets","parameters":[{"name":"updatedBefore","in":"query","description":"Filter results to only return assets updated before the specified ISO string date (exclusive). Requires the sortBy parameter to be \"updatedAt\"","required":false,"schema":{"type":"string"}},{"name":"sortDirection","in":"query","description":"Sort results in ascending (asc) or descending (desc) order","required":false,"schema":{"type":"string"}},{"name":"modelId","in":"query","description":"List assets generated from all inferences coming from a specific model (this is not the training images)","required":false,"schema":{"type":"string"}},{"name":"updatedAfter","in":"query","description":"Filter results to only return assets updated after the specified ISO string date (exclusive). Requires the sortBy parameter to be \"updatedAt\"","required":false,"schema":{"type":"string"}},{"name":"createdBefore","in":"query","description":"Filter results to only return assets created before the specified ISO string date (exclusive). Requires the sortBy parameter to be \"createdAt\"","required":false,"schema":{"type":"string"}},{"name":"sortBy","in":"query","description":"Sort results by the createdAt or updatedAt","required":false,"schema":{"type":"string"}},{"name":"createdAfter","in":"query","description":"Filter results to only return assets created after the specified ISO string date (exclusive). Requires the sortBy parameter to be \"createdAt\"","required":false,"schema":{"type":"string"}},{"name":"originalAssets","in":"query","description":"If set to true, returns the original asset without transformation","required":false,"schema":{"type":"string"}},{"name":"pageSize","in":"query","description":"The number of items to return in the response. The default value is 50, maximum value is 100, minimum value is 1","required":false,"schema":{"type":"string"}},{"name":"type","in":"query","description":"List all the assets of a specific type. The parameter \"type\" and \"types\" cannot be used together. Can be any of the following values: inference-txt2img, inference-txt2img-ip-adapter, inference-txt2img-texture, inference-img2img, inference-img2img-ip-adapter, inference-img2img-texture, inference-inpaint, inference-inpaint-ip-adapter, inference-reference, inference-reference-texture, inference-controlnet, inference-controlnet-ip-adapter, inference-controlnet-img2img, inference-controlnet-reference, inference-controlnet-inpaint, inference-controlnet-inpaint-ip-adapter, inference-controlnet-texture, background-removal, canvas, canvas-export, canvas-drawing, detection, patch, pixelization, upscale, upscale-texture, upscale-skybox, vectorization, segment, segmentation-image, segmentation-mask, skybox-base-360, skybox-hdri, skybox-3d, restyle, reframe, generative-fill, texture, texture-height, texture-normal, texture-smoothness, texture-metallic, texture-edge, texture-ao, texture-albedo, image-prompt-editing, unknown, img23d, txt23d, 3d23d, 3d23d-texture, 3d-texture, 3d-texture-mtl, 3d-texture-albedo, 3d-texture-normal, 3d-texture-roughness, 3d-texture-metallic, img2video, txt2audio, audio2audio, video2video, video2img, txt2img, img2img, txt2video, uploaded, uploaded-video, uploaded-audio, uploaded-3d, uploaded-avatar, upscale-video, assets with a type starting with \"inference-\" will be returned","required":false,"schema":{"type":"string"}},{"name":"paginationToken","in":"query","description":"A token you received in a previous request to query the next page of items","required":false,"schema":{"type":"string"}},{"name":"tags","in":"query","description":"List of tags, comma separated. Only for public assets on all teams.","required":false,"schema":{"type":"string"}},{"name":"types","in":"query","description":"List of the asset types to request. The parameter \"type\" and \"types\" cannot be used together. Can be any of the following values: inference-txt2img, inference-txt2img-ip-adapter, inference-txt2img-texture, inference-img2img, inference-img2img-ip-adapter, inference-img2img-texture, inference-inpaint, inference-inpaint-ip-adapter, inference-reference, inference-reference-texture, inference-controlnet, inference-controlnet-ip-adapter, inference-controlnet-img2img, inference-controlnet-reference, inference-controlnet-inpaint, inference-controlnet-inpaint-ip-adapter, inference-controlnet-texture, background-removal, canvas, canvas-export, canvas-drawing, detection, patch, pixelization, upscale, upscale-texture, upscale-skybox, vectorization, segment, segmentation-image, segmentation-mask, skybox-base-360, skybox-hdri, skybox-3d, restyle, reframe, generative-fill, texture, texture-height, texture-normal, texture-smoothness, texture-metallic, texture-edge, texture-ao, texture-albedo, image-prompt-editing, unknown, img23d, txt23d, 3d23d, 3d23d-texture, 3d-texture, 3d-texture-mtl, 3d-texture-albedo, 3d-texture-normal, 3d-texture-roughness, 3d-texture-metallic, img2video, txt2audio, audio2audio, video2video, video2img, txt2img, img2img, txt2video, uploaded, uploaded-video, uploaded-audio, uploaded-3d, uploaded-avatar, upscale-video","required":false,"schema":{"type":"string"}}],"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/GetPublicAssetsResponse"}}}}},"deprecated":true}},"/assets/public/{assetId}":{"get":{"tags":["Assets"],"description":"Get the details of an asset. @deprecated Use GET /assets/{assetId} with Authorization: public-auth-token header instead.","operationId":"GetPublicAssetsByAssetId","parameters":[{"name":"assetId","in":"path","description":"The asset ID to retrieve","required":true,"schema":{"type":"string"}}],"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/GetPublicAssetsByAssetIdResponse"}}}}},"deprecated":true}},"/assets/{assetId}":{"get":{"tags":["Assets"],"description":"Get the details of an asset.\nSupports both public access (via the `Authorization` header set to `public-auth-token`) and authenticated user access (including API keys).","operationId":"GetAssetsByAssetId","parameters":[{"name":"withEmbedding","in":"query","description":"Include the embedding in the response","required":false,"schema":{"type":"string"}},{"name":"originalAssets","in":"query","description":"If set to true, returns the original asset without transformation","required":false,"schema":{"type":"string"}},{"name":"assetId","in":"path","description":"The asset ID to retrieve","required":true,"schema":{"type":"string"}}],"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/GetAssetsByAssetIdResponse"}}}}},"security":[{"Authorizer":[]}]},"put":{"tags":["Assets"],"description":"Update a canvas asset","operationId":"PutAssetByAssetId","parameters":[{"name":"originalAssets","in":"query","description":"If set to true, returns the original asset without transformation","required":false,"schema":{"type":"string"}},{"name":"assetId","in":"path","required":true,"schema":{"type":"string"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/PutAssetByAssetIdRequest"}}},"required":true},"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/PutAssetByAssetIdResponse"}}}}},"security":[{"Authorizer":[]}]}},"/assets/{assetId}/copy":{"post":{"tags":["Assets"],"description":"Duplicate an asset","operationId":"CopyAssetByAssetId","parameters":[{"name":"originalAssets","in":"query","description":"If set to true, returns the original asset without transformation","required":false,"schema":{"type":"string"}},{"name":"assetId","in":"path","description":"The ID of the asset to duplicate","required":true,"schema":{"type":"string"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/CopyAssetByAssetIdRequest"}}},"required":true},"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/CopyAssetResponse"}}}}},"security":[{"Authorizer":[]}]}},"/assets/{assetId}/download":{"post":{"tags":["Assets"],"description":"Request a link to download the given `assetId` in the given `targetFormat`","operationId":"PostDownloadAsset","parameters":[{"name":"assetId","in":"path","required":true,"schema":{"type":"string"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/PostDownloadAssetRequest"}}},"required":true},"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/PostDownloadAssetResponse"}}}}},"security":[{"Authorizer":[]}]}},"/assets/{assetId}/lock":{"put":{"tags":["Assets"],"description":"Lock a canvas","operationId":"LockAssetByAssetId","parameters":[{"name":"originalAssets","in":"query","description":"If set to true, returns the original asset without transformation","required":false,"schema":{"type":"string"}},{"name":"assetId","in":"path","description":"The ID of the canvas to lock","required":true,"schema":{"type":"string"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/LockAssetByAssetIdRequest"}}},"required":true},"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/LockAssetByAssetIdResponse"}}}}},"security":[{"Authorizer":[]}]}},"/assets/{assetId}/snapshots":{"get":{"tags":["Assets"],"description":"List snapshots of a canvas type asset","operationId":"GetCanvasAssetSnapshots","parameters":[{"name":"pageSize","in":"query","description":"The number of items to return in the response. The default value is 10, maximum value is 100, minimum value is 10","required":false,"schema":{"type":"string"}},{"name":"assetId","in":"path","description":"The ID of the canvas asset to list snapshots for","required":true,"schema":{"type":"string"}},{"name":"paginationToken","in":"query","description":"A token you received in a previous request to query the next page of items","required":false,"schema":{"type":"string"}}],"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/GetCanvasAssetSnapshotsResponse"}}}}},"security":[{"Authorizer":[]}]}},"/assets/{assetId}/tags":{"put":{"tags":["Assets"],"description":"Add/delete tags on a specific asset","operationId":"PutAssetsTagsByAssetId","parameters":[{"name":"assetId","in":"path","description":"The ID of the asset to update its tags","required":true,"schema":{"type":"string"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/PutAssetsTagsByAssetIdRequest"}}},"required":true},"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/PutAssetsTagsByAssetIdResponse"}}}}},"security":[{"Authorizer":[]}]}},"/assets/{assetId}/unlock":{"put":{"tags":["Assets"],"description":"Unlock a canvas","operationId":"UnlockAssetByAssetId","parameters":[{"name":"originalAssets","in":"query","description":"If set to true, returns the original asset without transformation","required":false,"schema":{"type":"string"}},{"name":"assetId","in":"path","description":"The ID of the canvas to unlock","required":true,"schema":{"type":"string"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/UnlockAssetByAssetIdRequest"}}},"required":true},"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/UnlockAssetByAssetIdResponse"}}}}},"security":[{"Authorizer":[]}]}},"/collections":{"get":{"tags":["Collections"],"description":"List collections of a team","operationId":"GetCollections","parameters":[{"name":"pageSize","in":"query","description":"The number of items to return in the response. The default value is 10, maximum value is 100, minimum value is 1","required":false,"schema":{"type":"string"}},{"name":"paginationToken","in":"query","description":"A token you received in a previous request to query the next page of items","required":false,"schema":{"type":"string"}}],"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/GetCollectionsResponse"}}}}},"security":[{"Authorizer":[]}]},"post":{"tags":["Collections"],"description":"Create a new collection","operationId":"PostCollection","requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/PostCollectionRequest"}}},"required":true},"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/PostCollectionResponse"}}}}},"security":[{"Authorizer":[]}]}},"/collections/{collectionId}":{"get":{"tags":["Collections"],"description":"Get the details of a collection","operationId":"GetCollectionsByCollectionId","parameters":[{"name":"collectionId","in":"path","description":"The collection ID to retrieve","required":true,"schema":{"type":"string"}}],"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/GetCollectionsByCollectionIdResponse"}}}}},"security":[{"Authorizer":[]}]},"put":{"tags":["Collections"],"description":"Update the name and/or thumbnail of a Collection","operationId":"PutCollectionsByCollectionId","parameters":[{"name":"collectionId","in":"path","description":"The collectionId to update","required":true,"schema":{"type":"string"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/PutCollectionsByCollectionIdRequest"}}},"required":true},"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/PutCollectionsByCollectionIdResponse"}}}}},"security":[{"Authorizer":[]}]},"delete":{"tags":["Collections"],"description":"Delete a collection","operationId":"DeleteCollectionsByCollectionId","parameters":[{"name":"collectionId","in":"path","description":"The collection ID to delete","required":true,"schema":{"type":"string"}}],"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/DeleteCollectionResponse"}}}}},"security":[{"Authorizer":[]}]}},"/collections/{collectionId}/assets":{"put":{"tags":["Collections"],"description":"Add assets to a specific collection","operationId":"PutAssetsByCollectionId","parameters":[{"name":"collectionId","in":"path","description":"The ID of the collection to add assets to","required":true,"schema":{"type":"string"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/PutAssetsByCollectionIdRequest"}}},"required":true},"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/PutAssetsByCollectionIdResponse"}}}}},"security":[{"Authorizer":[]}]},"delete":{"tags":["Collections"],"description":"Remove assets from a specific collection","operationId":"DeleteAssetsByCollectionId","parameters":[{"name":"collectionId","in":"path","description":"The ID of the collection to remove the assets from","required":true,"schema":{"type":"string"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/DeleteAssetsByCollectionIdRequest"}}},"required":true},"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/DeleteAssetsByCollectionIdResponse"}}}}},"security":[{"Authorizer":[]}]}},"/collections/{collectionId}/models":{"put":{"tags":["Collections"],"description":"Add models to a specific collection","operationId":"PutModelsByCollectionId","parameters":[{"name":"collectionId","in":"path","description":"The ID of the collection to add models to","required":true,"schema":{"type":"string"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/PutModelsByCollectionIdRequest"}}},"required":true},"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/PutModelsByCollectionIdResponse"}}}}},"security":[{"Authorizer":[]}]},"delete":{"tags":["Collections"],"description":"Remove models from a specific collection","operationId":"DeleteModelsByCollectionId","parameters":[{"name":"collectionId","in":"path","description":"The ID of the collection to remove the models from","required":true,"schema":{"type":"string"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/DeleteModelsByCollectionIdRequest"}}},"required":true},"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/DeleteModelsByCollectionIdResponse"}}}}},"security":[{"Authorizer":[]}]}},"/generate/caption":{"post":{"tags":["Generate: Text"],"description":"Caption image(s)","operationId":"PostCaptionInferences","parameters":[{"name":"dryRun","in":"query","required":false,"schema":{"type":"string"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/PostCaptionInferencesRequest"}}},"required":true},"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/PostCaptionInferencesResponse"}}}},"269":{"description":"269 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/DryRunResponse"}}}}},"security":[{"Authorizer":[]}]}},"/generate/controlnet":{"post":{"tags":["Generate: Image (Single ref)"],"description":"Trigger a new image generation in ControlNet mode. The control image is used to guide the generation; it can be a pose, canny map, or similar.","operationId":"PostControlnetInferences","parameters":[{"name":"originalAssets","in":"query","description":"If set to true, returns the original asset without transformation","required":false,"schema":{"type":"string"}},{"name":"dryRun","in":"query","required":false,"schema":{"type":"string"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/PostControlnetInferencesRequest"}}},"required":true},"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/PostControlnetInferencesResponse"}}}},"269":{"description":"269 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/DryRunResponse"}}}}},"security":[{"Authorizer":[]}]}},"/generate/controlnet-img2img":{"post":{"tags":["Generate: Image (Dual ref)"],"description":"Trigger a new image generation in ControlNet + Img2Img mode. The control image is used to guide the generation; it can be a pose, canny map, or similar. The reference image is used to initialize the generation process.","operationId":"PostControlnetImg2imgInferences","parameters":[{"name":"originalAssets","in":"query","description":"If set to true, returns the original asset without transformation","required":false,"schema":{"type":"string"}},{"name":"dryRun","in":"query","required":false,"schema":{"type":"string"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/PostControlnetImg2imgInferencesRequest"}}},"required":true},"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/PostControlnetImg2imgInferencesResponse"}}}},"269":{"description":"269 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/DryRunResponse"}}}}},"security":[{"Authorizer":[]}]}},"/generate/controlnet-inpaint":{"post":{"tags":["Generate: Image Inpainting"],"description":"Trigger a new image generation in ControlNet + Inpaint mode. The control image is used to guide the generation; it can be a pose, canny map, or similar. The mask indicates the area to inpaint in the reference image.","operationId":"PostControlnetInpaintInferences","parameters":[{"name":"originalAssets","in":"query","description":"If set to true, returns the original asset without transformation","required":false,"schema":{"type":"string"}},{"name":"dryRun","in":"query","required":false,"schema":{"type":"string"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/PostControlnetInpaintInferencesRequest"}}},"required":true},"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/PostControlnetInpaintInferencesResponse"}}}},"269":{"description":"269 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/DryRunResponse"}}}}},"security":[{"Authorizer":[]}]}},"/generate/controlnet-inpaint-ip-adapter":{"post":{"tags":["Generate: Image Inpainting"],"description":"Trigger a new image generation in ControlNet + Inpaint + IpAdapter mode. The control image is used to guide the generation; it can be a pose, canny map, or similar. The mask indicates the area to inpaint in the reference image, and the second reference image is used as an IPAdapter to guide the generation process.","operationId":"PostControlnetInpaintIpAdapterInferences","parameters":[{"name":"originalAssets","in":"query","description":"If set to true, returns the original asset without transformation","required":false,"schema":{"type":"string"}},{"name":"dryRun","in":"query","required":false,"schema":{"type":"string"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/PostControlnetInpaintIpAdapterInferencesRequest"}}},"required":true},"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/PostControlnetInpaintIpAdapterInferencesResponse"}}}},"269":{"description":"269 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/DryRunResponse"}}}}},"security":[{"Authorizer":[]}]}},"/generate/controlnet-ip-adapter":{"post":{"tags":["Generate: Image (Dual ref)"],"description":"Trigger a new image generation in ControlNet + IpAdapter mode. The control image is used to guide the generation; it can be a pose, canny map, or similar. The second reference image is used as an IPAdapter to guide the generation process.","operationId":"PostControlnetIpAdapterInferences","parameters":[{"name":"originalAssets","in":"query","description":"If set to true, returns the original asset without transformation","required":false,"schema":{"type":"string"}},{"name":"dryRun","in":"query","required":false,"schema":{"type":"string"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/PostControlnetIpAdapterInferencesRequest"}}},"required":true},"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/PostControlnetIpAdapterInferencesResponse"}}}},"269":{"description":"269 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/DryRunResponse"}}}}},"security":[{"Authorizer":[]}]}},"/generate/controlnet-texture":{"post":{"tags":["Generate: Texture"],"description":"Trigger a new seamless texture image generation in ControlNet mode. 
The control image is used to guide the generation; it can be a pose, canny map, or similar.","operationId":"PostControlnetTextureInferences","parameters":[{"name":"originalAssets","in":"query","description":"If set to true, returns the original asset without transformation","required":false,"schema":{"type":"string"}},{"name":"dryRun","in":"query","required":false,"schema":{"type":"string"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/PostControlnetTextureInferencesRequest"}}},"required":true},"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/PostControlnetTextureInferencesResponse"}}}},"269":{"description":"269 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/DryRunResponse"}}}}},"security":[{"Authorizer":[]}]}},"/generate/custom/{modelId}":{"post":{"tags":["Generate"],"description":"Generate with any model (Image, Video, Audio, 3d).\n\nYou can retrieve the model inputs from the `GET /models/{modelId}` endpoint.\n\n**Note**: This endpoint is not available yet for SD1.5, SDXL and Flux.1 based models. For these models, use the `POST /generate/{inferenceType}` endpoint. 
Ex: `POST /generate/txt2img`","operationId":"PostGenerateCustom","parameters":[{"name":"dryRun","in":"query","required":false,"schema":{"type":"string"}},{"name":"modelId","in":"path","required":true,"schema":{"type":"string"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/PostGenerateCustomRequest"}}},"required":true},"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/PostGenerateCustomResponse"}}}}},"security":[{"Authorizer":[]}]}},"/generate/describe-style":{"post":{"tags":["Generate: Text"],"description":"Describe the style of the given images or models.","operationId":"PostDescribeStyleInferences","parameters":[{"name":"dryRun","in":"query","required":false,"schema":{"type":"string"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/PostDescribeStyleInferencesRequest"}}},"required":true},"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/PostDescribeStyleInferencesResponse"}}}},"269":{"description":"269 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/DryRunResponse"}}}}},"security":[{"Authorizer":[]}]}},"/generate/detect":{"post":{"tags":["Generate: Detection Maps"],"description":"Advanced precision in image generation by transforming visual data from input images into mode maps.","operationId":"PostDetectInferences","parameters":[{"name":"originalAssets","in":"query","description":"If set to true, returns the original asset without transformation","required":false,"schema":{"type":"string"}},{"name":"dryRun","in":"query","required":false,"schema":{"type":"string"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/PostDetectInferencesRequest"}}},"required":true},"responses":{"200":{"description":"200 
response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/JobAndAssetAndBillingGenerateResponse"}}}},"269":{"description":"269 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/DryRunResponse"}}}}},"security":[{"Authorizer":[]}]}},"/generate/embed":{"post":{"tags":["Generate: Embedding"],"description":"Get embeddings from text","operationId":"PostEmbedInferences","parameters":[{"name":"originalAssets","in":"query","description":"If set to true, returns the original asset without transformation","required":false,"schema":{"type":"string"}},{"name":"dryRun","in":"query","required":false,"schema":{"type":"string"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/PostEmbedInferencesRequest"}}},"required":true},"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/PostEmbedInferencesResponse"}}}},"269":{"description":"269 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/DryRunResponse"}}}}},"security":[{"Authorizer":[]}]}},"/generate/generative-fill":{"post":{"tags":["Generate: Image Generative Fill"],"description":"Generative fill replaces the selected mask area content based on the context. 
Used to erase objects or characters.","operationId":"PostGenerativeFillInferences","parameters":[{"name":"originalAssets","in":"query","description":"If set to true, returns the original asset without transformation","required":false,"schema":{"type":"string"}},{"name":"dryRun","in":"query","required":false,"schema":{"type":"string"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/PostGenerativeFillInferencesRequest"}}},"required":true},"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/JobAndBillingGenerateResponse"}}}},"269":{"description":"269 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/DryRunResponse"}}}}},"security":[{"Authorizer":[]}]}},"/generate/img2img":{"post":{"tags":["Generate: Image (Single ref)"],"description":"Trigger a new image generation in Img2Img mode with one reference image that initializes the generation process.","operationId":"PostImg2imgInferences","parameters":[{"name":"originalAssets","in":"query","description":"If set to true, returns the original asset without transformation","required":false,"schema":{"type":"string"}},{"name":"dryRun","in":"query","required":false,"schema":{"type":"string"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/PostImg2imgInferencesRequest"}}},"required":true},"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/PostImg2imgInferencesResponse"}}}},"269":{"description":"269 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/DryRunResponse"}}}}},"security":[{"Authorizer":[]}]}},"/generate/img2img-ip-adapter":{"post":{"tags":["Generate: Image (Dual ref)"],"description":"Trigger a new image generation in Img2Img + IpAdapter mode. 
The first image is used to initialize the generation, and the second reference image is used as an IPAdapter.","operationId":"PostImg2imgIpAdapterInferences","parameters":[{"name":"originalAssets","in":"query","description":"If set to true, returns the original asset without transformation","required":false,"schema":{"type":"string"}},{"name":"dryRun","in":"query","required":false,"schema":{"type":"string"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/PostImg2imgIpAdapterInferencesRequest"}}},"required":true},"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/PostImg2imgIpAdapterInferencesResponse"}}}},"269":{"description":"269 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/DryRunResponse"}}}}},"security":[{"Authorizer":[]}]}},"/generate/img2img-texture":{"post":{"tags":["Generate: Texture"],"description":"Trigger a new seamless texture image generation in Img2Img mode with one reference image that initializes the generation.","operationId":"PostImg2imgTextureInferences","parameters":[{"name":"originalAssets","in":"query","description":"If set to true, returns the original asset without transformation","required":false,"schema":{"type":"string"}},{"name":"dryRun","in":"query","required":false,"schema":{"type":"string"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/PostImg2imgTextureInferencesRequest"}}},"required":true},"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/PostImg2imgTextureInferencesResponse"}}}},"269":{"description":"269 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/DryRunResponse"}}}}},"security":[{"Authorizer":[]}]}},"/generate/inpaint":{"post":{"tags":["Generate: Image Inpainting"],"description":"Trigger a new image generation in Inpaint mode. 
The mask indicates the area to inpaint in the reference image.","operationId":"PostInpaintInferences","parameters":[{"name":"originalAssets","in":"query","description":"If set to true, returns the original asset without transformation","required":false,"schema":{"type":"string"}},{"name":"dryRun","in":"query","required":false,"schema":{"type":"string"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/PostInpaintInferencesRequest"}}},"required":true},"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/PostInpaintInferencesResponse"}}}},"269":{"description":"269 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/DryRunResponse"}}}}},"security":[{"Authorizer":[]}]}},"/generate/inpaint-ip-adapter":{"post":{"tags":["Generate: Image Inpainting"],"description":"Trigger a new image generation in Inpaint + IpAdapter mode. The mask indicates the area to inpaint in the reference image, and the second reference image is used as an IPAdapter to guide the inpainting.","operationId":"PostInpaintIpAdapterInferences","parameters":[{"name":"originalAssets","in":"query","description":"If set to true, returns the original asset without transformation","required":false,"schema":{"type":"string"}},{"name":"dryRun","in":"query","required":false,"schema":{"type":"string"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/PostInpaintIpAdapterInferencesRequest"}}},"required":true},"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/PostInpaintIpAdapterInferencesResponse"}}}},"269":{"description":"269 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/DryRunResponse"}}}}},"security":[{"Authorizer":[]}]}},"/generate/patch":{"post":{"tags":["Generate: Image Editing"],"description":"Patch an asset with an 
image.","operationId":"PostPatchInferences","parameters":[{"name":"originalAssets","in":"query","description":"If set to true, returns the original asset without transformation","required":false,"schema":{"type":"string"}},{"name":"dryRun","in":"query","required":false,"schema":{"type":"string"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/PostPatchInferencesRequest"}}},"required":true},"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/JobAndAssetAndBillingGenerateResponse"}}}},"269":{"description":"269 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/DryRunResponse"}}}}},"security":[{"Authorizer":[]}]}},"/generate/pixelate":{"post":{"tags":["Generate: Image Editing"],"description":"Advanced pixelization of an image.","operationId":"PostPixelateInferences","parameters":[{"name":"originalAssets","in":"query","description":"If set to true, returns the original asset without transformation","required":false,"schema":{"type":"string"}},{"name":"dryRun","in":"query","required":false,"schema":{"type":"string"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/PostPixelateInferencesRequest"}}},"required":true},"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/JobAndAssetAndBillingGenerateResponse"}}}},"269":{"description":"269 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/DryRunResponse"}}}}},"security":[{"Authorizer":[]}]}},"/generate/prompt":{"post":{"tags":["Generate: Text"],"description":"Generate, complete or invent new 
prompts.","operationId":"PostPromptInferences","parameters":[{"name":"dryRun","in":"query","required":false,"schema":{"type":"string"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/PostPromptInferencesRequest"}}},"required":true},"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/PostPromptInferencesResponse"}}}},"269":{"description":"269 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/DryRunResponse"}}}}},"security":[{"Authorizer":[]}]}},"/generate/prompt-editing":{"post":{"tags":["Generate: Image Editing"],"description":"**[DEPRECATED]** This API is deprecated.\n\nEdit an image with a prompt.\n\n**Note**: Please use `POST /generate/custom/{modelId}` endpoint instead. You can retrieve the model inputs from the `GET /models/{modelId}` endpoint. Use one of the following models:\n\n- `model_bytedance-seedream-4-editing`\n- `model_flux-kontext-editing`\n- `model_google-gemini-2-5-flash-image-editing`\n- `model_openai-gpt-image-1-editing`\n- `model_qwen-image-editing`\n- `model_runway-gen4-image-editing`\n\nor one of your Kontext LoRA.\n\nSee https://docs.scenario.com/docs/edit-images-with-prompts for more details.","operationId":"PostPromptEditingInferences","parameters":[{"name":"originalAssets","in":"query","description":"If set to true, returns the original asset without transformation","required":false,"schema":{"type":"string"}},{"name":"dryRun","in":"query","required":false,"schema":{"type":"string"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/PostPromptEditingInferencesRequest"}}},"required":true},"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/JobAndBillingGenerateResponse"}}}},"269":{"description":"269 
response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/DryRunResponse"}}}}},"security":[{"Authorizer":[]}],"deprecated":true}},"/generate/reframe":{"post":{"tags":["Generate: Image Reframe"],"description":"Reframe a given image to new sizes. Extra space is filled based on the context.","operationId":"PostReframeInferences","parameters":[{"name":"originalAssets","in":"query","description":"If set to true, returns the original asset without transformation","required":false,"schema":{"type":"string"}},{"name":"dryRun","in":"query","required":false,"schema":{"type":"string"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/PostReframeInferencesRequest"}}},"required":true},"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/JobAndBillingGenerateResponse"}}}},"269":{"description":"269 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/DryRunResponse"}}}}},"security":[{"Authorizer":[]}]}},"/generate/remove-background":{"post":{"tags":["Generate: Image Editing"],"description":"**[DEPRECATED]** This API is deprecated.\n\nAdvanced remove-background of an image.\n\n**Note**: Please use `POST /generate/custom/{modelId}` endpoint instead. You can retrieve the model inputs from the `GET /models/{modelId}` endpoint. Please use `model_bria-remove-background` `modelId` for example. 
See https://docs.scenario.com/docs/tools-parameters-reference for more details.","operationId":"PostRemoveBackgroundInferences","parameters":[{"name":"originalAssets","in":"query","description":"If set to true, returns the original asset without transformation","required":false,"schema":{"type":"string"}},{"name":"dryRun","in":"query","required":false,"schema":{"type":"string"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/PostRemoveBackgroundInferencesRequest"}}},"required":true},"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/JobAndAssetAndBillingGenerateResponse"}}}},"269":{"description":"269 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/DryRunResponse"}}}}},"security":[{"Authorizer":[]}],"deprecated":true}},"/generate/restyle":{"post":{"tags":["Generate: Image Restyle"],"description":"Trigger a restyle process from one sketch image (or other image) and one or more reference style images.","operationId":"PostRestyleInferences","parameters":[{"name":"originalAssets","in":"query","description":"If set to true, returns the original asset without transformation","required":false,"schema":{"type":"string"}},{"name":"dryRun","in":"query","required":false,"schema":{"type":"string"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/PostRestyleInferencesRequest"}}},"required":true},"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/JobAndBillingGenerateResponse"}}}},"269":{"description":"269 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/DryRunResponse"}}}}},"security":[{"Authorizer":[]}]}},"/generate/segment":{"post":{"tags":["Generate: Image Segmentation"],"description":"Trigger the segmentation of an image. 
The process will create a new Asset with the segmentation mask as a child.","operationId":"PostSegmentInferences","parameters":[{"name":"originalAssets","in":"query","description":"If set to true, returns the original asset without transformation","required":false,"schema":{"type":"string"}},{"name":"dryRun","in":"query","required":false,"schema":{"type":"string"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/PostSegmentInferencesRequest"}}},"required":true},"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/PostSegmentInferencesResponse"}}}},"269":{"description":"269 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/DryRunResponse"}}}}},"security":[{"Authorizer":[]}]}},"/generate/skybox-base-360":{"post":{"tags":["Generate: Skybox"],"description":"Trigger the generation of a 360 skybox seamless image.","operationId":"PostSkyboxBase360Inferences","parameters":[{"name":"originalAssets","in":"query","description":"If set to true, returns the original asset without transformation","required":false,"schema":{"type":"string"}},{"name":"dryRun","in":"query","required":false,"schema":{"type":"string"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/PostSkyboxBase360InferencesRequest"}}},"required":true},"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/JobAndBillingGenerateResponse"}}}},"269":{"description":"269 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/DryRunResponse"}}}}},"security":[{"Authorizer":[]}]}},"/generate/skybox-upscale-360":{"post":{"tags":["Generate: Skybox"],"description":"Trigger the upscaling of an image matching the 360 skyboxes specific geometry.","operationId":"PostSkyboxUpscale360Inferences","parameters":[{"name":"originalAssets","in":"query","description":"If set to 
true, returns the original asset without transformation","required":false,"schema":{"type":"string"}},{"name":"dryRun","in":"query","required":false,"schema":{"type":"string"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/PostSkyboxUpscale360InferencesRequest"}}},"required":true},"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/JobAndBillingGenerateResponse"}}}},"269":{"description":"269 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/DryRunResponse"}}}}},"security":[{"Authorizer":[]}]}},"/generate/texture":{"post":{"tags":["Generate: Texture"],"description":"Trigger the conversion of an image texture to different texture maps:\n- Height map\n- Normal map\n- Smoothness map\n- Metallic map\n- Edge map\n- Ambient Occlusion map\n\nThe process will create a new Asset with the above texture maps as children + the original image as an Albedo map.\n","operationId":"PostTextureInferences","parameters":[{"name":"originalAssets","in":"query","description":"If set to true, returns the original asset without transformation","required":false,"schema":{"type":"string"}},{"name":"dryRun","in":"query","required":false,"schema":{"type":"string"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/PostTextureInferencesRequest"}}},"required":true},"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/JobAndBillingGenerateResponse"}}}},"269":{"description":"269 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/DryRunResponse"}}}}},"security":[{"Authorizer":[]}]}},"/generate/translate":{"post":{"tags":["Generate: Text"],"description":"Translate text from one language to 
English.","operationId":"PostTranslateInferences","parameters":[{"name":"dryRun","in":"query","required":false,"schema":{"type":"string"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/PostTranslateInferencesRequest"}}},"required":true},"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/PostTranslateInferencesResponse"}}}},"269":{"description":"269 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/DryRunResponse"}}}}},"security":[{"Authorizer":[]}]}},"/generate/txt2img":{"post":{"tags":["Generate: Image (No ref)"],"description":"Trigger a new image generation in Txt2Img mode.","operationId":"PostTxt2imgInferences","parameters":[{"name":"originalAssets","in":"query","description":"If set to true, returns the original asset without transformation","required":false,"schema":{"type":"string"}},{"name":"dryRun","in":"query","required":false,"schema":{"type":"string"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/PostTxt2imgInferencesRequest"}}},"required":true},"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/PostTxt2imgInferencesResponse"}}}},"269":{"description":"269 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/DryRunResponse"}}}}},"security":[{"Authorizer":[]}]}},"/generate/txt2img-ip-adapter":{"post":{"tags":["Generate: Image (Single ref)"],"description":"Trigger a new image generation in Txt2Img mode with one IpAdapter reference image that guides the generation process.","operationId":"PostTxt2imgIpAdapterInferences","parameters":[{"name":"originalAssets","in":"query","description":"If set to true, returns the original asset without 
transformation","required":false,"schema":{"type":"string"}},{"name":"dryRun","in":"query","required":false,"schema":{"type":"string"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/PostTxt2imgIpAdapterInferencesRequest"}}},"required":true},"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/PostTxt2imgIpAdapterInferencesResponse"}}}},"269":{"description":"269 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/DryRunResponse"}}}}},"security":[{"Authorizer":[]}]}},"/generate/txt2img-texture":{"post":{"tags":["Generate: Texture"],"description":"Trigger a new seamless texture image generation in Txt2Img mode.","operationId":"PostTxt2imgTextureInferences","parameters":[{"name":"originalAssets","in":"query","description":"If set to true, returns the original asset without transformation","required":false,"schema":{"type":"string"}},{"name":"dryRun","in":"query","required":false,"schema":{"type":"string"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/PostTxt2imgTextureInferencesRequest"}}},"required":true},"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/PostTxt2imgTextureInferencesResponse"}}}},"269":{"description":"269 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/DryRunResponse"}}}}},"security":[{"Authorizer":[]}]}},"/generate/upscale":{"post":{"tags":["Generate: Image Upscaling"],"description":"**[DEPRECATED]** This API is deprecated.\n\nUpscale an image. You can use styles and presets to quickly get results or craft your very own settings.\n\n**Note**: Please use `POST /generate/custom/{modelId}` endpoint instead. You can retrieve the model inputs from the `GET /models/{modelId}` endpoint. Please use `model_scenario-upscale-v3` `modelId` for example. 
See https://docs.scenario.com/docs/upscale-generation for more details.","operationId":"PostUpscaleInferences","parameters":[{"name":"originalAssets","in":"query","description":"If set to true, returns the original asset without transformation","required":false,"schema":{"type":"string"}},{"name":"dryRun","in":"query","required":false,"schema":{"type":"string"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/PostUpscaleInferencesRequest"}}},"required":true},"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/JobAndBillingGenerateResponse"}}}},"269":{"description":"269 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/DryRunResponse"}}}}},"security":[{"Authorizer":[]}],"deprecated":true}},"/generate/vectorize":{"post":{"tags":["Generate: Image Editing"],"description":"**[DEPRECATED]** This API is deprecated.\n\nAdvanced vectorization of an image.\n\n**Note**: Please use `POST /generate/custom/{modelId}` endpoint instead. You can retrieve the model inputs from the `GET /models/{modelId}` endpoint. Please use `model_visioncortex-vtracer` `modelId` for example. 
See https://docs.scenario.com/docs/tools-parameters-reference for more details.","operationId":"PostVectorizeInferences","parameters":[{"name":"originalAssets","in":"query","description":"If set to true, returns the original asset without transformation","required":false,"schema":{"type":"string"}},{"name":"dryRun","in":"query","required":false,"schema":{"type":"string"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/PostVectorizeInferencesRequest"}}},"required":true},"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/JobAndAssetAndBillingGenerateResponse"}}}},"269":{"description":"269 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/DryRunResponse"}}}}},"security":[{"Authorizer":[]}],"deprecated":true}},"/jobs":{"get":{"tags":["Jobs"],"description":"List all jobs matching the given filters. A job is a synchronous operation or an asynchronous task such as a training, a generation, etc. It offers a unified view of all operations running on the platform along with their status and results.","operationId":"GetJobs","parameters":[{"name":"authorId","in":"query","description":"The authorId of the jobs to return. Optional.","required":false,"schema":{"type":"string"}},{"name":"hideResults","in":"query","description":"If false, jobs containing the hideResults param will be not returned. Optional.","required":false,"schema":{"type":"string"}},{"name":"workflowId","in":"query","required":false,"schema":{"type":"string"}},{"name":"pageSize","in":"query","description":"The number of items to return in the response. The default value is 10, maximum value is 200, minimum value is 1","required":false,"schema":{"type":"string"}},{"name":"type","in":"query","description":"The type of the jobs to return. If \"types\" is defined, \"type\" will be ignored. 
Optional.","required":false,"schema":{"type":"string"}},{"name":"paginationToken","in":"query","description":"A token you received in a previous request to query the next page of items","required":false,"schema":{"type":"string"}},{"name":"types","in":"query","description":"The types of the jobs to return. If \"types\" is defined, \"type\" will be ignored. Optional.","required":false,"schema":{"type":"string"}},{"name":"status","in":"query","description":"The status of the jobs to return. Optional.","required":false,"schema":{"type":"string"}}],"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/GetJobsResponse"}}}}},"security":[{"Authorizer":[]}]}},"/jobs/{jobId}":{"get":{"tags":["Jobs"],"description":"Get job data by job ID","operationId":"GetJobId","parameters":[{"name":"jobId","in":"path","required":true,"schema":{"type":"string"}}],"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/GetJobIdResponse"}}}}},"security":[{"Authorizer":[]}]}},"/jobs/{jobId}/action":{"post":{"tags":["Jobs"],"description":"Trigger an action on a job: cancel","operationId":"PostJobActionByJobId","parameters":[{"name":"jobId","in":"path","description":"The job Id","required":true,"schema":{"type":"string"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/PostJobActionByJobIdRequest"}}},"required":true},"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/PostJobActionByJobIdResponse"}}}}},"security":[{"Authorizer":[]}]}},"/models":{"get":{"tags":["Models"],"description":"List all models. 
Supports both public access (via the `Authorization` header set to `public-auth-token`) and authenticated user access (including API keys).","operationId":"GetModels","parameters":[{"name":"updatedBefore","in":"query","description":"Filter results to only return models updated before the specified ISO string date (exclusive). Requires the sortBy parameter to be \"updatedAt\". Only available when privacy=public","required":false,"schema":{"type":"string"}},{"name":"sortDirection","in":"query","description":"Sort results in ascending (asc) or descending (desc) order. Only used when sortBy is specified. Available for both privacy=public and privacy=private/unlisted.\nFor public models, this parameter is ignored when sortBy is not specified or set to score.","required":false,"schema":{"type":"string"}},{"name":"collectionIds","in":"query","description":"List of collection ids, comma separated. Only available when privacy=public","required":false,"schema":{"type":"string"}},{"name":"privacy","in":"query","description":"The privacy of the models to return. The default value is `private`, possible values are `private` and `public`","required":false,"schema":{"type":"string"}},{"name":"updatedAfter","in":"query","description":"Filter results to only return models updated after the specified ISO string date (exclusive). Requires the sortBy parameter to be \"updatedAt\". Only available when privacy=public","required":false,"schema":{"type":"string"}},{"name":"blacklisted","in":"query","description":"If set to true, returns the list of models blacklisted for the team (only available for team admins)","required":false,"schema":{"type":"string"}},{"name":"status","in":"query","description":"The status of the models to return. Only available when privacy=private/unlisted","required":false,"schema":{"type":"string"}},{"name":"createdBefore","in":"query","description":"Filter results to only return models created before the specified ISO string date (exclusive). 
Requires the sortBy parameter to be \"createdAt\". Available for both privacy=public and privacy=private/unlisted","required":false,"schema":{"type":"string"}},{"name":"sortBy","in":"query","description":"Sort results by createdAt, updatedAt, or score. When privacy=public, defaults to score if not specified. When privacy=private/unlisted, supports createdAt and score (default: createdAt). When sortBy=score for privacy=private/unlisted, both privacy and status query parameters are required.","required":false,"schema":{"type":"string"}},{"name":"createdAfter","in":"query","description":"Filter results to only return models created after the specified ISO string date (exclusive). Requires the sortBy parameter to be \"createdAt\". Available for both privacy=public and privacy=private/unlisted","required":false,"schema":{"type":"string"}},{"name":"loadedOnly","in":"query","description":"If set to true, returns the list of models currently loaded on GPU","required":false,"schema":{"type":"string"}},{"name":"originalAssets","in":"query","description":"If set to true, returns the original asset without transformation","required":false,"schema":{"type":"string"}},{"name":"pageSize","in":"query","description":"The number of items to return in the response. The default value is 100, maximum value is 500, minimum value is 1","required":false,"schema":{"type":"string"}},{"name":"type","in":"query","description":"List all the models of a specific type. Can be any of the following values: sd-1_5, sd-1_5-lora, sd-1_5-composition, sd-xl, sd-xl-lora, sd-xl-composition, flux.1, flux.1-lora, flux.1-kontext-dev, flux.1-krea-dev, flux.1-kontext-lora, flux.1-krea-lora, flux.1-composition, flux.1-pro, flux1.1-pro, flux.1.1-pro-ultra, gpt-image-1, custom. 
Only available when privacy=public","required":false,"schema":{"type":"string"}},{"name":"paginationToken","in":"query","description":"A token you received in a previous request to query the next page of items","required":false,"schema":{"type":"string"}},{"name":"tags","in":"query","description":"List of tags, comma separated. Only available when privacy=public","required":false,"schema":{"type":"string"}},{"name":"types","in":"query","description":"List of types, comma separated. Can be any of the following values: sd-1_5, sd-1_5-lora, sd-1_5-composition, sd-xl, sd-xl-lora, sd-xl-composition, flux.1, flux.1-lora, flux.1-kontext-dev, flux.1-krea-dev, flux.1-kontext-lora, flux.1-krea-lora, flux.1-composition, flux.1-pro, flux1.1-pro, flux.1.1-pro-ultra, gpt-image-1, custom. Only available when privacy=public","required":false,"schema":{"type":"string"}},{"name":"collectionId","in":"query","description":"When provided, only the models in the Collection will be returned. Only available when privacy=private/unlisted (note: this is different from collectionIds which is only for privacy=public)","required":false,"schema":{"type":"string"}}],"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/GetModelsResponse"}}}}},"security":[{"Authorizer":[]}]},"post":{"tags":["Models"],"description":"Create a new model","operationId":"PostModels","parameters":[{"name":"originalAssets","in":"query","description":"If set to true, returns the original asset without transformation","required":false,"schema":{"type":"string"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/PostModelsRequest"}}},"required":true},"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/PostModelsResponse"}}}}},"security":[{"Authorizer":[]}]}},"/models/get-bulk":{"post":{"tags":["Models"],"description":"Get multiple models by their 
`modelIds`","operationId":"PostModelsGetBulk","parameters":[{"name":"originalAssets","in":"query","description":"If set to true, returns the original asset without transformation","required":false,"schema":{"type":"string"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/PostModelsGetBulkRequest"}}},"required":true},"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/PostModelsGetBulkResponse"}}}}},"security":[{"Authorizer":[]}]}},"/models/public":{"get":{"tags":["Models"],"description":"List all public models. @deprecated Use GET /models with Authorization: public-auth-token header instead.","operationId":"GetPublicModels","parameters":[{"name":"updatedBefore","in":"query","description":"Filter results to only return models updated before the specified ISO string date (exclusive). Requires the sortBy parameter to be \"updatedAt\"","required":false,"schema":{"type":"string"}},{"name":"sortDirection","in":"query","description":"Sort results in ascending (asc) or descending (desc) order","required":false,"schema":{"type":"string"}},{"name":"collectionIds","in":"query","description":"List of collection ids, comma separated.","required":false,"schema":{"type":"string"}},{"name":"pageSize","in":"query","description":"The number of items to return in the response. The default value is 50, maximum value is 500, minimum value is 1","required":false,"schema":{"type":"string"}},{"name":"type","in":"query","description":"List all the models of a specific type. 
Can be any of the following values: sd-1_5, sd-1_5-lora, sd-1_5-composition, sd-xl, sd-xl-lora, sd-xl-composition, flux.1, flux.1-lora, flux.1-kontext-dev, flux.1-krea-dev, flux.1-kontext-lora, flux.1-krea-lora, flux.1-composition, flux.1-pro, flux1.1-pro, flux.1.1-pro-ultra, gpt-image-1, custom","required":false,"schema":{"type":"string"}},{"name":"updatedAfter","in":"query","description":"Filter results to only return models updated after the specified ISO string date (exclusive). Requires the sortBy parameter to be \"updatedAt\"","required":false,"schema":{"type":"string"}},{"name":"paginationToken","in":"query","description":"A token you received in a previous request to query the next page of items","required":false,"schema":{"type":"string"}},{"name":"tags","in":"query","description":"List of tags, comma separated.","required":false,"schema":{"type":"string"}},{"name":"types","in":"query","description":"List of types, comma separated. Can be any of the following values: sd-1_5, sd-1_5-lora, sd-1_5-composition, sd-xl, sd-xl-lora, sd-xl-composition, flux.1, flux.1-lora, flux.1-kontext-dev, flux.1-krea-dev, flux.1-kontext-lora, flux.1-krea-lora, flux.1-composition, flux.1-pro, flux1.1-pro, flux.1.1-pro-ultra, gpt-image-1, custom","required":false,"schema":{"type":"string"}},{"name":"createdBefore","in":"query","description":"Filter results to only return models created before the specified ISO string date (exclusive). Requires the sortBy parameter to be \"createdAt\"","required":false,"schema":{"type":"string"}},{"name":"sortBy","in":"query","description":"Sort results by the createdAt or updatedAt","required":false,"schema":{"type":"string"}},{"name":"createdAfter","in":"query","description":"Filter results to only return models created after the specified ISO string date (exclusive). 
Requires the sortBy parameter to be \"createdAt\"","required":false,"schema":{"type":"string"}}],"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/GetPublicModelsResponse"}}}}},"deprecated":true}},"/models/public/{modelId}":{"get":{"tags":["Models"],"description":"Get the details of the given `modelId`. @deprecated Use GET /models/{modelId} with Authorization: public-auth-token header instead.","operationId":"GetPublicModelsByModelId","parameters":[{"name":"modelId","in":"path","description":"The model ID to retrieve","required":true,"schema":{"type":"string"}}],"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/GetPublicModelsByModelIdResponse"}}}}},"deprecated":true}},"/models/{modelId}":{"get":{"tags":["Models"],"description":"Get the details of the given `modelId`, including its training status and training progress if available.\nSupports both public access (via the `Authorization` header set to `public-auth-token`) and authenticated user access (including API keys).","operationId":"GetModelsByModelId","parameters":[{"name":"originalAssets","in":"query","description":"If set to true, returns the original asset without transformation","required":false,"schema":{"type":"string"}},{"name":"modelId","in":"path","description":"The model's `modelId` to retrieve","required":true,"schema":{"type":"string"}}],"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/GetModelsByModelIdResponse"}}}}},"security":[{"Authorizer":[]}]},"put":{"tags":["Models"],"description":"Update the given `modelId`","operationId":"PutModelsByModelId","parameters":[{"name":"originalAssets","in":"query","description":"If set to true, returns the original asset without transformation","required":false,"schema":{"type":"string"}},{"name":"modelId","in":"path","description":"The model's `modelId` to 
update","required":true,"schema":{"type":"string"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/PutModelsByModelIdRequest"}}},"required":true},"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/PutModelsByModelIdResponse"}}}}},"security":[{"Authorizer":[]}]},"delete":{"tags":["Models"],"description":"Delete a model","operationId":"DeleteModelsByModelId","parameters":[{"name":"modelId","in":"path","description":"The modelId to delete","required":true,"schema":{"type":"string"}}],"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/DeleteModelsByModelIdResponse"}}}}},"security":[{"Authorizer":[]}]}},"/models/{modelId}/copy":{"post":{"tags":["Models"],"description":"Copy the given `modelId` to a new model, thumbnail, presets, and all of its training images and pairs if any","operationId":"PostModelsCopyByModelId","parameters":[{"name":"originalAssets","in":"query","description":"If set to true, returns the original asset without transformation","required":false,"schema":{"type":"string"}},{"name":"modelId","in":"path","description":"The modelId to copy","required":true,"schema":{"type":"string"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/PostModelsCopyRequest"}}},"required":true},"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/PostModelsCopyByModelIdResponse"}}}}},"security":[{"Authorizer":[]}]}},"/models/{modelId}/description":{"get":{"tags":["Models"],"description":"Get the description of the given `modelId`","operationId":"GetModelsDescriptionByModelId","parameters":[{"name":"originalAssets","in":"query","description":"If set to true, returns the original asset without transformation","required":false,"schema":{"type":"string"}},{"name":"modelId","in":"path","description":"The 
description's `modelId` to retrieve","required":true,"schema":{"type":"string"}}],"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/GetModelsDescriptionByModelIdResponse"}}}}},"security":[{"Authorizer":[]}]},"put":{"tags":["Models"],"description":"Update the markdown description of the given `modelId`","operationId":"PutModelsDescriptionByModelId","parameters":[{"name":"originalAssets","in":"query","description":"If set to true, returns the original asset without transformation","required":false,"schema":{"type":"string"}},{"name":"modelId","in":"path","description":"The description's `modelId` to update","required":true,"schema":{"type":"string"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/PutModelsDescriptionByModelIdRequest"}}},"required":true},"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/PutModelsDescriptionByModelIdResponse"}}}}},"security":[{"Authorizer":[]}]}},"/models/{modelId}/download":{"post":{"tags":["Models"],"description":"Request a link to download the given `modelId`","operationId":"PostDownloadModel","parameters":[{"name":"modelId","in":"path","required":true,"schema":{"type":"string"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/PostDownloadModelRequest"}}},"required":true},"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/PostDownloadModelResponse"}}}}},"security":[{"Authorizer":[]}]}},"/models/{modelId}/examples":{"get":{"tags":["Models"],"description":"List all examples of the given `modelId`","operationId":"GetModelsExamplesByModelId","parameters":[{"name":"originalAssets","in":"query","description":"If set to true, returns the original asset without 
transformation","required":false,"schema":{"type":"string"}},{"name":"modelId","in":"path","description":"The examples' `modelId` to retrieve","required":true,"schema":{"type":"string"}}],"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/GetModelsExamplesByModelIdResponse"}}}}},"security":[{"Authorizer":[]}]},"put":{"tags":["Models"],"description":"Add/delete/sort examples of the given `modelId`","operationId":"PutModelsExamplesByModelId","parameters":[{"name":"originalAssets","in":"query","description":"If set to true, returns the original asset without transformation","required":false,"schema":{"type":"string"}},{"name":"modelId","in":"path","description":"The examples' `modelId` to update","required":true,"schema":{"type":"string"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/PutModelsExamplesByModelIdRequest"}}},"required":true},"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/PutModelsExamplesByModelIdResponse"}}}}},"security":[{"Authorizer":[]}]}},"/models/{modelId}/images":{"delete":{"tags":["Models"],"description":"Delete an image","operationId":"DeleteModelsImagesByModelId","parameters":[{"name":"modelId","in":"path","description":"The images' `modelId` to delete","required":true,"schema":{"type":"string"}},{"name":"ids","in":"query","description":"The asset ids of the images to delete","required":true,"schema":{"type":"string"}}],"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/DeleteModelsImagesByModelIdResponse"}}}}},"security":[{"Authorizer":[]}]}},"/models/{modelId}/presets":{"get":{"tags":["Models"],"description":"List all presets for the given `modelId`","operationId":"GetModelPresetsByModelId","parameters":[{"name":"originalAssets","in":"query","description":"If set to true, returns the original asset 
without transformation","required":false,"schema":{"type":"string"}},{"name":"modelId","in":"path","description":"The presets' `modelId`","required":true,"schema":{"type":"string"}}],"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/GetModelPresetsByModelIdResponse"}}}}},"security":[{"Authorizer":[]}]},"post":{"tags":["Models"],"description":"Create a new preset for the given `modelId`","operationId":"PostModelPresetByModelId","parameters":[{"name":"originalAssets","in":"query","description":"If set to true, returns the original asset without transformation","required":false,"schema":{"type":"string"}},{"name":"modelId","in":"path","description":"The preset's `modelId`","required":true,"schema":{"type":"string"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/PostModelPresetByModelIdRequest"}}},"required":true},"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/PostModelPresetByModelIdResponse"}}}}},"security":[{"Authorizer":[]}]}},"/models/{modelId}/presets/{presetId}":{"put":{"tags":["Models"],"description":"Modify the given `presetId`","operationId":"PutModelPresetByModelIdAndPresetId","parameters":[{"name":"modelId","in":"path","description":"The preset's `modelId`","required":true,"schema":{"type":"string"}},{"name":"originalAssets","in":"query","description":"If set to true, returns the original asset without transformation","required":false,"schema":{"type":"string"}},{"name":"presetId","in":"path","required":true,"schema":{"type":"string"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/PutModelPresetByModelIdAndPresetIdRequest"}}},"required":true},"responses":{"200":{"description":"200 
response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/PutModelPresetByModelIdAndPresetIdResponse"}}}}},"security":[{"Authorizer":[]}]},"delete":{"tags":["Models"],"description":"Delete a preset for the given `modelId`","operationId":"DeleteModelPresetByModelIdAndPresetId","parameters":[{"name":"modelId","in":"path","description":"The preset's `modelId` to delete","required":true,"schema":{"type":"string"}},{"name":"presetId","in":"path","description":"The preset's `presetId` to delete","required":true,"schema":{"type":"string"}}],"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/DeleteModelPresetByModelIdAndPresetIdResponse"}}}}},"security":[{"Authorizer":[]}]}},"/models/{modelId}/scores/prompt":{"get":{"tags":["Models"],"description":"Get the prompt scores for the given `modelId`","operationId":"GetModelsScoresPromptByModelId","parameters":[{"name":"modelId","in":"path","description":"The prompt scores' `modelId`","required":true,"schema":{"type":"string"}},{"name":"prompt","in":"query","required":false,"schema":{"type":"string"}}],"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/GetModelsScoresPromptByModelIdResponse"}}}}},"security":[{"Authorizer":[]}]}},"/models/{modelId}/scores/training-dataset":{"get":{"tags":["Models Training"],"description":"Get the training dataset scores for the given `modelId`","operationId":"GetModelsScoresTrainingDatasetByModelId","parameters":[{"name":"modelId","in":"path","description":"The training dataset scores' `modelId`","required":true,"schema":{"type":"string"}}],"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/GetModelsScoresTrainingDatasetByModelIdResponse"}}}}},"security":[{"Authorizer":[]}]}},"/models/{modelId}/tags":{"put":{"tags":["Models"],"description":"Add/delete tags for the given 
`modelId`","operationId":"PutModelsTagsByModelId","parameters":[{"name":"modelId","in":"path","description":"The tags' `modelId`","required":true,"schema":{"type":"string"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/PutModelsTagsByModelIdRequest"}}},"required":true},"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/PutModelsTagsByModelIdResponse"}}}}},"security":[{"Authorizer":[]}]}},"/models/{modelId}/train":{"put":{"tags":["Models Training"],"description":"Trigger the given `modelId` training","operationId":"PutModelsTrainByModelId","parameters":[{"name":"modelId","in":"path","description":"The training's `modelId` to trigger","required":true,"schema":{"type":"string"}},{"name":"originalAssets","in":"query","description":"If set to true, returns the original asset without transformation","required":false,"schema":{"type":"string"}},{"name":"dryRun","in":"query","required":false,"schema":{"type":"string"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/PutModelsTrainByModelIdRequest"}}},"required":true},"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/PutModelsTrainByModelIdResponse"}}}}},"security":[{"Authorizer":[]}]}},"/models/{modelId}/train/action":{"post":{"tags":["Models Training"],"description":"Trigger an action on a model training: cancel","operationId":"PostModelTrainingActionByModelId","parameters":[{"name":"modelId","in":"path","description":"The `modelId` being trained","required":true,"schema":{"type":"string"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/PostModelTrainingActionByModelIdRequest"}}},"required":true},"responses":{"200":{"description":"200 
response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/PostModelTrainingActionByModelIdResponse"}}}}},"security":[{"Authorizer":[]}]}},"/models/{modelId}/training-images":{"post":{"tags":["Models Training"],"description":"Add a new training image to the given `modelId`","operationId":"PostModelsTrainingImagesByModelId","parameters":[{"name":"originalAssets","in":"query","description":"If set to true, returns the original asset without transformation","required":false,"schema":{"type":"string"}},{"name":"modelId","in":"path","description":"The `modelId` where the training image will be stored","required":true,"schema":{"type":"string"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/PostModelsTrainingImagesByModelIdRequest"}}},"required":true},"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/PostModelsTrainingImagesByModelIdResponse"}}}}},"security":[{"Authorizer":[]}]}},"/models/{modelId}/training-images/pairs":{"put":{"tags":["Models Training"],"description":"Replace all training image pairs for the given `modelId`","operationId":"PutModelsTrainingImagesPairsByModelId","parameters":[{"name":"modelId","in":"path","description":"The `modelId` where the training image pairs will be stored","required":true,"schema":{"type":"string"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/PutModelsTrainingImagesPairsByModelIdRequest"}}},"required":true},"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/PutModelsTrainingImagesPairsByModelIdResponse"}}}}},"security":[{"Authorizer":[]}]}},"/models/{modelId}/training-images/{trainingImageId}":{"put":{"tags":["Models Training"],"description":"Replace the given `trainingImageId` for the given 
`modelId`","operationId":"PutModelsTrainingImagesByModelIdAndTrainingImageId","parameters":[{"name":"modelId","in":"path","description":"The training image's `modelId`","required":true,"schema":{"type":"string"}},{"name":"originalAssets","in":"query","description":"If set to true, returns the original asset without transformation","required":false,"schema":{"type":"string"}},{"name":"trainingImageId","in":"path","description":"The training image's `trainingImageId` to replace","required":true,"schema":{"type":"string"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/PutModelsTrainingImagesByModelIdAndTrainingImageIdRequest"}}},"required":true},"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/PutModelsTrainingImagesByModelIdAndTrainingImageIdResponse"}}}}},"security":[{"Authorizer":[]}]},"delete":{"tags":["Models Training"],"description":"Delete the given `trainingImageId` from the given `modelId`","operationId":"DeleteModelsTrainingImagesByModelIdAndTrainingImageId","parameters":[{"name":"modelId","in":"path","description":"The training image's `modelId`","required":true,"schema":{"type":"string"}},{"name":"trainingImageId","in":"path","description":"The training image's `trainingImageId` to delete","required":true,"schema":{"type":"string"}}],"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/DeleteModelsTrainingImagesByModelIdAndTrainingImageIdResponse"}}}}},"security":[{"Authorizer":[]}]}},"/models/{modelId}/transfer":{"post":{"tags":["Models"],"description":"Transfer (with a copy or a full ownership change) a model to a new owner, including all of its training 
images","operationId":"PostModelsTransferByModelId","parameters":[{"name":"modelId","in":"path","required":true,"schema":{"type":"string"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/PostModelsTransferRequest"}}},"required":true},"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/PostModelsTransferByModelIdResponse"}}}}},"security":[{"Authorizer":[]}]}},"/oscu/prices":{"get":{"tags":["Pricing"],"description":"Get the public Prepaid Compute Units (or OSCU for One Shot Compute Units) price details","operationId":"GetPublicOscuPrices","responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/GetPublicOscuPricesResponse"}}}}}}},"/recommendations/models":{"get":{"tags":["Discoverability"],"description":"List recommended models matching the given filters","operationId":"GetRecommendationsModels","parameters":[{"name":"capabilities","in":"query","description":"Filter models by capabilities. Multiple values comma-separated.\nExamples: `txt2img`, `img2img`, `inpaint`, `controlnet`, `txt2img,img2img,2img`. Also accepts prefix or suffix values such as `txt2`, `img2`, `inpaint`, `control`. Default: no filter","required":false,"schema":{"type":"string"}},{"name":"excludeModelIds","in":"query","description":"Exclude specific models by their IDs. Multiple IDs comma-separated. Example: `model1,model2,model3`. Default: no exclusions","required":false,"schema":{"type":"string"}},{"name":"limit","in":"query","description":"The maximum number of models to return. Default: `10`, Maximum: `30`","required":false,"schema":{"type":"string"}},{"name":"nextToken","in":"query","description":"Pagination token to retrieve the next page of results. Use the `nextToken` from the previous response. 
Default: first page","required":false,"schema":{"type":"string"}},{"name":"privacy","in":"query","description":"Filter models by privacy level.\nDefault: `private`. Values: `private`, `public`","required":false,"schema":{"type":"string"}},{"name":"originalAssets","in":"query","description":"If set to true, returns the original asset without transformation","required":false,"schema":{"type":"string"}},{"name":"type","in":"query","description":"Filter models by type.\nExamples: `flux.1`, `flux.1-lora`, etc. Default: no filter","required":false,"schema":{"type":"string"}},{"name":"tags","in":"query","description":"Filter models by tags. Multiple tags comma-separated.\nExample: `anime,portrait,style`. Default: no filter","required":false,"schema":{"type":"string"}},{"name":"excludeTypes","in":"query","description":"Exclude models by type. Multiple types comma-separated. Example: `sd-1_5,flux.1`. Default: no exclusions","required":false,"schema":{"type":"string"}}],"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/GetRecommendationsModelsResponse"}}}}},"security":[{"Authorizer":[]}]}},"/search/assets":{"post":{"tags":["Discoverability"],"description":"Search for assets.\nAt least one of the following fields must have a value: `query`, `filter`, `image`, or `images`.\n\n`image`, `images` are mutually exclusive.","operationId":"PostSearchAssets","parameters":[{"name":"originalAssets","in":"query","description":"If set to true, returns the original asset without transformation","required":false,"schema":{"type":"string"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/PostSearchAssetsRequest"}}},"required":true},"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/PostSearchAssetsResponse"}}}}},"security":[{"Authorizer":[]}]}},"/search/models":{"post":{"tags":["Discoverability"],"description":"Search 
for models.\nAt least one of the following fields must have a value: `query`, `filter`, `image`, or `images`.\n\n`image`, and `images` are mutually exclusive.","operationId":"PostSearchModels","parameters":[{"name":"originalModels","in":"query","required":false,"schema":{"type":"string"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/PostSearchModelsRequest"}}},"required":true},"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/PostSearchModelsResponse"}}}}},"security":[{"Authorizer":[]}]}},"/tags":{"get":{"tags":["Tags"],"description":"List all tags in use for the given `projectId`","operationId":"GetTags","parameters":[{"name":"pageSize","in":"query","description":"The number of items to return in the response. The default value is 50, maximum value is 100, minimum value is 1","required":false,"schema":{"type":"string"}},{"name":"paginationToken","in":"query","description":"A token you received in a previous request to query the next page of items","required":false,"schema":{"type":"string"}}],"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/GetTagsResponse"}}}}},"security":[{"Authorizer":[]}]}},"/uploads":{"post":{"tags":["Upload"],"description":"Create a temporary upload URL for a file. Support multipart uploads. 
Return a list of URLs for each part of the file.","operationId":"PostUploads","requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/PostUploadsRequest"}}},"required":true},"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/PostUploadsResponse"}}}}},"security":[{"Authorizer":[]}]}},"/uploads/{uploadId}":{"get":{"tags":["Upload"],"description":"Get the details of an existing upload","operationId":"GetUploads","parameters":[{"name":"uploadId","in":"path","description":"The upload Id to retrieve","required":true,"schema":{"type":"string"}}],"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/GetUploadsByIdResponse"}}}}},"security":[{"Authorizer":[]}]}},"/uploads/{uploadId}/action":{"post":{"tags":["Upload"],"description":"Trigger an action on upload","operationId":"PostUploadsAction","parameters":[{"name":"uploadId","in":"path","description":"The upload Id to trigger the action on","required":true,"schema":{"type":"string"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/PostUploadsActionRequest"}}},"required":true},"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/PostUploadsActionResponse"}}}}},"security":[{"Authorizer":[]}]}},"/usages":{"get":{"tags":["Usage"],"description":"Provide usage data for the given filters. Such as consumed compute units, number of assets generated, etc. Maximum time range with custom startDate and endDate is 120 days. Granularity is calculated based on the time range.","operationId":"GetUsages","parameters":[{"name":"userId","in":"query","description":"The unique identifier of the user for the usage. If not provided, returns all usages for the team.","required":false,"schema":{"type":"string"}},{"name":"activityOffset","in":"query","description":"The offset for the activity data. 
Default is 0. If bad offset or empty, 0 will be returned. Must be a non-negative integer.","required":false,"schema":{"type":"string"}},{"name":"userIds","in":"query","description":"The unique identifiers of the users for filtering the usage data. If not provided, use all users. Can be one or more comma separated values.","required":false,"schema":{"type":"string"}},{"name":"type","in":"query","description":"The type of the usage data. Can be one or more comma separated values. Default is all types. If bad type or empty, all types will be returned.","required":false,"schema":{"type":"string"}},{"name":"endDate","in":"query","description":"The end date of the usage in ISO 8601 format. If not provided, use default timeRange. If provided, startDate is required.","required":false,"schema":{"type":"string"}},{"name":"projectIds","in":"query","description":"The project ids for filtering the usage data. If not provided, use all projects. Can be one or more comma separated values.","required":false,"schema":{"type":"string"}},{"name":"timeRange","in":"query","description":"The time range of the usage. If not provided, use default timeRange. If startDate and endDate provided, timeRange is ignored.","required":false,"schema":{"type":"string"}},{"name":"startDate","in":"query","description":"The start date of the usage in ISO 8601 format. If not provided, use default timeRange. If provided, endDate is required.","required":false,"schema":{"type":"string"}}],"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/GetUsagesResponse"}}}}},"security":[{"Authorizer":[]}]}},"/webhooks/lambda/{jobId}":{},"/workflows":{"get":{"tags":["Workflows"],"description":"List workflows","operationId":"GetWorkflows","parameters":[{"name":"status","in":"query","description":"The status of the workflows to return. Only available when privacy=private/unlisted. 
Default undefined value returns all statuses for private, `ready` for public.","required":false,"schema":{"type":"string"}},{"name":"privacy","in":"query","description":"The privacy of the workflows to return. The default value is `private`, possible values are `private`, `public` and `unlisted`. Default value is `private`.","required":false,"schema":{"type":"string"}},{"name":"pageSize","in":"query","description":"The number of items to return in the response. The default value is 10, maximum value is 200, minimum value is 1","required":false,"schema":{"type":"string"}},{"name":"paginationToken","in":"query","description":"A token you received in a previous request to query the next page of items","required":false,"schema":{"type":"string"}},{"name":"tags","in":"query","required":false,"schema":{"type":"string"}}],"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/GetWorkflowsResponse"}}}}},"security":[{"Authorizer":[]}]},"post":{"tags":["Workflows"],"description":"Create workflow","operationId":"PostWorkflows","requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/PostWorkflowsRequest"}}},"required":true},"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/PostWorkflowsResponse"}}}}},"security":[{"Authorizer":[]}]}},"/workflows/tags":{"get":{"tags":["Workflows"],"description":"Get all unique tags from workflows in a project (all privacy levels). Optionally filter by status.","operationId":"GetWorkflowsTags","parameters":[{"name":"status","in":"query","description":"The status of the workflows to return. Only available when privacy=private/unlisted. 
Default undefined value returns all statuses for private, `ready` for public.","required":false,"schema":{"type":"string"}}],"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/GetWorkflowsTagsResponse"}}}}},"security":[{"Authorizer":[]}]}},"/workflows/{workflowId}":{"get":{"tags":["Workflows"],"description":"Get workflow by ID","operationId":"GetWorkflowsByWorkflowId","parameters":[{"name":"workflowId","in":"path","description":"The workflow ID to retrieve","required":true,"schema":{"type":"string"}}],"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/GetWorkflowsByWorkflowIdResponse"}}}}},"security":[{"Authorizer":[]}]},"put":{"tags":["Workflows"],"description":"Update workflow","operationId":"PutWorkflowsByWorkflowId","parameters":[{"name":"workflowId","in":"path","required":true,"schema":{"type":"string"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/PutWorkflowsByWorkflowIdRequest"}}},"required":true},"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/PutWorkflowsByWorkflowIdResponse"}}}}},"security":[{"Authorizer":[]}]},"delete":{"tags":["Workflows"],"description":"Delete workflow","operationId":"DeleteWorkflowsByWorkflowId","parameters":[{"name":"workflowId","in":"path","description":"The workflow ID to delete","required":true,"schema":{"type":"string"}}],"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/DeleteWorkflowsByWorkflowIdResponse"}}}}},"security":[{"Authorizer":[]}]}},"/workflows/{workflowId}/run":{"put":{"tags":["Workflows"],"description":"Run a 
workflow","operationId":"PutWorkflowRunByWorkflowId","parameters":[{"name":"workflowId","in":"path","required":true,"schema":{"type":"string"}},{"name":"dryRun","in":"query","required":false,"schema":{"type":"string"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/PutWorkflowsRunByWorkflowIdRequest"}}},"required":true},"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/PutWorkflowRunByWorkflowIdResponse"}}}}},"security":[{"Authorizer":[]}]}},"/workflows/{workflowId}/user-approval":{"put":{"tags":["Workflows"],"description":"Approve a user approval node in a workflow","operationId":"PutWorkflowUserApprovalByWorkflowId","parameters":[{"name":"workflowId","in":"path","required":true,"schema":{"type":"string"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/PutWorkflowUserApprovalByWorkflowIdRequest"}}},"required":true},"responses":{"200":{"description":"200 response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/PutWorkflowUserApprovalByWorkflowIdResponse"}}}}},"security":[{"Authorizer":[]}]}}},"servers":[{"url":"https://api.cloud.scenario.com/v1"}],"components":{"securitySchemes":{"Authorizer":{"type":"apiKey","name":"Authorization","in":"header","x-amazon-apigateway-authtype":"custom"}},"schemas":{"PostImg2imgIpAdapterInferencesRequest":{"type":"object","required":["modelId","prompt"],"properties":{"ipAdapterImageIds":{"type":"array","description":"The IpAdapter images as an AssetId. Will be ignored if the `ipAdapterImages` parameter is provided","items":{"type":"string"}},"seed":{"type":"string","description":"Used to reproduce previous results. Default: randomly generated number."},"strength":{"type":"number","description":"Controls the noise intensity introduced to the input image, where a value of 1.0 completely erases the original image's details. Available for img2img and inpainting. 
(within [0.01, 1.0], default: 0.75)","minimum":0.01,"maximum":1},"modelId":{"type":"string","description":"The model id to use for the inference"},"ipAdapterType":{"type":"string","description":"The type of IP Adapter model to use. Must be one of [`style`, `character`], defaults to `style`","enum":["character","style"],"default":"style"},"ipAdapterImage":{"type":"string","description":"Deprecated for type txt2img-ip-adapter and img2img-ip-adapter, use `ipAdapterImages` instead.\nThe IpAdapter image as a data url. Will be ignored if the `ipAdapterImages` parameter is provided."},"scheduler":{"type":"string","description":"The scheduler to use to override the default configured for the model. See detailed documentation for more details.","enum":["DDIMScheduler","DDPMScheduler","DEISMultistepScheduler","DPMSolverMultistepScheduler","DPMSolverSinglestepScheduler","EulerAncestralDiscreteScheduler","EulerDiscreteScheduler","HeunDiscreteScheduler","KDPM2AncestralDiscreteScheduler","KDPM2DiscreteScheduler","LCMScheduler","LMSDiscreteScheduler","PNDMScheduler","TCDScheduler","UniPCMultistepScheduler"]},"ipAdapterImages":{"type":"array","description":"The IpAdapter images as a data url.","items":{"type":"string"}},"imageParentId":{"type":"string","description":"Specifies the parent asset Id for the image when provided as a dataurl."},"baseModelId":{"type":"string","description":"The base model to use for the inference. 
Only Flux LoRA models can use this parameter.\nAllowed values are available in the model's attribute: `compliantModelIds`"},"height":{"type":"number","description":"The height of the generated images, must be a 8 multiple (within [64, 2048], default: 512)\nIf model.type is `sd-xl`, `sd-xl-lora`, `sd-xl-composition` the height must be within [512, 2048]\nIf model.type is `sd-1_5`, the height must be within [64, 1024]\nIf model.type is `flux.1.1-pro-ultra`, you can use the aspectRatio parameter instead","minimum":64,"maximum":2048},"imageHide":{"type":"boolean","description":"Toggles the hidden status of the image when provided as a dataurl.","default":false},"mask":{"type":"string","description":"The mask as a data URL, used to determine the area of change. The mask is a binary mask made out of white and black pixels. The white area is the one that will be replaced. (example: \"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAgAAAAIACAMAAADDpiTIAAABiVBMVEUAAADw8PDCwsLExMT5+fn19fX8/Pz////+/v79/f2hoaH6+vrc3NxnZ2dDQ0P4+PhkZGTs7OzOzs6Ojo709PRiYmLd3d1paWkoKCji4uI9PT3n5+fe3t7z8/NISEiysrLg4ODk5OSYmJh/f3/u7u5lZWVRUVHS0tKIiIg+Pj7p6emXl5dUVFQYGBjKysqtra1TU1PT09M8PDwcHBzR0dHq6uoEBAQmJiZ8fHzm5ub7+/swMDCrq6uKioqpqalHR0c3NzdOTk6BgYF7e3uwsLCAgIB3d3empqaNjY06OjrW1tZhYWG0tLQgICBxcXEICAhPT0/o6OgkJCRzc3N5eXnV1dXj4+NKSkobGxtaWlpfX1/a2trBwcF2dnYlJSV9fX3Hx8eSkpJNTU1sbGyWlpYRERGCgoIMDAzPz8+MjIy4uLiTk5PNzc3X19cxMTGDg4MpKSm8vLxGRkavr69QUFAKCgoqKiq2trbt7e329vaGhobl5eVra2tZWVk4ODgzMzNcXFyurq63t7dzhmTOAAAFeElEQVR4nO3dZXMUaRQF4EBIOgkQdFncFhZfZPF1d3d3d3f95TthC6iQTE9kuk+Y93m+MpW6t8+p7mGkZ2gIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACK8MDyR784eTw9BRkj1RU70rPQuvFqmjXpeWjVPdUMn6VnokUz86+qjemhaM3obAWoqvRYtGX2/DWgFA93K8DK9GS0Yku3AlTr06PRht+6FqB6Mj0bLVjfvQCeBpRgb00BRtLD0YKaAjgFlKCuAF4ULsAKp4CyHakrgLcGC1BXAKeAAihA4cbqCnAyPR2N211XgNH0dDRup2tA4RSgcLUFOJaejsbVFuC59HQ0bltdAW5KT0fjVtcV4GB6OppXV4C30sPRvLoCHE0PR/PqCvBEejiaV1eA9Gy0QAEKV5P/
WHo2WlBTgHXp2WhBTQFuSM9GCzwFKNv3ngKU7WL3ApxOz0YLXAHKNqwAZavJf0V6NlpQU4Bd6dloXpd7BLkClKImfwUogO+Glu3VuvwVYPDV5q8AA2+lAhTtVH3+CjDoeuRf3ZgekEYt61WAi+kJaVLt98JdAwbe/b3z14ABdn4u+VfVY+k5acbNc8u/qjalJ6UJd881/47t6WHpu2PzyL/yCzID56755T/llvTM9M8H88+/Y5+XhQbE2QXlP2XVg5/cfnrPmvHLtxYcG3nhu+dXp/dhfmpvCLIw4+mdmLtn+59/xzPptZirRvL3AeLrRkP5uwxcJxrL3y8MLSVP/XHr7P/QYP5VtbbdJelm/7RYduza+ebXmzdfakSj+XvrcIn4tOGYu9uQXp2O92P5u5vAUjDnt3mbkF6exi/z9X5Mb89r0QLcl16faP7uLR9X+1XP5qXXJ5u/AqT1/KZHs35J71+8bP5OAGl7svn/nd6/eNn8t6TXJ5r/4fT27Evm/1N6e6InALeVXwKC+b+c3p2h8FOA29Lbc2+0ANXe9P7FO5MtQPVN+gCULpx/VQ2nj0Dh0vl7KTgsHb/vhoSl46+cArLS6XcsTx+Dkv2QTr/jxfRBKNnT6fQ7VqUPQsneS6dfuZ9Y1IZ0+h3b0gehZHU//9eWC+mDULLwB0Iv8b+AoLHe+TQufQyKlg6/8kpgVjr9ygkgK51+Vd2ZPgRlS8dffZ4+AoVL5+9u0mHh/Hem9y9eMv0xNxLPG0k2IL08Q0PLkwXwCkBe9s2gX9PrcyJaABeBvGwBjqfXJ1sAp4A4BShc+BZxbhaf9ki2AI+n9ydbgNfT65MtwD/p9fk5WoAP0+uTPQVsTm9PtgDp5cm+H3QmvTxD0VNAenWmrIvlfzC9OpeE4h87ml6c/2XuF74svTZXBOIfOZVemqv29w6sv/79KL0y00y0m/+59L5ca0u7DUivywyrekQ2+vGhPj5VWJdelxkOdAtrbNvE5ceM960ByU2Z3UMzY1q56cj0x1xQgEG26WpA4wfWfjnrY/p0SxG/FbI0vTP1EcHDq7fWPOTtvhTgfGsr0Xf9uLHUV+klWIQ/F38deCO9A4uz2E+TH0ovwGIt7ovl6enpg4nRBefvi+EDYniBLw29lB6cvhlewNMBN4gfMOcmT9yxfe4XhInef5Hr0dmtk5NbJ799Ze36uvg3/pWek+btXdkl/jW/p0ejLbuufXYwtvvd9EwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAPfwHLuRXafg0SKQAAAAASUVORK5CYII=\")"},"image":{"type":"string","description":"The input image as a data URL (example: \"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVQYV2NgYAAAAAMAAWgmWQ0AAAAASUVORK5CYII=\") or the asset ID (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"imageId":{"type":"string","description":"Deprecated: The input image as an AssetId. Prefer to use image with the asset ID instead."},"ipAdapterImageId":{"type":"string","description":"Deprecated for type txt2img-ip-adapter and img2img-ip-adapter, use `ipAdapterImageIds` instead.\nThe IpAdapter image as an AssetId. 
Cannot be set if `ipAdapterImage` is provided. Will be ignored if the `ipAdapterImageIds` parameter is provided."},"ipAdapterScale":{"type":"number","description":"Deprecated for type txt2img-ip-adapter and img2img-ip-adapter, use `ipAdapterScales` instead.\nIpAdapter scale factor (within [0.0, 1.0], default: 0.9). Will be ignored if the `ipAdapterScales` parameter is provided","minimum":0,"maximum":1},"modelEpoch":{"type":"string","description":"The epoch of the model to use for the inference. Only available for Flux Lora Trained models."},"hideResults":{"type":"boolean","description":"If set, generated assets will be hidden and not returned in the list of images of the inference\nor when listing assets (default: false)","default":false},"ipAdapterScales":{"type":"array","description":"IpAdapter scale factors (within [0.0, 1.0], default: 0.9).","items":{"type":"number"}},"maskId":{"type":"string","description":"The mask as an AssetId. Will be ignored if the `image` parameter is provided"},"negativePrompt":{"type":"string","description":"The prompt not to guide the image generation, ignored when guidance < 1 (example: \"((ugly face))\")\nFor Flux based model (not Fast-Flux): requires negativePromptStrength > 0 and active only for inference types txt2img / img2img / controlnet."},"intermediateImages":{"type":"boolean","description":"Enable or disable the intermediate images generation (default: false)","default":false},"concepts":{"type":"array","items":{"type":"object","properties":{"modelId":{"type":"string","description":"The model ID (example: \"model_eyVcnFJcR92BxBkz7N6g5w\")"},"modelEpoch":{"type":"string","description":"The epoch of the model (example: \"000001\")\nOnly available for Flux Lora Trained models"},"scale":{"type":"number","description":"The scale of the model (example: 1.0)\nFor Flux Kontext Prompt Editing, the scale is between 0 and 
2.","minimum":-2,"maximum":2}},"required":["modelId","scale"]}},"guidance":{"type":"number","description":"Controls how closely the generated image follows the prompt. Higher values result in stronger adherence to the prompt. Default and allowed values depend on the model type:\n- For Flux dev models, the default is 3.5 and allowed values are within [0, 10]\n- For Flux pro models, the default is 3 and allowed values are within [2, 5]\n- For SDXL models, the default is 6 and allowed values are within [0, 20]\n- For SD1.5 models, the default is 7.5 and allowed values are within [0, 20]","minimum":0,"maximum":20},"numInferenceSteps":{"type":"number","description":"The number of denoising steps for each image generation (within [1, 150], default: 30)","minimum":1,"maximum":150},"numSamples":{"type":"number","description":"The number of images to generate (within [1, 128], default: 4)","minimum":1,"maximum":128},"width":{"type":"number","description":"The width of the generated images, must be a 8 multiple (within [64, 2048], default: 512)\nIf model.type is `sd-xl`, `sd-xl-lora`, `sd-xl-composition` the width must be within [512, 2048]\nIf model.type is `sd-1_5`, the width must be within [64, 1024]\nIf model.type is `flux.1.1-pro-ultra`, you can use the aspectRatio parameter instead","minimum":64,"maximum":2048},"negativePromptStrength":{"type":"number","description":"Only applicable for flux-dev based models for `txt2img`, `img2img`, and `controlnet` inference types.\n\nControls the influence of the negative prompt. Default 0 means the negative prompt has no effect. Higher values increase negative prompt influence.\nMust be > 0 if negativePrompt is provided.","minimum":0,"maximum":10},"prompt":{"type":"string","description":"Full text prompt including the model placeholder. 
(example: \"an illustration of phoenix in a fantasy world, flying over a mountain, 8k, bokeh effect\")"}}},"PostWebhooksLambdaJobIdResponse":{"type":"object","properties":{"message":{"type":"string"}}},"GetWorkflowsResponse":{"type":"object","required":["workflows"],"properties":{"nextPaginationToken":{"type":"string","description":"A token to query the next page of workflows"},"workflows":{"type":"array","items":{"type":"object","properties":{"thumbnail":{"type":"object","description":"Currently the thumbnail is identical to the after asset.","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"before":{"type":"object","description":"A representation of an asset before being processed by the workflow","properties":{"assetId":{"type":"string","description":"The AssetId of the image used as a thumbnail for your model (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"url":{"type":"string","description":"The url of the image used as a thumbnail for your model"}},"required":["assetId","url"]},"inputs":{"type":"array","description":"The inputs of the workflow.","items":{"type":"object","description":"Defines the input parameters for a model. Use this to understand the available input parameters\nfor a given `modelId` when calling `POST /generate/custom/{modelId}`.\nSee {@link https://docs.scenario.com/docs/video-generation} for examples.","properties":{"parent":{"type":"boolean","description":"Whether this input represents a parent asset to assign to the produced assets.\nOnly available for \\`file\\` and \\`file_array\\` input types.\n\nFor \\`file_array\\`, the parent asset is the first item in the array."},"color":{"type":"boolean","description":"Whether the input is a color or not. 
Only available for \\`string\\` input type."},"inputs":{"type":"array","description":"The list of inputs which form an object within a container array.\nAll inputs are the same as the current object.\nThis is only available for type inputs_array inputs.","items":{"type":"object","properties":{}}},"minLength":{"type":"number","description":"The minimum allowed length for string inputs. Also applies to each item in \\`string_array\\`."},"backgroundBehavior":{"type":"string","description":"Specifies the background behavior for the input. Only available for \\`file\\` and \\`file_array\\`\ninput types with kind \\`image\\`.","enum":["opaque","transparent"]},"description":{"type":"string","description":"Help text displayed in the UI to provide additional information about the input"},"type":{"type":"string","description":"The data type of the input","enum":["boolean","file","file_array","inputs_array","model","model_array","number","number_array","string","string_array"]},"required":{"type":"object","description":"Set of rules that describes when this input is required:\n- \\`always\\`: Input is always required\n- \\`ifNotDefined\\`: Input is required when another specified input is not defined\n- \\`ifDefined\\`: Input is required when another specified input is defined\n- \\`conditionalValues\\`: Input is required when another input has a specific value\n\nBy default, the input is not required.","properties":{"always":{"type":"boolean","description":"Whether the input is always required"},"ifNotDefined":{"type":"object","description":"Makes this input required when another input is not defined:\n- Key: name of the input that must be undefined\n- Value: message to display when this input is required","properties":{}},"ifDefined":{"type":"object","description":"Makes this input required when another input is defined:\n- Key: name of the input that must be defined\n- Value: message to display when this input is 
required","properties":{}},"conditionalValues":{"type":"object","description":"Makes this input required when another input has a specific value:\n- Key: name of the input to check\n- Value: operation and allowed values that trigger the requirement","properties":{}}}},"modelTypes":{"type":"array","description":"The allowed model types for this input. Example: \\`[\"flux.1-lora\"]\\`.\nOnly available for \\`model_array\\` input type.","items":{"type":"string","enum":["custom","flux.1","flux.1-composition","flux.1-kontext-dev","flux.1-kontext-lora","flux.1-krea-dev","flux.1-krea-lora","flux.1-lora","flux.1-pro","flux.1.1-pro-ultra","flux1.1-pro","gpt-image-1","sd-1_5","sd-1_5-composition","sd-1_5-lora","sd-xl","sd-xl-composition","sd-xl-lora"]}},"min":{"type":"number","description":"The minimum allowed value. Only available for \\`number\\` and array input types."},"promptSpark":{"type":"boolean","description":"Whether the input is used with prompt spark. Only available for \\`string\\` input type."},"placeholder":{"type":"string","description":"Placeholder text for the input. Only available for 'string' input type."},"group":{"type":"string","description":"Used to visually group inputs together in the UI. Inputs with the same group value appear\nconsecutively in the UI."},"allowedValues":{"type":"array","description":"The allowed values for the input. For \\`string\\` or \\`number\\` types, creates a single-select\ndropdown.\nFor \\`string_array\\` type, creates a multi-select dropdown.","items":{}},"costImpact":{"type":"boolean","description":"Whether this input affects the model's cost calculation"},"max":{"type":"number","description":"The maximum allowed value. Only available for \\`number\\` and \\`array\\` input types."},"kind":{"type":"string","description":"The asset kind of the input. 
Only taken into account for \\`file\\` and \\`file_array\\` input types.","enum":["3d","audio","document","image","image-hdr","json","video"]},"maskFrom":{"type":"string","description":"The name of the file input field to use as the mask source"},"label":{"type":"string","description":"The label displayed in the UI for this input"},"hint":{"type":"string","description":"Hint text displayed in the UI as a tooltip to guide the user"},"name":{"type":"string","description":"The name that must be used to call the model through the API"},"step":{"type":"number","description":"The step increment for numeric inputs. Only available for \\`number\\` input type.","minimum":1},"prompt":{"type":"boolean","description":"Whether the input is a prompt. When true, displays as a text area with prompt spark feature.\nOnly available for \\`string\\` input type."},"maxLength":{"type":"number","description":"The maximum allowed length for \\`string\\` inputs. Also applies to each item in \\`string_array\\`."}},"required":["name","type"]}},"description":{"type":"string","description":"The description of the workflow."},"privacy":{"type":"string","enum":["private","public","unlisted"]},"uiConfig":{"type":"object","description":"The UI configuration for the workflow. 
This is managed by scenario webapp.","properties":{"selects":{"type":"object","description":"Configuration for the selects","additionalProperties":{"type":"object","properties":{}}},"inputProperties":{"type":"object","description":"Configuration for the input properties","additionalProperties":{"type":"object","properties":{"collapsed":{"type":"boolean"}}}},"presets":{"type":"array","description":"Configuration for the presets","items":{"type":"object","properties":{"presets":{"type":"object","properties":{}},"fields":{"type":"array","items":{"type":"string"}}},"required":["fields","presets"]}},"triggerGenerate":{"type":"object","description":"Configuration for the trigger generate button","properties":{"after":{"type":"string","description":"The 'name' of the input where the trigger generate button will be displayed (after the input).\nDo not specify both position and after."},"label":{"type":"string"},"position":{"type":"string","description":"The position of the trigger generate button. 
If position specified, the button will be displayed at the specified position.\nDo not specify both position and after.","enum":["bottom","top"]}},"required":["label"]},"lorasComponent":{"type":"object","description":"Configuration for the loras component","properties":{"modelIdInput":{"type":"string","description":"The input model id (example: a composition or a single LoRA modelId)\nIf specified, the model id will be attached to the output asset as a metadata\nIf the model-decomposer parser is specified on it, modelInput and scaleInput will be automatically populated"},"scaleInput":{"type":"string","description":"The input name of the scale (number_array)"},"label":{"type":"string","description":"The label of the component"},"modelInput":{"type":"string","description":"The input name of the model (model_array)"}},"required":["label","modelInput","scaleInput"]},"resolutionComponent":{"type":"object","description":"Configuration for the resolution component","properties":{"presets":{"type":"array","description":"The resolution presets","items":{"type":"object","properties":{"width":{"type":"number"},"label":{"type":"string"},"height":{"type":"number"}},"required":["height","label","width"]}},"widthInput":{"type":"string","description":"The input name of the width"},"label":{"type":"string","description":"The label of the component"},"heightInput":{"type":"string","description":"The input name of the height"}},"required":["heightInput","label","presets","widthInput"]}}},"shortDescription":{"type":"string"},"authorId":{"type":"string"},"ownerId":{"type":"string"},"editorInfo":{"type":"object","description":"The UI data about the workflow. 
This is managed by scenario webapp.","properties":{}},"createdAt":{"type":"string","description":"ISO string"},"tagSet":{"type":"array","description":"The tag set of the workflow.","items":{"type":"string"}},"name":{"type":"string"},"after":{"type":"object","description":"A representation of an asset after being processed by the workflow","properties":{"assetId":{"type":"string","description":"The AssetId of the image used as a thumbnail for your model (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"url":{"type":"string","description":"The url of the image used as a thumbnail for your model"}},"required":["assetId","url"]},"id":{"type":"string"},"flow":{"type":"array","description":"The flow of the workflow.","items":{"type":"object","properties":{"logicType":{"type":"string","description":"The type of the logic for the node.\nOnly available for logic nodes.","enum":["if-else"]},"dependsOn":{"type":"array","description":"The nodes that this node depends on.\nOnly available for nodes that have dependencies. Mainly used for user approval nodes.","items":{"type":"string"}},"modelId":{"type":"string","description":"The model id for the node. Mainly used for custom model tasks."},"inputs":{"type":"array","description":"The inputs of the node.","items":{"type":"object","description":"This is used to run the flow.","properties":{"parent":{"type":"boolean","description":"Whether this input represents a parent asset to assign to the produced assets.\nOnly available for \\`file\\` and \\`file_array\\` input types.\n\nFor \\`file_array\\`, the parent asset is the first item in the array."},"color":{"type":"boolean","description":"Whether the input is a color or not. 
Only available for \\`string\\` input type."},"inputs":{"type":"array","description":"The list of inputs which form an object within a container array.\nAll inputs are the same as the current object.\nThis is only available for type inputs_array inputs.","items":{"type":"object","properties":{}}},"minLength":{"type":"number","description":"The minimum allowed length for string inputs. Also applies to each item in \\`string_array\\`."},"backgroundBehavior":{"type":"string","description":"Specifies the background behavior for the input. Only available for \\`file\\` and \\`file_array\\`\ninput types with kind \\`image\\`.","enum":["opaque","transparent"]},"description":{"type":"string","description":"Help text displayed in the UI to provide additional information about the input"},"type":{"type":"string","description":"The data type of the input","enum":["boolean","file","file_array","inputs_array","model","model_array","number","number_array","string","string_array"]},"required":{"type":"object","description":"Set of rules that describes when this input is required:\n- \\`always\\`: Input is always required\n- \\`ifNotDefined\\`: Input is required when another specified input is not defined\n- \\`ifDefined\\`: Input is required when another specified input is defined\n- \\`conditionalValues\\`: Input is required when another input has a specific value\n\nBy default, the input is not required.","properties":{"always":{"type":"boolean","description":"Whether the input is always required"},"ifNotDefined":{"type":"object","description":"Makes this input required when another input is not defined:\n- Key: name of the input that must be undefined\n- Value: message to display when this input is required","properties":{}},"ifDefined":{"type":"object","description":"Makes this input required when another input is defined:\n- Key: name of the input that must be defined\n- Value: message to display when this input is 
required","properties":{}},"conditionalValues":{"type":"object","description":"Makes this input required when another input has a specific value:\n- Key: name of the input to check\n- Value: operation and allowed values that trigger the requirement","properties":{}}}},"modelTypes":{"type":"array","description":"The allowed model types for this input. Example: \\`[\"flux.1-lora\"]\\`.\nOnly available for \\`model_array\\` input type.","items":{"type":"string","enum":["custom","flux.1","flux.1-composition","flux.1-kontext-dev","flux.1-kontext-lora","flux.1-krea-dev","flux.1-krea-lora","flux.1-lora","flux.1-pro","flux.1.1-pro-ultra","flux1.1-pro","gpt-image-1","sd-1_5","sd-1_5-composition","sd-1_5-lora","sd-xl","sd-xl-composition","sd-xl-lora"]}},"ref":{"type":"object","description":"The reference to another input or output of the same workflow.\nMust have at least one of node or conditional.","properties":{"equal":{"type":"string","description":"This is the desired node output value if ref is an if/else node."},"node":{"type":"string","description":"The node id or 'workflow' if the source is a workflow input."},"conditional":{"type":"array","description":"The conditional nodes to reference.\nIf the conditional nodes are successful, the node will be successful.\nIf the conditional nodes are skipped, the node will be skipped.\nContains an array of node ids used to check the status of the nodes.","items":{"type":"string"}},"name":{"type":"string","description":"The name of the input or output to reference.\nIf the type is 'workflow', the name of the workflow input is required\nIf the type is 'node', the name is not mandatory, except if you want all outputs of the node.\nTo get all outputs of a node, you can use the name 'all'."}}},"min":{"type":"number","description":"The minimum allowed value. Only available for \\`number\\` and array input types."},"promptSpark":{"type":"boolean","description":"Whether the input is used with prompt spark. 
Only available for \\`string\\` input type."},"placeholder":{"type":"string","description":"Placeholder text for the input. Only available for 'string' input type."},"group":{"type":"string","description":"Used to visually group inputs together in the UI. Inputs with the same group value appear\nconsecutively in the UI."},"allowedValues":{"type":"array","description":"The allowed values for the input. For \\`string\\` or \\`number\\` types, creates a single-select\ndropdown.\nFor \\`string_array\\` type, creates a multi-select dropdown.","items":{}},"costImpact":{"type":"boolean","description":"Whether this input affects the model's cost calculation"},"max":{"type":"number","description":"The maximum allowed value. Only available for \\`number\\` and \\`array\\` input types."},"kind":{"type":"string","description":"The asset kind of the input. Only taken into account for \\`file\\` and \\`file_array\\` input types.","enum":["3d","audio","document","image","image-hdr","json","video"]},"maskFrom":{"type":"string","description":"The name of the file input field to use as the mask source"},"label":{"type":"string","description":"The label displayed in the UI for this input"},"hint":{"type":"string","description":"Hint text displayed in the UI as a tooltip to guide the user"},"name":{"type":"string","description":"The name that must be used to call the model through the API"},"step":{"type":"number","description":"The step increment for numeric inputs. Only available for \\`number\\` input type.","minimum":1},"prompt":{"type":"boolean","description":"Whether the input is a prompt. When true, displays as a text area with prompt spark feature.\nOnly available for \\`string\\` input type."},"maxLength":{"type":"number","description":"The maximum allowed length for \\`string\\` inputs. 
Also applies to each item in \\`string_array\\`."}},"required":["name","type"]}},"id":{"type":"string","description":"The id of the node."},"logic":{"type":"object","description":"The logic of the node.\nOnly available for logic nodes.","properties":{"default":{"type":"string","description":"The default case of the logic.\nContains the id/output of the node to execute if no case is matched.\nOnly available for if/else nodes."},"transform":{"type":"string","description":"The transform of the logic.\nOnly available for transform nodes."},"cases":{"type":"array","description":"The cases of the logic.\nOnly available for if/else nodes.","items":{"type":"object","properties":{"condition":{"type":"string"},"value":{"type":"string"}},"required":["condition","value"]}}}},"type":{"type":"string","description":"The type of the job for the node.","enum":["custom-model","generate-prompt","logic","remove-background","transform","user-approval","workflow"]},"workflowId":{"type":"string","description":"The workflow id for the node. Mainly used for workflow tasks."}},"required":["id","type"]}},"outputAssetKinds":{"type":"array","items":{"type":"string","enum":["3d","audio","document","image","image-hdr","json","video"]}},"status":{"type":"string","enum":["deleted","draft","ready"]},"updatedAt":{"type":"string","description":"ISO string"}},"required":["authorId","createdAt","description","editorInfo","flow","id","inputs","name","ownerId","privacy","status","tagSet","updatedAt"]}}},"description":"Returns a paginated list of workflows. Supports both public access (via the `Authorization` header set to `public-auth-token`) and authenticated user access (including API keys). 
Public responses exclude some attributes."},"PutWebhookEndpointsByIdResponse":{"type":"object","required":["webhookEndpoint"],"properties":{"webhookEndpoint":{"type":"object","properties":{"createdAt":{"type":"string","description":"The date and time the webhook endpoint was created"},"nbTotalCalls":{"type":"number","description":"The number of calls to the webhook endpoint"},"enabledEvents":{"type":"array","description":"The events that trigger the webhook. ['*'] indicates that all events are enabled","items":{"type":"string","enum":["*","asset.download.completed","asset.download.created","asset.download.failed","generation.cancelled","generation.completed","generation.created","generation.failed","inference.cancelled","inference.completed","inference.created","inference.failed","model.download.completed","model.download.created","model.download.failed","model.training.cancelled","model.training.completed","model.training.failed","model.training.started"]}},"nbFailedCalls":{"type":"number","description":"The number of calls to the webhook endpoint that have failed"},"description":{"type":"string","description":"A description of the webhook endpoint"},"id":{"type":"string","description":"The ID of the webhook endpoint"},"secret":{"type":"string","description":"The endpoint's secret, used to generate webhook signatures. 
Only returned at creation"},"ownerId":{"type":"string","description":"The ID of the owner of the webhook endpoint"},"enabled":{"type":"boolean","description":"Whether the webhook is enabled"},"url":{"type":"string","description":"The URL of the webhook endpoint"},"updatedAt":{"type":"string","description":"The date and time the webhook endpoint was updated"}},"required":["createdAt","enabled","enabledEvents","id","nbFailedCalls","nbTotalCalls","ownerId","updatedAt","url"]}}},"DeleteAssetResponse":{},"GetModelsClassesByModelIdResponse":{"type":"object","required":["classes"],"properties":{"nextPaginationToken":{"type":"string","description":"A token to query the next page of classes"},"classes":{"type":"array","items":{"type":"object","properties":{"modelId":{"type":"string","description":"The model ID of the class (example: \"stable-diffusion-v1-5\")"},"name":{"type":"string","description":"The class name (example: \"Character Design\")"},"category":{"type":"string","description":"The category slug of the class (example: \"art-style\")"},"thumbnails":{"type":"array","description":"Some example images URLs to showcase the class","items":{"type":"string"}},"prompt":{"type":"string","description":"The class prompt (example: \"a character design\")"},"conceptPrompt":{"type":"string","description":"The concept prompt of the class (example: \"a sks character design\")"},"slug":{"type":"string","description":"The class slug (example: \"art-style-character-design\")"},"status":{"type":"string","description":"The class status (only published classes are listed, but unpublished classes can still appear in existing models)","enum":["published","unpublished"]}},"required":["category","conceptPrompt","modelId","name","prompt","slug","status","thumbnails"]}}}},"DeleteModelsInferencesAllByModelIdResponse":{},"PostDownloadModelRequest":{"type":"object","properties":{"modelEpoch":{"type":"string","description":"The epoch hash of the model to download\nOnly available for Flux Lora 
Trained models with epochs\nWill only apply to the main model in the download request\nIf not set, the default (latest or setup at model level) epoch will be used"}}},"PutModelsTrainByModelIdResponse":{"type":"object","required":["job","model"],"properties":{"creativeUnitsCost":{"type":"number","description":"The Creative Units cost for the request billed"},"creativeUnitsDiscount":{"type":"number","description":"The Creative Units discount for the request billed"},"model":{"type":"object","properties":{"trainingImages":{"type":"array","description":"The URLs of the first 3 training images of the model. To retrieve the full set of images, get it by modelId","items":{"type":"object","properties":{"createdAt":{"type":"string","description":"The training image upload date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"automaticCaptioning":{"type":"string","description":"Automatic captioning of the image"},"downloadUrl":{"type":"string","description":"The URL of the image"},"name":{"type":"string","description":"The original file name of the image (example: \"my-training-image.jpg\")"},"description":{"type":"string","description":"Description for the image"},"id":{"type":"string","description":"The training image ID (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"}},"required":["automaticCaptioning","createdAt","description","downloadUrl","id","name"]}},"inputs":{"type":"array","description":"The inputs of the model. Only used for custom models. To retrieve this list, get it by modelId with GET /models/{modelId}","items":{"type":"object","description":"Defines the input parameters for a model. 
Use this to understand the available input parameters\nfor a given `modelId` when calling `POST /generate/custom/{modelId}`.\nSee {@link https://docs.scenario.com/docs/video-generation} for examples.","properties":{"parent":{"type":"boolean","description":"Whether this input represents a parent asset to assign to the produced assets.\nOnly available for \\`file\\` and \\`file_array\\` input types.\n\nFor \\`file_array\\`, the parent asset is the first item in the array."},"color":{"type":"boolean","description":"Whether the input is a color or not. Only available for \\`string\\` input type."},"inputs":{"type":"array","description":"The list of inputs which form an object within a container array.\nAll inputs are the same as the current object.\nThis is only available for type inputs_array inputs.","items":{"type":"object","properties":{}}},"minLength":{"type":"number","description":"The minimum allowed length for string inputs. Also applies to each item in \\`string_array\\`."},"backgroundBehavior":{"type":"string","description":"Specifies the background behavior for the input. 
Only available for \\`file\\` and \\`file_array\\`\ninput types with kind \\`image\\`.","enum":["opaque","transparent"]},"description":{"type":"string","description":"Help text displayed in the UI to provide additional information about the input"},"type":{"type":"string","description":"The data type of the input","enum":["boolean","file","file_array","inputs_array","model","model_array","number","number_array","string","string_array"]},"required":{"type":"object","description":"Set of rules that describes when this input is required:\n- \\`always\\`: Input is always required\n- \\`ifNotDefined\\`: Input is required when another specified input is not defined\n- \\`ifDefined\\`: Input is required when another specified input is defined\n- \\`conditionalValues\\`: Input is required when another input has a specific value\n\nBy default, the input is not required.","properties":{"always":{"type":"boolean","description":"Whether the input is always required"},"ifNotDefined":{"type":"object","description":"Makes this input required when another input is not defined:\n- Key: name of the input that must be undefined\n- Value: message to display when this input is required","properties":{}},"ifDefined":{"type":"object","description":"Makes this input required when another input is defined:\n- Key: name of the input that must be defined\n- Value: message to display when this input is required","properties":{}},"conditionalValues":{"type":"object","description":"Makes this input required when another input has a specific value:\n- Key: name of the input to check\n- Value: operation and allowed values that trigger the requirement","properties":{}}}},"modelTypes":{"type":"array","description":"The allowed model types for this input. 
Example: \\`[\"flux.1-lora\"]\\`.\nOnly available for \\`model_array\\` input type.","items":{"type":"string","enum":["custom","flux.1","flux.1-composition","flux.1-kontext-dev","flux.1-kontext-lora","flux.1-krea-dev","flux.1-krea-lora","flux.1-lora","flux.1-pro","flux.1.1-pro-ultra","flux1.1-pro","gpt-image-1","sd-1_5","sd-1_5-composition","sd-1_5-lora","sd-xl","sd-xl-composition","sd-xl-lora"]}},"min":{"type":"number","description":"The minimum allowed value. Only available for \\`number\\` and array input types."},"promptSpark":{"type":"boolean","description":"Whether the input is used with prompt spark. Only available for \\`string\\` input type."},"placeholder":{"type":"string","description":"Placeholder text for the input. Only available for 'string' input type."},"group":{"type":"string","description":"Used to visually group inputs together in the UI. Inputs with the same group value appear\nconsecutively in the UI."},"allowedValues":{"type":"array","description":"The allowed values for the input. For \\`string\\` or \\`number\\` types, creates a single-select\ndropdown.\nFor \\`string_array\\` type, creates a multi-select dropdown.","items":{}},"costImpact":{"type":"boolean","description":"Whether this input affects the model's cost calculation"},"max":{"type":"number","description":"The maximum allowed value. Only available for \\`number\\` and \\`array\\` input types."},"kind":{"type":"string","description":"The asset kind of the input. 
Only taken into account for \\`file\\` and \\`file_array\\` input types.","enum":["3d","audio","document","image","image-hdr","json","video"]},"maskFrom":{"type":"string","description":"The name of the file input field to use as the mask source"},"label":{"type":"string","description":"The label displayed in the UI for this input"},"hint":{"type":"string","description":"Hint text displayed in the UI as a tooltip to guide the user"},"name":{"type":"string","description":"The name that must be used to call the model through the API"},"step":{"type":"number","description":"The step increment for numeric inputs. Only available for \\`number\\` input type.","minimum":1},"prompt":{"type":"boolean","description":"Whether the input is a prompt. When true, displays as a text area with prompt spark feature.\nOnly available for \\`string\\` input type."},"maxLength":{"type":"number","description":"The maximum allowed length for \\`string\\` inputs. Also applies to each item in \\`string_array\\`."}},"required":["name","type"]}},"exampleAssetIds":{"type":"array","description":"List of all example asset IDs set up by the model owner","items":{"type":"string"}},"privacy":{"type":"string","description":"The privacy of the model (default: private)","enum":["private","public","unlisted"],"default":"private"},"softDeletionOn":{"type":"string","description":"The date when the model will be soft deleted (only for Free plan)"},"epoch":{"type":"string","description":"The epoch of the model. 
Only available for Flux Lora Trained models.\nIf not set, uses the final model epoch (latest)"},"uiConfig":{"type":"object","description":"The UI configuration for the model","properties":{"selects":{"type":"object","description":"Configuration for the selects","additionalProperties":{"type":"object","properties":{}}},"inputProperties":{"type":"object","description":"Configuration for the input properties","additionalProperties":{"type":"object","properties":{"collapsed":{"type":"boolean"}}}},"presets":{"type":"array","description":"Configuration for the presets","items":{"type":"object","properties":{"presets":{"type":"object","properties":{}},"fields":{"type":"array","items":{"type":"string"}}},"required":["fields","presets"]}},"triggerGenerate":{"type":"object","description":"Configuration for the trigger generate button","properties":{"after":{"type":"string","description":"The 'name' of the input where the trigger generate button will be displayed (after the input).\nDo not specify both position and after."},"label":{"type":"string"},"position":{"type":"string","description":"The position of the trigger generate button. 
If position specified, the button will be displayed at the specified position.\nDo not specify both position and after.","enum":["bottom","top"]}},"required":["label"]},"lorasComponent":{"type":"object","description":"Configuration for the loras component","properties":{"modelIdInput":{"type":"string","description":"The input model id (example: a composition or a single LoRA modelId)\nIf specified, the model id will be attached to the output asset as a metadata\nIf the model-decomposer parser is specified on it, modelInput and scaleInput will be automatically populated"},"scaleInput":{"type":"string","description":"The input name of the scale (number_array)"},"label":{"type":"string","description":"The label of the component"},"modelInput":{"type":"string","description":"The input name of the model (model_array)"}},"required":["label","modelInput","scaleInput"]},"resolutionComponent":{"type":"object","description":"Configuration for the resolution component","properties":{"presets":{"type":"array","description":"The resolution presets","items":{"type":"object","properties":{"width":{"type":"number"},"label":{"type":"string"},"height":{"type":"number"}},"required":["height","label","width"]}},"widthInput":{"type":"string","description":"The input name of the width"},"label":{"type":"string","description":"The label of the component"},"heightInput":{"type":"string","description":"The input name of the height"}},"required":["heightInput","label","presets","widthInput"]}}},"source":{"type":"string","description":"The source of the model","enum":["civitai","huggingface","other","scenario"]},"ownerId":{"type":"string","description":"The owner ID (example: \"team_VFhihHKMRZyDDnZAJwLb2Q\")"},"type":{"type":"string","description":"The model type (example: 
\"flux.1-lora\")","enum":["custom","flux.1","flux.1-composition","flux.1-kontext-dev","flux.1-kontext-lora","flux.1-krea-dev","flux.1-krea-lora","flux.1-lora","flux.1-pro","flux.1.1-pro-ultra","flux1.1-pro","gpt-image-1","sd-1_5","sd-1_5-composition","sd-1_5-lora","sd-xl","sd-xl-composition","sd-xl-lora"]},"createdAt":{"type":"string","description":"The model creation date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"complianceMetadata":{"type":"object","description":"Compliance and regulatory metadata for the model","properties":{"subProcessor":{"type":"string","description":"The sub-processor used for the model","enum":["AWS","Fal","Modal","Provider","Replicate"]},"modelProvider":{"type":"string","description":"The provider of the model (examples: 'Google', 'AWS', 'BFL', 'Meshy', etc.)"},"licenseTerms":{"type":"string","description":"URL to license terms"},"dataProcessingComment":{"type":"string","description":"Data processing comment (e.g., zeroRetention, temporaryRetention, etc.)","enum":["researchOnly","serviceImprovement","temporaryRetention","zeroRetention"]},"maintainer":{"type":"string","description":"The maintainer of the model","enum":["Fal","Provider","Replicate","Scenario"]}}},"trainingStats":{"type":"object","description":"Additional information about the model's training","properties":{"trainDuration":{"type":"number","description":"The training duration in seconds"},"queueDuration":{"type":"number","description":"The training queued duration in seconds"},"endedAt":{"type":"string","description":"The training end time as an ISO date string"},"startedAt":{"type":"string","description":"The training start time as an ISO date string"}}},"promptEmbedding":{"type":"string","description":"Fine-tune the model's inferences with prompt embedding"},"trainingProgress":{"type":"object","description":"Additional information about the training progress of the model","properties":{"stage":{"type":"string","description":"The stage of the 
request","enum":["pending","queued-for-train","running-train","starting-train"]},"remainingTimeMs":{"type":"number","description":"The remaining time in milliseconds"},"progress":{"type":"number","description":"The progress of the job","minimum":0,"maximum":1},"startedAt":{"type":"number","description":"The timestamp in millisecond marking the start of the process"},"position":{"type":"number","description":"Position of the job in the queue (ie. the number of job in the queue before this one)"},"updatedAt":{"type":"number","description":"Timestamp in milliseconds of the last time the training progress was updated"}},"required":["stage","updatedAt"]},"id":{"type":"string","description":"The model ID (example: \"model_eyVcnFJcR92BxBkz7N6g5w\")"},"class":{"type":"object","description":"The class of the model","properties":{"modelId":{"type":"string","description":"The model ID of the class (example: \"stable-diffusion-v1-5\")"},"name":{"type":"string","description":"The class name (example: \"Character Design\")"},"category":{"type":"string","description":"The category slug of the class (example: \"art-style\")"},"thumbnails":{"type":"array","description":"Some example images URLs to showcase the class","items":{"type":"string"}},"prompt":{"type":"string","description":"The class prompt (example: \"a character design\")"},"conceptPrompt":{"type":"string","description":"The concept prompt of the class (example: \"a sks character design\")"},"slug":{"type":"string","description":"The class slug (example: \"art-style-character-design\")"},"status":{"type":"string","description":"The class status (only published classes are listed, but unpublished classes can still appear in existing models)","enum":["published","unpublished"]}},"required":["category","conceptPrompt","modelId","name","prompt","slug","status","thumbnails"]},"updatedAt":{"type":"string","description":"The model last update date as an ISO string (example: 
\"2023-02-03T11:19:41.579Z\")"},"thumbnail":{"type":"object","description":"A thumbnail for your model","properties":{"assetId":{"type":"string","description":"The AssetId of the image used as a thumbnail for your model (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"url":{"type":"string","description":"The url of the image used as a thumbnail for your model"}},"required":["assetId","url"]},"accessRestrictions":{"type":"number","description":"The access restrictions of the model\n0: Free plan\n25: Creator plan\n50: Pro plan\n75: Team plan\n100: Enterprise plan"},"capabilities":{"type":"array","description":"List of model capabilities (example: [\"txt2img\", \"img2img\", \"txt2img_ip_adapter\", ...])","items":{"type":"string","enum":["3d23d","audio2audio","controlnet","controlnet_img2img","controlnet_inpaint","controlnet_inpaint_ip_adapter","controlnet_ip_adapter","controlnet_reference","controlnet_texture","img23d","img2img","img2img_ip_adapter","img2img_texture","img2txt","img2video","inpaint","inpaint_ip_adapter","outpaint","reference","reference_texture","txt23d","txt2audio","txt2img","txt2img_ip_adapter","txt2img_texture","txt2txt","txt2video","video2img","video2video"]}},"parentModelId":{"type":"string","description":"The id of the parent model"},"trainingImagePairs":{"type":"array","description":"Array of training image pairs","items":{"type":"object","properties":{"sourceId":{"type":"string","description":"The source asset ID (must be a training asset)"},"targetId":{"type":"string","description":"The target asset ID (must be a training asset)"},"instruction":{"type":"string","description":"The instruction for the image pair, source to target"}}}},"trainingImagesNumber":{"type":"number","description":"The total number of training images"},"custom":{"type":"boolean","description":"Whether the model is a custom model and can be used only with POST /generate/custom/{modelId} endpoint"},"modelKeyword":{"type":"string","description":"The model keyword, this is a 
legacy parameter, please use conceptPrompt in parameters"},"shortDescription":{"type":"string","description":"The model short description (example: \"This model generates highly detailed cinematic scenes.\")"},"authorId":{"type":"string","description":"The author user ID (example: \"user_VFhihHKMRZyDDnZAJwLb2Q\")"},"userId":{"type":"string","description":"(Deprecated) The user ID (example: \"user_VFhihHKMRZyDDnZAJwLb2Q\")"},"tags":{"type":"array","description":"The associated tags (example: [\"sci-fi\", \"landscape\"])","items":{"type":"string"}},"negativePromptEmbedding":{"type":"string","description":"Fine-tune the model's inferences with negative prompt embedding"},"concepts":{"type":"array","description":"The concepts is required for the type model: composition","items":{"type":"object","properties":{"modelId":{"type":"string","description":"The model ID (example: \"model_eyVcnFJcR92BxBkz7N6g5w\")"},"modelEpoch":{"type":"string","description":"The epoch of the model (example: \"000001\")\nOnly available for Flux Lora Trained models"},"scale":{"type":"number","description":"The scale of the model (example: 1.0)\nFor Flux Kontext Prompt Editing, the scale is between 0 and 2.","minimum":-2,"maximum":2}},"required":["modelId","scale"]}},"collectionIds":{"type":"array","description":"A list of CollectionId this model belongs to","items":{"type":"string"}},"name":{"type":"string","description":"The model name (example: \"Cinematic Realism\")"},"epochs":{"type":"array","description":"The epochs of the model. 
Only available for Flux Lora Trained models.","items":{"type":"object","properties":{"assets":{"type":"array","description":"The assets of the epoch if sample prompts have been supplied during training","items":{"type":"object","properties":{"assetId":{"type":"string","description":"The AssetId of the image during training (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"url":{"type":"string","description":"The url of the asset"}},"required":["assetId","url"]}},"epoch":{"type":"string","description":"The epoch hash to identify the epoch"}},"required":["epoch"]}},"parameters":{"type":"object","description":"The parameters of the model","properties":{"priorLossWeight":{"type":"number","description":"The weight of prior preservation loss\n\nOnly available for SD15 and SDXL LoRA training","minimum":0,"maximum":1.7976931348623157e+308,"exclusiveMinimum":true},"seed":{"type":"number","description":"Used to reproduce previous results. Default: randomly generated number.\n\nOnly available for SD15 and SDXL LoRA training","minimum":0,"maximum":9007199254740991},"numUNetTrainSteps":{"type":"number","description":"The number of training steps for the UNet\n\nOnly available for SDXL LoRA training","minimum":0,"maximum":40000},"sampleSourceImages":{"type":"array","description":"The sample prompt images (AssetIds) paired with samplePrompts\nOnly available for Flux LoRA training\nMust be the same length as samplePrompts","items":{"type":"string"}},"classPrompt":{"type":"string","description":"The prompt to specify images in the same class as provided instance images\n\nOnly available for SD15 training"},"wandbKey":{"type":"string","description":"The Weights & Biases key to use for logging. 
The maximum length is 40 characters"},"scaleLr":{"type":"boolean","description":"Whether to scale the learning rate\n\nNote: Legacy parameter, will be ignored\n\nOnly available for SD15 and SDXL LoRA training","default":false},"randomCropScale":{"type":"number","description":"Scale of random crops\n\nOnly available for SD15 and SDXL LoRA training","minimum":0,"maximum":1},"lrScheduler":{"type":"string","description":"The scheduler type to use (default: \"constant\")\n\nOnly available for SD15 and SDXL LoRA training","enum":["constant","constant-with-warmup","cosine","cosine-with-restarts","linear","polynomial"],"default":"constant"},"rank":{"type":"number","description":"The dimension of the LoRA update matrices\n\nOnly available for SDXL and Flux LoRA training\n\nDefault value varies depending on the model type:\n- For SDXL: 64\n- For Flux: 16","minimum":4,"maximum":64},"validationPrompt":{"type":"string","description":"Validation prompt\n\nOnly available for SD15 and SDXL LoRA training","default":""},"conceptPrompt":{"type":"string","description":"The prompt with identifier specifying the instance (or subject) of the class (example: \"a daiton dog\")\n\nDefault value varies depending on the model type:\n- For SD1.5: \"daiton\" if no class is associated with the model\n- For SDXL: \"daiton\"\n- For Flux: \"\""},"maxTrainSteps":{"type":"number","description":"Maximum number of training steps to execute (default: varies depending on the model type)\n\nFor SDXL LoRA training, please use `numTextTrainSteps` and `numUNetTrainSteps` instead\n\nDefault value varies depending on the model type:\n- For SD1.5: round((number of training images * 225) / 3)\n- For SDXL: number of training images * 175\n- For Flux: number of training images * 100\n\nMaximum value varies depending on the model type:\n- For SD1.5 and SDXL: [0, 40000]\n- For Flux: [0, 10000]","minimum":0,"maximum":40000},"nbEpochs":{"type":"number","description":"The number of epochs to train for\n\nOnly available 
for Flux LoRA training","minimum":1,"maximum":30},"textEncoderTrainingRatio":{"type":"number","description":"Whether to train the text encoder or not\n\nExample: For 100 steps and a value of 0.2, it means that the text encoder will be trained for 20 steps and then the UNet for 80 steps\n\nNote: Legacy parameter, please use `numTextTrainSteps` and `numUNetTrainSteps`\n\nOnly available for SD15 and SDXL LoRA training","minimum":0,"maximum":0.99},"validationFrequency":{"type":"number","description":"Validation frequency. Cannot be greater than maxTrainSteps value\n\nOnly available for SD15 and SDXL LoRA training","minimum":0},"nbRepeats":{"type":"number","description":"The number of times to repeat the training\n\nOnly available for Flux LoRA training","minimum":1,"maximum":30},"samplePrompts":{"type":"array","description":"The prompts to use for each epoch\nOnly available for Flux LoRA training","items":{"type":"string"}},"randomCropRatio":{"type":"number","description":"Ratio of random crops\n\nOnly available for SD15 and SDXL LoRA training","minimum":0,"maximum":1},"learningRate":{"type":"number","description":"Initial learning rate (after the potential warmup period)\n\nDefault value varies depending on the model type:\n- For SD1.5 and SDXL: 0.000005\n- For Flux: 0.0001","minimum":0,"exclusiveMinimum":true},"optimizeFor":{"type":"string","description":"Optimize the model training task for a specific type of input images. 
The available values are:\n- \"likeness\": optimize training for likeness or portrait (targets specific transformer blocks)\n- \"all\": train all transformer blocks\n- \"none\": train no specific transformer blocks\n\nThis parameter controls which double and single transformer blocks are trained\nduring the LoRA training process.\n\nOnly available for Flux LoRA training","enum":["likeness"],"default":"undefined"},"randomCrop":{"type":"boolean","description":"Whether to random crop or center crop images before resizing to the working resolution\n\nOnly available for SD15 and SDXL LoRA training","default":false},"numTextTrainSteps":{"type":"number","description":"The number of training steps for the text encoder\n\nOnly available for SDXL LoRA training","minimum":0,"maximum":40000},"learningRateTextEncoder":{"type":"number","description":"Initial learning rate (after the potential warmup period) for the text encoder\n\nMaximum [Flux LoRA: 0.001]\nDefault [SDXL: 0.00005 | Flux LoRA: 0.00001]\nMinimum [SDXL: 0 | Flux LoRA: 0.000001]","minimum":0,"maximum":0.001,"exclusiveMinimum":true},"learningRateUnet":{"type":"number","description":"Initial learning rate (after the potential warmup period) for the UNet\n\nOnly available for SDXL LoRA training","minimum":0,"exclusiveMinimum":true},"batchSize":{"type":"number","description":"The batch size\nLess steps, and will increase the learning rate\n\nOnly available for Flux LoRA training","minimum":1,"maximum":4}}},"compliantModelIds":{"type":"array","description":"List of base model IDs compliant with the model (example: [\"flux.1-dev\", \"flux.1-schnell\"])\nThis attribute is mainly used for Flux LoRA models","items":{"type":"string"}},"status":{"type":"string","description":"The model 
status","enum":["copying","failed","new","trained","training","training-canceled"]}},"required":["capabilities","collectionIds","createdAt","custom","exampleAssetIds","id","privacy","source","status","tags","trainingImagesNumber","type","updatedAt"]},"job":{"type":"object","properties":{"createdAt":{"type":"string","description":"The job creation date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"jobId":{"type":"string","description":"The job ID (example: \"job_ocZCnG1Df35XRL1QyCZSRxAG8\")"},"metadata":{"type":"object","description":"Metadata of the job with some additional information","properties":{"output":{"type":"object","description":"The output of the job","properties":{}},"input":{"type":"object","description":"The inputs for the job","properties":{}},"assetIds":{"type":"array","description":"List of produced assets for this job","items":{"type":"string"}},"flow":{"type":"array","description":"The flow of the job. Only available for workflow jobs.","items":{"type":"object","properties":{"jobId":{"type":"string","description":"If the flow is part of a WorkflowJob, this is the jobId for the node.\njobId is only available for nodes started. A node \"Pending\" for a running workflow job is not started."},"assets":{"type":"array","description":"List of produced assets for this node.","items":{"type":"object","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]}},"logicType":{"type":"string","description":"The type of the logic for the node.\nOnly available for logic nodes.","enum":["if-else"]},"dependsOn":{"type":"array","description":"The nodes that this node depends on.\nOnly available for nodes that have dependencies. Mainly used for user approval nodes.","items":{"type":"string"}},"modelId":{"type":"string","description":"The model id for the node. 
Mainly used for custom model tasks."},"inputs":{"type":"array","description":"The inputs of the node.","items":{"type":"object","description":"This is used to run the flow.","properties":{"parent":{"type":"boolean","description":"Whether this input represents a parent asset to assign to the produced assets.\nOnly available for \\`file\\` and \\`file_array\\` input types.\n\nFor \\`file_array\\`, the parent asset is the first item in the array."},"color":{"type":"boolean","description":"Whether the input is a color or not. Only available for \\`string\\` input type."},"inputs":{"type":"array","description":"The list of inputs which form an object within a container array.\nAll inputs are the same as the current object.\nThis is only available for type inputs_array inputs.","items":{"type":"object","properties":{}}},"minLength":{"type":"number","description":"The minimum allowed length for string inputs. Also applies to each item in \\`string_array\\`."},"backgroundBehavior":{"type":"string","description":"Specifies the background behavior for the input. 
Only available for \\`file\\` and \\`file_array\\`\ninput types with kind \\`image\\`.","enum":["opaque","transparent"]},"description":{"type":"string","description":"Help text displayed in the UI to provide additional information about the input"},"type":{"type":"string","description":"The data type of the input","enum":["boolean","file","file_array","inputs_array","model","model_array","number","number_array","string","string_array"]},"required":{"type":"object","description":"Set of rules that describes when this input is required:\n- \\`always\\`: Input is always required\n- \\`ifNotDefined\\`: Input is required when another specified input is not defined\n- \\`ifDefined\\`: Input is required when another specified input is defined\n- \\`conditionalValues\\`: Input is required when another input has a specific value\n\nBy default, the input is not required.","properties":{"always":{"type":"boolean","description":"Whether the input is always required"},"ifNotDefined":{"type":"object","description":"Makes this input required when another input is not defined:\n- Key: name of the input that must be undefined\n- Value: message to display when this input is required","properties":{}},"ifDefined":{"type":"object","description":"Makes this input required when another input is defined:\n- Key: name of the input that must be defined\n- Value: message to display when this input is required","properties":{}},"conditionalValues":{"type":"object","description":"Makes this input required when another input has a specific value:\n- Key: name of the input to check\n- Value: operation and allowed values that trigger the requirement","properties":{}}}},"modelTypes":{"type":"array","description":"The allowed model types for this input. 
Example: \\`[\"flux.1-lora\"]\\`.\nOnly available for \\`model_array\\` input type.","items":{"type":"string","enum":["custom","flux.1","flux.1-composition","flux.1-kontext-dev","flux.1-kontext-lora","flux.1-krea-dev","flux.1-krea-lora","flux.1-lora","flux.1-pro","flux.1.1-pro-ultra","flux1.1-pro","gpt-image-1","sd-1_5","sd-1_5-composition","sd-1_5-lora","sd-xl","sd-xl-composition","sd-xl-lora"]}},"ref":{"type":"object","description":"The reference to another input or output of the same workflow.\nMust have at least one of node or conditional.","properties":{"equal":{"type":"string","description":"This is the desired node output value if ref is an if/else node."},"node":{"type":"string","description":"The node id or 'workflow' if the source is a workflow input."},"conditional":{"type":"array","description":"The conditional nodes to reference.\nIf the conditional nodes are successful, the node will be successful.\nIf the conditional nodes are skipped, the node will be skipped.\nContains an array of node ids used to check the status of the nodes.","items":{"type":"string"}},"name":{"type":"string","description":"The name of the input or output to reference.\nIf the type is 'workflow', the name is the name of the input of the workflow is required\nIf the type is 'node', the name is not mandatory, except if you want all outputs of the node.\nTo get all outputs of a node, you can use the name 'all'."}}},"min":{"type":"number","description":"The minimum allowed value. Only available for \\`number\\` and array input types."},"promptSpark":{"type":"boolean","description":"Whether the input is used with prompt spark. Only available for \\`string\\` input type."},"placeholder":{"type":"string","description":"Placeholder text for the input. Only available for 'string' input type."},"group":{"type":"string","description":"Used to visually group inputs together in the UI. 
Inputs with the same group value appear\nconsecutively in the UI."},"allowedValues":{"type":"array","description":"The allowed values for the input. For \\`string\\` or \\`number\\` types, creates a single-select\ndropdown.\nFor \\`string_array\\` type, creates a multi-select dropdown.","items":{}},"costImpact":{"type":"boolean","description":"Whether this input affects the model's cost calculation"},"max":{"type":"number","description":"The maximum allowed value. Only available for \\`number\\` and \\`array\\` input types."},"kind":{"type":"string","description":"The asset kind of the input. Only taken into account for \\`file\\` and \\`file_array\\` input types.","enum":["3d","audio","document","image","image-hdr","json","video"]},"maskFrom":{"type":"string","description":"The name of the file input field to use as the mask source"},"label":{"type":"string","description":"The label displayed in the UI for this input"},"hint":{"type":"string","description":"Hint text displayed in the UI as a tooltip to guide the user"},"name":{"type":"string","description":"The name that must be used to call the model through the API"},"step":{"type":"number","description":"The step increment for numeric inputs. Only available for \\`number\\` input type.","minimum":1},"prompt":{"type":"boolean","description":"Whether the input is a prompt. When true, displays as a text area with prompt spark feature.\nOnly available for \\`string\\` input type."},"maxLength":{"type":"number","description":"The maximum allowed length for \\`string\\` inputs. 
Also applies to each item in \\`string_array\\`."}},"required":["name","type"]}},"id":{"type":"string","description":"The id of the node."},"logic":{"type":"object","description":"The logic of the node.\nOnly available for logic nodes.","properties":{"default":{"type":"string","description":"The default case of the logic.\nContains the id/output of the node to execute if no case is matched.\nOnly available for if/else nodes."},"transform":{"type":"string","description":"The transform of the logic.\nOnly available for transform nodes."},"cases":{"type":"array","description":"The cases of the logic.\nOnly available for if/else nodes.","items":{"type":"object","properties":{"condition":{"type":"string"},"value":{"type":"string"}},"required":["condition","value"]}}}},"type":{"type":"string","description":"The type of the job for the node.","enum":["custom-model","generate-prompt","logic","remove-background","transform","user-approval","workflow"]},"workflowId":{"type":"string","description":"The workflow id for the node. Mainly used for workflow tasks."},"status":{"type":"string","description":"The status of the node. 
Only available for WorkflowJob nodes.","enum":["failure","pending","processing","skipped","success"]}},"required":["id","status","type"]}},"workflowId":{"type":"string","description":"The workflow ID of the job if job is part of a workflow."},"workflowJobId":{"type":"string","description":"The workflow job ID of the job if job is part of a workflow job."}}},"statusHistory":{"type":"array","description":"The history of the different statuses the job went through with the ISO string date\nof when the job reached each statuses.","items":{"type":"object","properties":{"date":{"type":"string"},"status":{"type":"string","enum":["canceled","failure","finalizing","in-progress","pending","queued","success","warming-up"]}},"required":["date","status"]}},"progress":{"type":"number","description":"Progress of the job (between 0 and 1)"},"authorId":{"type":"string","description":"The author user ID (example: \"dcf121faaa1a0a0bbbd9ca1b73d62aea\")"},"jobType":{"type":"string","description":"The type of job","enum":["assets-download","canvas-export","caption","caption-llava","custom","describe-style","detection","embed","flux","flux-model-training","generate-prompt","image-generation","image-prompt-editing","inference","mesh-preview-rendering","model-download","model-import","model-training","openai-image-generation","patch-image","pixelate","reframe","remove-background","repaint","restyle","segment","skybox-3d","skybox-base-360","skybox-hdri","skybox-upscale-360","texture","translate","upload","upscale","upscale-skybox","upscale-texture","vectorize","workflow"]},"ownerId":{"type":"string","description":"The owner ID (example: \"team_U3Qmc8PCdWXwAQJ4Dvw4tV6D\")"},"billing":{"type":"object","description":"The billing of the job","properties":{"cuDiscount":{"type":"number"},"cuCost":{"type":"number"}},"required":["cuCost","cuDiscount"]},"status":{"type":"string","description":"The current status of the 
job","enum":["canceled","failure","finalizing","in-progress","pending","queued","success","warming-up"]},"updatedAt":{"type":"string","description":"The job last update date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"}},"required":["createdAt","jobId","jobType","metadata","progress","status","statusHistory","updatedAt"]}}},"GetModelsByModelIdResponse":{"type":"object","required":["model"],"properties":{"model":{"type":"object","properties":{"trainingImages":{"type":"array","description":"The URLs of the first 3 training images of the model. To retrieve the full set of images, get it by modelId","items":{"type":"object","properties":{"createdAt":{"type":"string","description":"The training image upload date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"automaticCaptioning":{"type":"string","description":"Automatic captioning of the image"},"downloadUrl":{"type":"string","description":"The URL of the image"},"name":{"type":"string","description":"The original file name of the image (example: \"my-training-image.jpg\")"},"description":{"type":"string","description":"Description for the image"},"id":{"type":"string","description":"The training image ID (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"}},"required":["automaticCaptioning","createdAt","description","downloadUrl","id","name"]}},"inputs":{"type":"array","description":"The inputs of the model. Only used for custom models. To retrieve this list, get it by modelId with GET /models/{modelId}","items":{"type":"object","description":"Defines the input parameters for a model. 
Use this to understand the available input parameters\nfor a given `modelId` when calling `POST /generate/custom/{modelId}`.\nSee {@link https://docs.scenario.com/docs/video-generation} for examples.","properties":{"parent":{"type":"boolean","description":"Whether this input represents a parent asset to assign to the produced assets.\nOnly available for \\`file\\` and \\`file_array\\` input types.\n\nFor \\`file_array\\`, the parent asset is the first item in the array."},"color":{"type":"boolean","description":"Whether the input is a color or not. Only available for \\`string\\` input type."},"inputs":{"type":"array","description":"The list of inputs which form an object within a container array.\nAll inputs are the same as the current object.\nThis is only available for type inputs_array inputs.","items":{"type":"object","properties":{}}},"minLength":{"type":"number","description":"The minimum allowed length for string inputs. Also applies to each item in \\`string_array\\`."},"backgroundBehavior":{"type":"string","description":"Specifies the background behavior for the input. 
Only available for \\`file\\` and \\`file_array\\`\ninput types with kind \\`image\\`.","enum":["opaque","transparent"]},"description":{"type":"string","description":"Help text displayed in the UI to provide additional information about the input"},"type":{"type":"string","description":"The data type of the input","enum":["boolean","file","file_array","inputs_array","model","model_array","number","number_array","string","string_array"]},"required":{"type":"object","description":"Set of rules that describes when this input is required:\n- \\`always\\`: Input is always required\n- \\`ifNotDefined\\`: Input is required when another specified input is not defined\n- \\`ifDefined\\`: Input is required when another specified input is defined\n- \\`conditionalValues\\`: Input is required when another input has a specific value\n\nBy default, the input is not required.","properties":{"always":{"type":"boolean","description":"Whether the input is always required"},"ifNotDefined":{"type":"object","description":"Makes this input required when another input is not defined:\n- Key: name of the input that must be undefined\n- Value: message to display when this input is required","properties":{}},"ifDefined":{"type":"object","description":"Makes this input required when another input is defined:\n- Key: name of the input that must be defined\n- Value: message to display when this input is required","properties":{}},"conditionalValues":{"type":"object","description":"Makes this input required when another input has a specific value:\n- Key: name of the input to check\n- Value: operation and allowed values that trigger the requirement","properties":{}}}},"modelTypes":{"type":"array","description":"The allowed model types for this input. 
Example: \\`[\"flux.1-lora\"]\\`.\nOnly available for \\`model_array\\` input type.","items":{"type":"string","enum":["custom","flux.1","flux.1-composition","flux.1-kontext-dev","flux.1-kontext-lora","flux.1-krea-dev","flux.1-krea-lora","flux.1-lora","flux.1-pro","flux.1.1-pro-ultra","flux1.1-pro","gpt-image-1","sd-1_5","sd-1_5-composition","sd-1_5-lora","sd-xl","sd-xl-composition","sd-xl-lora"]}},"min":{"type":"number","description":"The minimum allowed value. Only available for \\`number\\` and array input types."},"promptSpark":{"type":"boolean","description":"Whether the input is used with prompt spark. Only available for \\`string\\` input type."},"placeholder":{"type":"string","description":"Placeholder text for the input. Only available for 'string' input type."},"group":{"type":"string","description":"Used to visually group inputs together in the UI. Inputs with the same group value appear\nconsecutively in the UI."},"allowedValues":{"type":"array","description":"The allowed values for the input. For \\`string\\` or \\`number\\` types, creates a single-select\ndropdown.\nFor \\`string_array\\` type, creates a multi-select dropdown.","items":{}},"costImpact":{"type":"boolean","description":"Whether this input affects the model's cost calculation"},"max":{"type":"number","description":"The maximum allowed value. Only available for \\`number\\` and \\`array\\` input types."},"kind":{"type":"string","description":"The asset kind of the input. 
Only taken into account for \\`file\\` and \\`file_array\\` input types.","enum":["3d","audio","document","image","image-hdr","json","video"]},"maskFrom":{"type":"string","description":"The name of the file input field to use as the mask source"},"label":{"type":"string","description":"The label displayed in the UI for this input"},"hint":{"type":"string","description":"Hint text displayed in the UI as a tooltip to guide the user"},"name":{"type":"string","description":"The name that must be used to call the model through the API"},"step":{"type":"number","description":"The step increment for numeric inputs. Only available for \\`number\\` input type.","minimum":1},"prompt":{"type":"boolean","description":"Whether the input is a prompt. When true, displays as a text area with prompt spark feature.\nOnly available for \\`string\\` input type."},"maxLength":{"type":"number","description":"The maximum allowed length for \\`string\\` inputs. Also applies to each item in \\`string_array\\`."}},"required":["name","type"]}},"exampleAssetIds":{"type":"array","description":"List of all example asset IDs setup by the model owner","items":{"type":"string"}},"privacy":{"type":"string","description":"The privacy of the model (default: private)","enum":["private","public","unlisted"],"default":"private"},"softDeletionOn":{"type":"string","description":"The date when the model will be soft deleted (only for Free plan)"},"epoch":{"type":"string","description":"The epoch of the model. 
Only available for Flux Lora Trained models.\nIf not set, uses the final model epoch (latest)"},"uiConfig":{"type":"object","description":"The UI configuration for the model","properties":{"selects":{"type":"object","description":"Configuration for the selects","additionalProperties":{"type":"object","properties":{}}},"inputProperties":{"type":"object","description":"Configuration for the input properties","additionalProperties":{"type":"object","properties":{"collapsed":{"type":"boolean"}}}},"presets":{"type":"array","description":"Configuration for the presets","items":{"type":"object","properties":{"presets":{"type":"object","properties":{}},"fields":{"type":"array","items":{"type":"string"}}},"required":["fields","presets"]}},"triggerGenerate":{"type":"object","description":"Configuration for the trigger generate button","properties":{"after":{"type":"string","description":"The 'name' of the input where the trigger generate button will be displayed (after the input).\nDo not specify both position and after."},"label":{"type":"string"},"position":{"type":"string","description":"The position of the trigger generate button. 
If position specified, the button will be displayed at the specified position.\nDo not specify both position and after.","enum":["bottom","top"]}},"required":["label"]},"lorasComponent":{"type":"object","description":"Configuration for the loras component","properties":{"modelIdInput":{"type":"string","description":"The input model id (example: a composition or a single LoRA modelId)\nIf specified, the model id will be attached to the output asset as a metadata\nIf the model-decomposer parser is specified on it, modelInput and scaleInput will be automatically populated"},"scaleInput":{"type":"string","description":"The input name of the scale (number_array)"},"label":{"type":"string","description":"The label of the component"},"modelInput":{"type":"string","description":"The input name of the model (model_array)"}},"required":["label","modelInput","scaleInput"]},"resolutionComponent":{"type":"object","description":"Configuration for the resolution component","properties":{"presets":{"type":"array","description":"The resolution presets","items":{"type":"object","properties":{"width":{"type":"number"},"label":{"type":"string"},"height":{"type":"number"}},"required":["height","label","width"]}},"widthInput":{"type":"string","description":"The input name of the width"},"label":{"type":"string","description":"The label of the component"},"heightInput":{"type":"string","description":"The input name of the height"}},"required":["heightInput","label","presets","widthInput"]}}},"source":{"type":"string","description":"The source of the model","enum":["civitai","huggingface","other","scenario"]},"ownerId":{"type":"string","description":"The owner ID (example: \"team_VFhihHKMRZyDDnZAJwLb2Q\")"},"type":{"type":"string","description":"The model type (example: 
\"flux.1-lora\")","enum":["custom","flux.1","flux.1-composition","flux.1-kontext-dev","flux.1-kontext-lora","flux.1-krea-dev","flux.1-krea-lora","flux.1-lora","flux.1-pro","flux.1.1-pro-ultra","flux1.1-pro","gpt-image-1","sd-1_5","sd-1_5-composition","sd-1_5-lora","sd-xl","sd-xl-composition","sd-xl-lora"]},"createdAt":{"type":"string","description":"The model creation date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"complianceMetadata":{"type":"object","description":"Compliance and regulatory metadata for the model","properties":{"subProcessor":{"type":"string","description":"The sub-processor used for the model","enum":["AWS","Fal","Modal","Provider","Replicate"]},"modelProvider":{"type":"string","description":"The provider of the model (examples: 'Google', 'AWS', 'BFL', 'Meshy', etc.)"},"licenseTerms":{"type":"string","description":"URL to license terms"},"dataProcessingComment":{"type":"string","description":"Data processing comment (e.g., zeroRetention, temporaryRetention, etc.)","enum":["researchOnly","serviceImprovement","temporaryRetention","zeroRetention"]},"maintainer":{"type":"string","description":"The maintainer of the model","enum":["Fal","Provider","Replicate","Scenario"]}}},"trainingStats":{"type":"object","description":"Additional information about the model's training","properties":{"trainDuration":{"type":"number","description":"The training duration in seconds"},"queueDuration":{"type":"number","description":"The training queued duration in seconds"},"endedAt":{"type":"string","description":"The training end time as an ISO date string"},"startedAt":{"type":"string","description":"The training start time as an ISO date string"}}},"promptEmbedding":{"type":"string","description":"Fine-tune the model's inferences with prompt embedding"},"trainingProgress":{"type":"object","description":"Additional information about the training progress of the model","properties":{"stage":{"type":"string","description":"The stage of the 
request","enum":["pending","queued-for-train","running-train","starting-train"]},"remainingTimeMs":{"type":"number","description":"The remaining time in milliseconds"},"progress":{"type":"number","description":"The progress of the job","minimum":0,"maximum":1},"startedAt":{"type":"number","description":"The timestamp in milliseconds marking the start of the process"},"position":{"type":"number","description":"Position of the job in the queue (i.e. the number of jobs in the queue before this one)"},"updatedAt":{"type":"number","description":"Timestamp in milliseconds of the last time the training progress was updated"}},"required":["stage","updatedAt"]},"id":{"type":"string","description":"The model ID (example: \"model_eyVcnFJcR92BxBkz7N6g5w\")"},"class":{"type":"object","description":"The class of the model","properties":{"modelId":{"type":"string","description":"The model ID of the class (example: \"stable-diffusion-v1-5\")"},"name":{"type":"string","description":"The class name (example: \"Character Design\")"},"category":{"type":"string","description":"The category slug of the class (example: \"art-style\")"},"thumbnails":{"type":"array","description":"Some example image URLs to showcase the class","items":{"type":"string"}},"prompt":{"type":"string","description":"The class prompt (example: \"a character design\")"},"conceptPrompt":{"type":"string","description":"The concept prompt of the class (example: \"a sks character design\")"},"slug":{"type":"string","description":"The class slug (example: \"art-style-character-design\")"},"status":{"type":"string","description":"The class status (only published classes are listed, but unpublished classes can still appear in existing models)","enum":["published","unpublished"]}},"required":["category","conceptPrompt","modelId","name","prompt","slug","status","thumbnails"]},"updatedAt":{"type":"string","description":"The model last update date as an ISO string (example: 
\"2023-02-03T11:19:41.579Z\")"},"thumbnail":{"type":"object","description":"A thumbnail for your model","properties":{"assetId":{"type":"string","description":"The AssetId of the image used as a thumbnail for your model (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"url":{"type":"string","description":"The url of the image used as a thumbnail for your model"}},"required":["assetId","url"]},"accessRestrictions":{"type":"number","description":"The access restrictions of the model\n0: Free plan\n25: Creator plan\n50: Pro plan\n75: Team plan\n100: Enterprise plan"},"capabilities":{"type":"array","description":"List of model capabilities (example: [\"txt2img\", \"img2img\", \"txt2img_ip_adapter\", ...])","items":{"type":"string","enum":["3d23d","audio2audio","controlnet","controlnet_img2img","controlnet_inpaint","controlnet_inpaint_ip_adapter","controlnet_ip_adapter","controlnet_reference","controlnet_texture","img23d","img2img","img2img_ip_adapter","img2img_texture","img2txt","img2video","inpaint","inpaint_ip_adapter","outpaint","reference","reference_texture","txt23d","txt2audio","txt2img","txt2img_ip_adapter","txt2img_texture","txt2txt","txt2video","video2img","video2video"]}},"parentModelId":{"type":"string","description":"The id of the parent model"},"trainingImagePairs":{"type":"array","description":"Array of training image pairs","items":{"type":"object","properties":{"sourceId":{"type":"string","description":"The source asset ID (must be a training asset)"},"targetId":{"type":"string","description":"The target asset ID (must be a training asset)"},"instruction":{"type":"string","description":"The instruction for the image pair, source to target"}}}},"trainingImagesNumber":{"type":"number","description":"The total number of training images"},"custom":{"type":"boolean","description":"Whether the model is a custom model and can be used only with POST /generate/custom/{modelId} endpoint"},"modelKeyword":{"type":"string","description":"The model keyword, this is a 
legacy parameter, please use conceptPrompt in parameters"},"shortDescription":{"type":"string","description":"The model short description (example: \"This model generates highly detailed cinematic scenes.\")"},"authorId":{"type":"string","description":"The author user ID (example: \"user_VFhihHKMRZyDDnZAJwLb2Q\")"},"userId":{"type":"string","description":"(Deprecated) The user ID (example: \"user_VFhihHKMRZyDDnZAJwLb2Q\")"},"tags":{"type":"array","description":"The associated tags (example: [\"sci-fi\", \"landscape\"])","items":{"type":"string"}},"negativePromptEmbedding":{"type":"string","description":"Fine-tune the model's inferences with negative prompt embedding"},"concepts":{"type":"array","description":"The concepts is required for the type model: composition","items":{"type":"object","properties":{"modelId":{"type":"string","description":"The model ID (example: \"model_eyVcnFJcR92BxBkz7N6g5w\")"},"modelEpoch":{"type":"string","description":"The epoch of the model (example: \"000001\")\nOnly available for Flux Lora Trained models"},"scale":{"type":"number","description":"The scale of the model (example: 1.0)\nFor Flux Kontext Prompt Editing, the scale is between 0 and 2.","minimum":-2,"maximum":2}},"required":["modelId","scale"]}},"collectionIds":{"type":"array","description":"A list of CollectionId this model belongs to","items":{"type":"string"}},"name":{"type":"string","description":"The model name (example: \"Cinematic Realism\")"},"epochs":{"type":"array","description":"The epochs of the model. 
Only available for Flux Lora Trained models.","items":{"type":"object","properties":{"assets":{"type":"array","description":"The assets of the epoch if sample prompts have been supplied during training","items":{"type":"object","properties":{"assetId":{"type":"string","description":"The AssetId of the image during training (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"url":{"type":"string","description":"The url of the asset"}},"required":["assetId","url"]}},"epoch":{"type":"string","description":"The epoch hash to identify the epoch"}},"required":["epoch"]}},"parameters":{"type":"object","description":"The parameters of the model","properties":{"priorLossWeight":{"type":"number","description":"The weight of prior preservation loss\n\nOnly available for SD15 and SDXL LoRA training","minimum":0,"maximum":1.7976931348623157e+308,"exclusiveMinimum":true},"seed":{"type":"number","description":"Used to reproduce previous results. Default: randomly generated number.\n\nOnly available for SD15 and SDXL LoRA training","minimum":0,"maximum":9007199254740991},"numUNetTrainSteps":{"type":"number","description":"The number of training steps for the UNet\n\nOnly available for SDXL LoRA training","minimum":0,"maximum":40000},"sampleSourceImages":{"type":"array","description":"The sample prompt images (AssetIds) paired with samplePrompts\nOnly available for Flux LoRA training\nMust be the same length as samplePrompts","items":{"type":"string"}},"classPrompt":{"type":"string","description":"The prompt to specify images in the same class as provided instance images\n\nOnly available for SD15 training"},"wandbKey":{"type":"string","description":"The Weights and Biases key to use for logging. 
The maximum length is 40 characters"},"scaleLr":{"type":"boolean","description":"Whether to scale the learning rate\n\nNote: Legacy parameter, will be ignored\n\nOnly available for SD15 and SDXL LoRA training","default":false},"randomCropScale":{"type":"number","description":"Scale of random crops\n\nOnly available for SD15 and SDXL LoRA training","minimum":0,"maximum":1},"lrScheduler":{"type":"string","description":"The scheduler type to use (default: \"constant\")\n\nOnly available for SD15 and SDXL LoRA training","enum":["constant","constant-with-warmup","cosine","cosine-with-restarts","linear","polynomial"],"default":"constant"},"rank":{"type":"number","description":"The dimension of the LoRA update matrices\n\nOnly available for SDXL and Flux LoRA training\n\nDefault value varies depending on the model type:\n- For SDXL: 64\n- For Flux: 16","minimum":4,"maximum":64},"validationPrompt":{"type":"string","description":"Validation prompt\n\nOnly available for SD15 and SDXL LoRA training","default":""},"conceptPrompt":{"type":"string","description":"The prompt with identifier specifying the instance (or subject) of the class (example: \"a daiton dog\")\n\nDefault value varies depending on the model type:\n- For SD1.5: \"daiton\" if no class is associated with the model\n- For SDXL: \"daiton\"\n- For Flux: \"\""},"maxTrainSteps":{"type":"number","description":"Maximum number of training steps to execute (default: varies depending on the model type)\n\nFor SDXL LoRA training, please use `numTextTrainSteps` and `numUNetTrainSteps` instead\n\nDefault value varies depending on the model type:\n- For SD1.5: round((number of training images * 225) / 3)\n- For SDXL: number of training images * 175\n- For Flux: number of training images * 100\n\nMaximum value varies depending on the model type:\n- For SD1.5 and SDXL: [0, 40000]\n- For Flux: [0, 10000]","minimum":0,"maximum":40000},"nbEpochs":{"type":"number","description":"The number of epochs to train for\n\nOnly available 
for Flux LoRA training","minimum":1,"maximum":30},"textEncoderTrainingRatio":{"type":"number","description":"Whether to train the text encoder or not\n\nExample: For 100 steps and a value of 0.2, it means that the text encoder will be trained for 20 steps and then the UNet for 80 steps\n\nNote: Legacy parameter, please use `numTextTrainSteps` and `numUNetTrainSteps`\n\nOnly available for SD15 and SDXL LoRA training","minimum":0,"maximum":0.99},"validationFrequency":{"type":"number","description":"Validation frequency. Cannot be greater than maxTrainSteps value\n\nOnly available for SD15 and SDXL LoRA training","minimum":0},"nbRepeats":{"type":"number","description":"The number of times to repeat the training\n\nOnly available for Flux LoRA training","minimum":1,"maximum":30},"samplePrompts":{"type":"array","description":"The prompts to use for each epoch\nOnly available for Flux LoRA training","items":{"type":"string"}},"randomCropRatio":{"type":"number","description":"Ratio of random crops\n\nOnly available for SD15 and SDXL LoRA training","minimum":0,"maximum":1},"learningRate":{"type":"number","description":"Initial learning rate (after the potential warmup period)\n\nDefault value varies depending on the model type:\n- For SD1.5 and SDXL: 0.000005\n- For Flux: 0.0001","minimum":0,"exclusiveMinimum":true},"optimizeFor":{"type":"string","description":"Optimize the model training task for a specific type of input images. 
The available values are:\n- \"likeness\": optimize training for likeness or portrait (targets specific transformer blocks)\n- \"all\": train all transformer blocks\n- \"none\": train no specific transformer blocks\n\nThis parameter controls which double and single transformer blocks are trained\nduring the LoRA training process.\n\nOnly available for Flux LoRA training","enum":["likeness"]},"randomCrop":{"type":"boolean","description":"Whether to random crop or center crop images before resizing to the working resolution\n\nOnly available for SD15 and SDXL LoRA training","default":false},"numTextTrainSteps":{"type":"number","description":"The number of training steps for the text encoder\n\nOnly available for SDXL LoRA training","minimum":0,"maximum":40000},"learningRateTextEncoder":{"type":"number","description":"Initial learning rate (after the potential warmup period) for the text encoder\n\nMaximum [Flux LoRA: 0.001]\nDefault [SDXL: 0.00005 | Flux LoRA: 0.00001]\nMinimum [SDXL: 0 | Flux LoRA: 0.000001]","minimum":0,"maximum":0.001,"exclusiveMinimum":true},"learningRateUnet":{"type":"number","description":"Initial learning rate (after the potential warmup period) for the UNet\n\nOnly available for SDXL LoRA training","minimum":0,"exclusiveMinimum":true},"batchSize":{"type":"number","description":"The batch size\nA larger batch size results in fewer training steps and will increase the learning rate\n\nOnly available for Flux LoRA training","minimum":1,"maximum":4}}},"compliantModelIds":{"type":"array","description":"List of base model IDs compliant with the model (example: [\"flux.1-dev\", \"flux.1-schnell\"])\nThis attribute is mainly used for Flux LoRA models","items":{"type":"string"}},"status":{"type":"string","description":"The model 
status","enum":["copying","failed","new","trained","training","training-canceled"]}},"required":["capabilities","collectionIds","createdAt","custom","exampleAssetIds","id","privacy","source","status","tags","trainingImagesNumber","type","updatedAt"]}}},"GetUploadsByIdResponse":{"type":"object","required":["upload"],"properties":{"upload":{"type":"object","properties":{"originalFileName":{"type":"string"},"fileName":{"type":"string"},"partsCount":{"type":"number"},"kind":{"type":"string","description":"The kind of the file once validated (example: \"model\")","enum":["3d","asset","audio","avatar","image","model","video"]},"errorMessage":{"type":"string"},"entityId":{"type":"string"},"source":{"type":"string","enum":["civitai","huggingface","multipart","other","url"]},"authorId":{"type":"string"},"ownerId":{"type":"string"},"url":{"type":"string"},"createdAt":{"type":"string"},"jobId":{"type":"string"},"fileSize":{"type":"number"},"provider":{"type":"string","enum":["civitai","huggingface","other"]},"parts":{"type":"array","items":{"type":"object","properties":{"number":{"type":"number"},"expires":{"type":"string"},"url":{"type":"string"}},"required":["expires","number","url"]}},"id":{"type":"string"},"config":{"type":"object","properties":{}},"contentType":{"type":"string"},"assetOptions":{"type":"object","properties":{"hide":{"type":"boolean","description":"Specify if the asset should be hidden from the user."},"collectionIds":{"type":"array","description":"The collection ids to add the asset to.","items":{"type":"string"}},"parentId":{"type":"string","description":"The parentId of the 
asset."}}},"status":{"type":"string","enum":["complete","failed","imported","pending","validated","validating"]},"updatedAt":{"type":"string"}},"required":["authorId","createdAt","fileName","id","kind","ownerId","source","status","updatedAt"]}}},"GetUsagesResponse":{"type":"object","properties":{"modelUsages":{"type":"array","items":{"type":"object","properties":{"modelId":{"type":"string"},"points":{"type":"array","description":"The data points","items":{"type":"object","properties":{"cost":{"type":"number","description":"Cost for model usage"},"jobs":{"type":"number","description":"Number of jobs for the model usage"},"discount":{"type":"number","description":"The discount for model usage"},"time":{"type":"string","description":"The UTC ISO date of the point"}},"required":["cost","discount","jobs","time"]}}},"required":["modelId","points"]}},"activity":{"type":"array","items":{"type":"object","description":"Activity entries listed in the activities results order by time DESC\nDefault limit of 100 entries. 
If you have 100 entries in your result,\nyou can use the activityOffset parameter to get the next page of results.\nWe recommend you to make the same request with type=activity only (to avoid reload everything)\nAnd apply the activityOffset=100 to get the next page of results.\nContains basic infos about each relevant actions","properties":{"creativeUnitsCost":{"type":"number","description":"The Compute Units cost for this action"},"data":{"type":"object","description":"The additional data of the action","properties":{"isApiKey":{"type":"boolean","description":"Whether the action is an API key action"},"jobId":{"type":"string","description":"The job for this action"},"modelId":{"type":"string","description":"The model for this action"},"assetId":{"type":"string","description":"The asset for this action"},"collectionId":{"type":"string","description":"The collection for this action"}}},"action":{"type":"string","description":"The action name","enum":["asset","asset-privacy","background-removal","captioning","collection","collection-assets","collection-models","controlnet","controlnet-img2img","controlnet-inpaint","controlnet-ip-adapter","controlnet-texture","copy-asset","copy-model","creative-unit-cost","creative-unit-discount","custom","delete-asset","delete-collection","delete-collection-assets","delete-collection-models","delete-inference-image","delete-model","delete-model-preset","delete-oscu-auto-refill","delete-project-member","delete-subscription","delete-team-api-key","delete-team-invitations","delete-team-member","delete-training-images","describe-style","detection","download-assets","download-model","embed","generative-fill","image-prompt-editing","images-generation","img2img","img2img-ip-adapter","img2img-texture","inference","inpaint","inpaint-ip-adapter","model","model-preset","models-training","oscu","patch","pixelate","project","project-member","reframe","repaint","restyle","segmentation","skybox-base-360","skybox-upscale-360","start-train","subscript
ion","subscription-seats","tag-asset","tag-model","team-api-key","team-member","texture","train-succeeded","training-images-to-model","transfer-model","txt2img","txt2img-ip-adapter","update-asset","update-collection","update-model","update-model-description","update-model-examples","update-oscu-auto-refill","update-project","update-subscription","update-team","update-team-member","upscale","vectorization"]},"time":{"type":"string","description":"The UTC ISO date of the point"},"projectId":{"type":"string","description":"The projectId of the project for this action"},"userId":{"type":"string","description":"The unique identifier of the user for this action"}},"required":["action","data","projectId","time","userId"]}},"entities":{"type":"object","properties":{"models":{"type":"array","items":{"type":"object","properties":{"name":{"type":"string","description":"The name of the model"},"id":{"type":"string","description":"The model ID"},"shortDescription":{"type":"string","description":"The short description of the model"}},"required":["id","name"]}},"assets":{"type":"array","items":{"type":"object","properties":{"metadata":{"type":"object","description":"Partial metadata of the asset","properties":{"type":{"type":"string","description":"The type of the 
asset","enum":["3d-texture","3d-texture-albedo","3d-texture-metallic","3d-texture-mtl","3d-texture-normal","3d-texture-roughness","3d23d","3d23d-texture","audio2audio","background-removal","canvas","canvas-drawing","canvas-export","detection","generative-fill","image-prompt-editing","img23d","img2img","img2video","inference-controlnet","inference-controlnet-img2img","inference-controlnet-inpaint","inference-controlnet-inpaint-ip-adapter","inference-controlnet-ip-adapter","inference-controlnet-reference","inference-controlnet-texture","inference-img2img","inference-img2img-ip-adapter","inference-img2img-texture","inference-inpaint","inference-inpaint-ip-adapter","inference-reference","inference-reference-texture","inference-txt2img","inference-txt2img-ip-adapter","inference-txt2img-texture","patch","pixelization","reframe","restyle","segment","segmentation-image","segmentation-mask","skybox-3d","skybox-base-360","skybox-hdri","texture","texture-albedo","texture-ao","texture-edge","texture-height","texture-metallic","texture-normal","texture-smoothness","txt23d","txt2audio","txt2img","txt2video","unknown","uploaded","uploaded-3d","uploaded-audio","uploaded-avatar","uploaded-video","upscale","upscale-skybox","upscale-texture","upscale-video","vectorization","video2img","video2video"]}},"required":["type"]},"kind":{"type":"string","description":"The kind of the asset","enum":["3d","audio","document","image","image-hdr","json","video"]},"id":{"type":"string","description":"The asset ID"},"source":{"type":"string","description":"The source of the 
asset","enum":["3d23d","3d23d:texture","3d:texture","3d:texture:albedo","3d:texture:metallic","3d:texture:mtl","3d:texture:normal","3d:texture:roughness","audio2audio","background-removal","canvas","canvas-drawing","canvas-export","detection","generative-fill","image-prompt-editing","img23d","img2img","img2video","inference-control-net","inference-control-net-img","inference-control-net-inpainting","inference-control-net-inpainting-ip-adapter","inference-control-net-ip-adapter","inference-control-net-reference","inference-control-net-texture","inference-img","inference-img-ip-adapter","inference-img-texture","inference-in-paint","inference-in-paint-ip-adapter","inference-reference","inference-reference-texture","inference-txt","inference-txt-ip-adapter","inference-txt-texture","patch","pixelization","reframe","restyle","segment","segmentation-image","segmentation-mask","skybox-3d","skybox-base-360","skybox-hdri","texture","texture:albedo","texture:ao","texture:edge","texture:height","texture:metallic","texture:normal","texture:smoothness","txt23d","txt2audio","txt2img","txt2video","unknown","uploaded","uploaded-3d","uploaded-audio","uploaded-avatar","uploaded-video","upscale","upscale-skybox","upscale-texture","upscale-video","vectorization","video2img","video2video"]},"properties":{"type":"object","description":"The properties of the asset","properties":{"transcription":{"type":"object","description":"Transcription of the audio","properties":{"text":{"type":"string"}},"required":["text"]},"hasNormals":{"type":"boolean","description":"Whether the mesh has normal vectors"},"format":{"type":"string","description":"Format of the mesh file (e.g. 
'glb', etc.)"},"description":{"type":"string","description":"Description of the audio"},"bitrate":{"type":"number","description":"Bitrate of the media in bits per second"},"boneCount":{"type":"number","description":"Number of bones if skeleton exists"},"classification":{"type":"string","description":"Classification of the audio","enum":["effect","interview","music","other","sound","speech","text","unknown"]},"sampleRate":{"type":"number","description":"Sample rate of the media in Hz"},"faceCount":{"type":"number","description":"Number of faces/triangles in the mesh"},"vertexCount":{"type":"number","description":"Number of vertices in the mesh"},"animationFrameCount":{"type":"number","description":"Number of animation frames if animations exist"},"duration":{"type":"number","description":"Duration of the media in seconds"},"frameRate":{"type":"number","description":"Frame rate of the video in frames per second"},"nbFrames":{"type":"number","description":"Number of frames in the video"},"channels":{"type":"number","description":"Number of channels of the audio"},"size":{"type":"number"},"hasSkeleton":{"type":"boolean","description":"Whether the mesh has bones/skeleton"},"width":{"type":"number"},"hasAnimations":{"type":"boolean","description":"Whether the mesh has animations"},"codecName":{"type":"string","description":"Codec name of the media"},"hasUVs":{"type":"boolean","description":"Whether the mesh has UV coordinates"},"dimensions":{"type":"array","description":"Bounding box dimensions [width, height, depth]","items":{}},"height":{"type":"number"}},"required":["size"]}},"required":["id","kind","metadata","properties","source"]}},"collections":{"type":"array","items":{"type":"object","properties":{"name":{"type":"string","description":"The name of the collection"},"id":{"type":"string","description":"The collection ID"}},"required":["id","name"]}},"jobs":{"type":"array","items":{"type":"object","properties":{"metadata":{"type":"object","description":"The metadata 
of the job","properties":{"output":{"type":"object","description":"The output of the job","properties":{}},"input":{"type":"object","description":"The inputs for the job","properties":{}},"assetIds":{"type":"array","description":"List of produced assets for this job","items":{"type":"string"}},"flow":{"type":"array","description":"The flow of the job. Only available for workflow jobs.","items":{"type":"object","properties":{"jobId":{"type":"string","description":"If the flow is part of a WorkflowJob, this is the jobId for the node.\njobId is only available for nodes started. A node \"Pending\" for a running workflow job is not started."},"assets":{"type":"array","description":"List of produced assets for this node.","items":{"type":"object","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]}},"logicType":{"type":"string","description":"The type of the logic for the node.\nOnly available for logic nodes.","enum":["if-else"]},"dependsOn":{"type":"array","description":"The nodes that this node depends on.\nOnly available for nodes that have dependencies. Mainly used for user approval nodes.","items":{"type":"string"}},"modelId":{"type":"string","description":"The model id for the node. Mainly used for custom model tasks."},"inputs":{"type":"array","description":"The inputs of the node.","items":{"type":"object","description":"This is used to run the flow.","properties":{"parent":{"type":"boolean","description":"Whether this input represents a parent asset to assign to the produced assets.\nOnly available for \\`file\\` and \\`file_array\\` input types.\n\nFor \\`file_array\\`, the parent asset is the first item in the array."},"color":{"type":"boolean","description":"Whether the input is a color or not. 
Only available for \\`string\\` input type."},"inputs":{"type":"array","description":"The list of inputs which form an object within a container array.\nAll inputs are the same as the current object.\nThis is only available for type inputs_array inputs.","items":{"type":"object","properties":{}}},"minLength":{"type":"number","description":"The minimum allowed length for string inputs. Also applies to each item in \\`string_array\\`."},"backgroundBehavior":{"type":"string","description":"Specifies the background behavior for the input. Only available for \\`file\\` and \\`file_array\\`\ninput types with kind \\`image\\`.","enum":["opaque","transparent"]},"description":{"type":"string","description":"Help text displayed in the UI to provide additional information about the input"},"type":{"type":"string","description":"The data type of the input","enum":["boolean","file","file_array","inputs_array","model","model_array","number","number_array","string","string_array"]},"required":{"type":"object","description":"Set of rules that describes when this input is required:\n- \\`always\\`: Input is always required\n- \\`ifNotDefined\\`: Input is required when another specified input is not defined\n- \\`ifDefined\\`: Input is required when another specified input is defined\n- \\`conditionalValues\\`: Input is required when another input has a specific value\n\nBy default, the input is not required.","properties":{"always":{"type":"boolean","description":"Whether the input is always required"},"ifNotDefined":{"type":"object","description":"Makes this input required when another input is not defined:\n- Key: name of the input that must be undefined\n- Value: message to display when this input is required","properties":{}},"ifDefined":{"type":"object","description":"Makes this input required when another input is defined:\n- Key: name of the input that must be defined\n- Value: message to display when this input is 
required","properties":{}},"conditionalValues":{"type":"object","description":"Makes this input required when another input has a specific value:\n- Key: name of the input to check\n- Value: operation and allowed values that trigger the requirement","properties":{}}}},"modelTypes":{"type":"array","description":"The allowed model types for this input. Example: \\`[\"flux.1-lora\"]\\`.\nOnly available for \\`model_array\\` input type.","items":{"type":"string","enum":["custom","flux.1","flux.1-composition","flux.1-kontext-dev","flux.1-kontext-lora","flux.1-krea-dev","flux.1-krea-lora","flux.1-lora","flux.1-pro","flux.1.1-pro-ultra","flux1.1-pro","gpt-image-1","sd-1_5","sd-1_5-composition","sd-1_5-lora","sd-xl","sd-xl-composition","sd-xl-lora"]}},"ref":{"type":"object","description":"The reference to another input or output of the same workflow.\nMust have at least one of node or conditional.","properties":{"equal":{"type":"string","description":"This is the desired node output value if ref is an if/else node."},"node":{"type":"string","description":"The node id or 'workflow' if the source is a workflow input."},"conditional":{"type":"array","description":"The conditional nodes to reference.\nIf the conditional nodes are successful, the node will be successful.\nIf the conditional nodes are skipped, the node will be skipped.\nContains an array of node ids used to check the status of the nodes.","items":{"type":"string"}},"name":{"type":"string","description":"The name of the input or output to reference.\nIf the type is 'workflow', the name is required and must be the name of a workflow input.\nIf the type is 'node', the name is not mandatory, except if you want all outputs of the node.\nTo get all outputs of a node, you can use the name 'all'."}}},"min":{"type":"number","description":"The minimum allowed value. Only available for \\`number\\` and array input types."},"promptSpark":{"type":"boolean","description":"Whether the input is used with prompt spark. 
Only available for \\`string\\` input type."},"placeholder":{"type":"string","description":"Placeholder text for the input. Only available for 'string' input type."},"group":{"type":"string","description":"Used to visually group inputs together in the UI. Inputs with the same group value appear\nconsecutively in the UI."},"allowedValues":{"type":"array","description":"The allowed values for the input. For \\`string\\` or \\`number\\` types, creates a single-select\ndropdown.\nFor \\`string_array\\` type, creates a multi-select dropdown.","items":{}},"costImpact":{"type":"boolean","description":"Whether this input affects the model's cost calculation"},"max":{"type":"number","description":"The maximum allowed value. Only available for \\`number\\` and \\`array\\` input types."},"kind":{"type":"string","description":"The asset kind of the input. Only taken into account for \\`file\\` and \\`file_array\\` input types.","enum":["3d","audio","document","image","image-hdr","json","video"]},"maskFrom":{"type":"string","description":"The name of the file input field to use as the mask source"},"label":{"type":"string","description":"The label displayed in the UI for this input"},"hint":{"type":"string","description":"Hint text displayed in the UI as a tooltip to guide the user"},"name":{"type":"string","description":"The name that must be used to call the model through the API"},"step":{"type":"number","description":"The step increment for numeric inputs. Only available for \\`number\\` input type.","minimum":1},"prompt":{"type":"boolean","description":"Whether the input is a prompt. When true, displays as a text area with prompt spark feature.\nOnly available for \\`string\\` input type."},"maxLength":{"type":"number","description":"The maximum allowed length for \\`string\\` inputs. 
Also applies to each item in \\`string_array\\`."}},"required":["name","type"]}},"id":{"type":"string","description":"The id of the node."},"logic":{"type":"object","description":"The logic of the node.\nOnly available for logic nodes.","properties":{"default":{"type":"string","description":"The default case of the logic.\nContains the id/output of the node to execute if no case is matched.\nOnly available for if/else nodes."},"transform":{"type":"string","description":"The transform of the logic.\nOnly available for transform nodes."},"cases":{"type":"array","description":"The cases of the logic.\nOnly available for if/else nodes.","items":{"type":"object","properties":{"condition":{"type":"string"},"value":{"type":"string"}},"required":["condition","value"]}}}},"type":{"type":"string","description":"The type of the job for the node.","enum":["custom-model","generate-prompt","logic","remove-background","transform","user-approval","workflow"]},"workflowId":{"type":"string","description":"The workflow id for the node. Mainly used for workflow tasks."},"status":{"type":"string","description":"The status of the node. 
Only available for WorkflowJob nodes.","enum":["failure","pending","processing","skipped","success"]}},"required":["id","status","type"]}},"workflowId":{"type":"string","description":"The workflow ID of the job if job is part of a workflow."},"workflowJobId":{"type":"string","description":"The workflow job ID of the job if job is part of a workflow job."}}},"id":{"type":"string","description":"The job ID"},"jobType":{"type":"string","description":"The job type","enum":["assets-download","canvas-export","caption","caption-llava","custom","describe-style","detection","embed","flux","flux-model-training","generate-prompt","image-generation","image-prompt-editing","inference","mesh-preview-rendering","model-download","model-import","model-training","openai-image-generation","patch-image","pixelate","reframe","remove-background","repaint","restyle","segment","skybox-3d","skybox-base-360","skybox-hdri","skybox-upscale-360","texture","translate","upload","upscale","upscale-skybox","upscale-texture","vectorize","workflow"]},"status":{"type":"string","description":"The status of the job","enum":["canceled","failure","finalizing","in-progress","pending","queued","success","warming-up"]}},"required":["id","jobType","metadata","status"]}},"users":{"type":"array","items":{"type":"object","properties":{"isApiKey":{"type":"boolean","description":"Whether the user is an API key"},"apiKeyStatus":{"type":"string","description":"The API key status\n\nWill be available:\n- if the user is an API key","enum":["active","deleted","inactive"]},"apiKeyId":{"type":"string","description":"The API key ID\n\nWill be available:\n- if the user is an API key"},"fullName":{"type":"string","description":"The full name of the user\n\nWill be available:\n- if the user hasn't left the Scenario platform\n- if the user isn't an API key"},"avatar":{"type":"object","description":"The user avatar\n\nWill be available:\n- if the user hasn't left the Scenario platform\n- if the user isn't an API 
key","properties":{"assetId":{"type":"string","description":"ID of the asset used as thumbnail if provided, otherwise undefined"},"url":{"type":"string","description":"Signed URL of the assetId or free url if assetId is undefined"}}},"id":{"type":"string","description":"The user ID"},"email":{"type":"string","description":"The email of the user\n\nWill be available:\n- if the user hasn't left the Scenario platform\n- if the user isn't an API key"}},"required":["id","isApiKey"]}}}},"consumption":{"type":"array","items":{"type":"object","description":"Consumption entries sorted in descending order by consumption value.\n\nThere is no limit on the number of entries","properties":{"userId":{"type":"string","description":"The unique identifier of the user"},"value":{"type":"number","description":"The Compute Units consumption for the user"}},"required":["userId","value"]}},"usages":{"type":"array","items":{"type":"object","properties":{"granularity":{"type":"string","description":"Granularity for points (example: \"1d\", \"1h\", \"1m\", \"15m\")","enum":["15m","1d","1h","1m","30m","5m","7d"]},"usageName":{"type":"string","description":"Name of the usage points (example: \"images-generation\", \"generators-training\", \"background-removal\", \"upscale\", ...)","enum":["background-removal","captioning","creative-unit-cost","creative-unit-discount","custom","detection","image-prompt-editing","images-generation","models-training","patch","pixelate","repaint","restyle","segmentation","skybox-base-360","skybox-upscale-360","texture","upscale","vectorization"]},"points":{"type":"array","description":"The usage data points","items":{"type":"object","properties":{"time":{"type":"string","description":"The UTC ISO date of the point"},"value":{"type":"string","description":"Value of the 
point"}},"required":["time","value"]}}},"required":["granularity","points","usageName"]}}}},"PostWebhooksClerkSessionsResponse":{"type":"object","properties":{"message":{"type":"string"}}},"PostModelPresetByModelIdRequest":{"type":"object","required":["inferenceId"],"properties":{"isDefault":{"type":"boolean","description":"Whether this preset should be the default preset for the model","default":false},"inferenceId":{"type":"string","description":"The inference ID used to generate new images"}}},"PrivacyAssetResponse":{"type":"object","required":["asset"],"properties":{"asset":{"type":"object","properties":{"preview":{"type":"object","description":"The asset's preview.\n\nContains the assetId and the url of the preview.","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"metadata":{"type":"object","description":"Metadata of the asset with some additional information","properties":{"maxIterations":{"type":"number"},"modality":{"type":"string","description":"Modality to detect","enum":["canny","depth","grayscale","lineart_anime","mlsd","normal","pose","scribble","segmentation","sketch"],"default":"canny"},"tileStyle":{"type":"boolean","description":"If set to true, during the upscaling process, the model will match tiles of the source image with tiles of the style image(s). This will result in a more coherent restyle. Works best with style images that have a similar composition.","default":false},"modelId":{"type":"string","description":"The modelId used to generate this asset"},"sourceProjectId":{"type":"string"},"bbox":{"type":"array","description":"A bounding box around the object of interest, in the format [x1, y1, x2, y2].","items":{}},"keypointThreshold":{"type":"number","description":"How polished is the surface? 0 is like a rough surface, 1 is like a mirror","minimum":0,"maximum":1},"type":{"type":"string","description":"The type of the asset. 
Ex: 'inference-txt2img' will represent an asset generated from a text to image model","enum":["3d-texture","3d-texture-albedo","3d-texture-metallic","3d-texture-mtl","3d-texture-normal","3d-texture-roughness","3d23d","3d23d-texture","audio2audio","background-removal","canvas","canvas-drawing","canvas-export","detection","generative-fill","image-prompt-editing","img23d","img2img","img2video","inference-controlnet","inference-controlnet-img2img","inference-controlnet-inpaint","inference-controlnet-inpaint-ip-adapter","inference-controlnet-ip-adapter","inference-controlnet-reference","inference-controlnet-texture","inference-img2img","inference-img2img-ip-adapter","inference-img2img-texture","inference-inpaint","inference-inpaint-ip-adapter","inference-reference","inference-reference-texture","inference-txt2img","inference-txt2img-ip-adapter","inference-txt2img-texture","patch","pixelization","reframe","restyle","segment","segmentation-image","segmentation-mask","skybox-3d","skybox-base-360","skybox-hdri","texture","texture-albedo","texture-ao","texture-edge","texture-height","texture-metallic","texture-normal","texture-smoothness","txt23d","txt2audio","txt2img","txt2video","unknown","uploaded","uploaded-3d","uploaded-audio","uploaded-avatar","uploaded-video","upscale","upscale-skybox","upscale-texture","upscale-video","vectorization","video2img","video2video"]},"text":{"type":"string","description":"A textual description / keywords describing the object of interest.","maxLength":100},"height":{"type":"number"},"nbMasks":{"type":"number"},"creativityDecay":{"type":"number","description":"Amount of decay in creativity over the upscale process. 
The lower the value, the less the creativity will be preserved over the upscale process.","minimum":0,"maximum":100},"layerDifference":{"type":"number"},"scalingFactor":{"type":"number","description":"Scaling factor (when `targetWidth` not specified)","minimum":1,"maximum":16},"inputFidelity":{"type":"string","description":"When set to `high`, allows to better preserve details from the input images in the output.\nThis is especially useful when using images that contain elements like faces or logos that\nrequire accurate preservation in the generated image.\n\nYou can provide multiple input images that will all be preserved with high fidelity, but keep\nin mind that the first image will be preserved with richer textures and finer details, so if\nyou include elements such as faces, consider placing them in the first image.\n\nOnly available for the `gpt-image-1` model.","enum":["high","low"],"default":"low"},"modelType":{"type":"string","description":"The type of the generator used","enum":["custom","flux.1","flux.1-composition","flux.1-kontext-dev","flux.1-kontext-lora","flux.1-krea-dev","flux.1-krea-lora","flux.1-lora","flux.1-pro","flux.1.1-pro-ultra","flux1.1-pro","gpt-image-1","sd-1_5","sd-1_5-composition","sd-1_5-lora","sd-xl","sd-xl-composition","sd-xl-lora"]},"numOutputs":{"type":"number","description":"The number of outputs to generate.","minimum":1,"maximum":8},"angular":{"type":"number","description":"How angular is the surface? 
0 is like a sphere, 1 is like a mechanical object","minimum":0,"maximum":1},"targetHeight":{"type":"number","description":"The target height of the output image.","minimum":0,"maximum":2048},"concepts":{"type":"array","description":"Flux Kontext LoRA to style the image.\nFor Flux Kontext Prompt Editing.","items":{"type":"object","properties":{"modelId":{"type":"string","description":"The model ID (example: \"model_eyVcnFJcR92BxBkz7N6g5w\")"},"modelEpoch":{"type":"string","description":"The epoch of the model (example: \"000001\")\nOnly available for Flux Lora Trained models"},"scale":{"type":"number","description":"The scale of the model (example: 1.0)\nFor Flux Kontext Prompt Editing, the scale is between 0 and 2.","minimum":-2,"maximum":2}},"required":["modelId","scale"]}},"size":{"type":"number"},"spliceThreshold":{"type":"number"},"dilate":{"type":"number","description":"The number of pixels to dilate the result masks.","minimum":0,"maximum":30},"style":{"type":"string","enum":["3d-cartoon","3d-rendered","anime","cartoon","cinematic","claymation","cloud-skydome","comic","cyberpunk","enchanted","fantasy","ink","manga","manga-color","minimalist","neon-tron","oil-painting","pastel","photo","photography","psychedelic","retro-fantasy","scifi-concept-art","space","standard","whimsical"]},"imageFidelity":{"type":"number","description":"Strengthen the similarity to the original image during the upscale. 
Default: optimized for your preset and style.","minimum":0,"maximum":100},"inputLocation":{"type":"string","description":"Location of the input image in the output.","enum":["bottom","left","middle","right","top"],"default":"middle"},"seed":{"type":"string","description":"The seed used to generate this asset"},"preset":{"type":"string"},"trainingImage":{"type":"boolean"},"resultMask":{"type":"boolean","description":"Boolean to enable returning the masks (binary image) in the response.","default":true},"lockExpiresAt":{"type":"string","description":"The ISO timestamp when the lock on the canvas will expire"},"baseModelId":{"type":"string","description":"The baseModelId that may be changed at inference time"},"pathPrecision":{"type":"number"},"rootParentId":{"type":"string"},"sharpen":{"type":"boolean","description":"Sharpen tiles.","default":false},"cornerThreshold":{"type":"number"},"styleImages":{"type":"array","description":"List of style images. Most of the time, only one image is enough. They must be existing AssetIds.","items":{"type":"string"}},"texture":{"type":"string","description":"The asset to convert in texture maps. Must reference an existing AssetId."},"filterSpeckle":{"type":"number"},"minThreshold":{"type":"number","description":"Minimum threshold for Grayscale conversion"},"maxThreshold":{"type":"number","description":"Maximum threshold for Grayscale conversion"},"styleImagesFidelity":{"type":"number","description":"Condition the influence of the style image(s). The higher the value, the more the style images will influence the upscaled image.","minimum":0,"maximum":100},"depthImage":{"type":"string","description":"The control image processed by depth estimator. 
Must reference an existing AssetId."},"parentId":{"type":"string"},"parentJobId":{"type":"string"},"width":{"type":"number","description":"The width of the rendered image.","minimum":1024,"maximum":2048},"strength":{"type":"number","description":"The strength\n\nOnly available for the `flux-kontext` LoRA model."},"inferenceId":{"type":"string","description":"The id of the Inference describing how this image was generated"},"lengthThreshold":{"type":"number"},"originalAssetId":{"type":"string"},"backgroundOpacity":{"type":"number","description":"Int to set between 0 and 255 for the opacity of the background in the result images.","minimum":0,"maximum":255},"aspectRatio":{"type":"string","description":"The optional aspect ratio given for the generation, only applicable for some models"},"detailsLevel":{"type":"number","description":"Amount of details to remove or add","minimum":-50,"maximum":50},"fractality":{"type":"number","description":"Determine the scale at which the upscale process works.\n- With a small value, the upscale works at the largest scale, resulting in fewer added details and more coherent images. Ideal for portraits, for example.\n- With a large value, the upscale works at the smallest scale, resulting in more added details and more hallucinations. Ideal for landscapes, for example.\n\n(info): A small value is slower and more expensive to run.","minimum":0,"maximum":100},"horizontalExpansionRatio":{"type":"number","description":"(deprecated) Horizontal expansion ratio.","minimum":1,"maximum":2},"points":{"type":"array","description":"List of points (label, x, y) in the image where label = 0 for background and 1 for object.","items":{"type":"array","items":{}}},"mode":{"type":"string"},"colorPrecision":{"type":"number"},"promptFidelity":{"type":"number","description":"Increase the fidelity to the prompt during upscale. 
Default: optimized for your preset and style.","minimum":0,"maximum":100},"betterQuality":{"type":"boolean","description":"Remove small dark spots (i.e. “pepper”) and connect small bright cracks.","default":false},"removeBackground":{"type":"boolean","description":"Remove background for Grayscale detector","default":true},"creativity":{"type":"number","description":"Allow the generation of \"hallucinations\" during the upscale process, which adds additional details and deviates from the original image. Default: optimized for your preset and style.","minimum":0,"maximum":100},"structureFidelity":{"type":"number","description":"Strength for the input image structure preservation","minimum":0,"maximum":100},"imageType":{"type":"string","description":"Preserve the seamless properties of skybox or texture images. Input has to be of same type (seamless).","enum":["seamfull","skybox","texture"],"default":"seamfull"},"image":{"type":"string","description":"The input image to process. Must reference an existing AssetId or be a data URL."},"thumbnail":{"type":"object","description":"The thumbnail of the canvas","properties":{"assetId":{"type":"string","description":"The AssetId of the image used as a thumbnail for the canvas (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"url":{"type":"string","description":"The url of the image used as a thumbnail for the canvas"}},"required":["assetId","url"]},"structureImage":{"type":"string","description":"The control image for structure. A canny detector will be applied to this image. 
Must reference an existing AssetId."},"invert":{"type":"boolean","description":"To invert the relief","default":false},"kind":{"type":"string","enum":["3d","audio","document","image","image-hdr","json","video"]},"colorMode":{"type":"string"},"highThreshold":{"type":"number","description":"High threshold for Canny detector"},"verticalExpansionRatio":{"type":"number","description":"(deprecated) Vertical expansion ratio.","minimum":1,"maximum":2},"halfMode":{"type":"boolean"},"name":{"type":"string"},"negativePromptStrength":{"type":"number","description":"Controls the influence of the negative prompt. Default 0 means the negative prompt has no effect. Higher values increase negative prompt influence.\nMust be > 0 if negativePrompt is provided.","minimum":0,"maximum":10},"refinementSteps":{"type":"number","description":"Additional refinement steps before scaling.\n\nIf scalingFactor == 1, the refinement process will be applied (1 + refinementSteps) times.\nIf scalingFactor > 1, the refinement process will be applied refinementSteps times.","minimum":0,"maximum":4},"referenceImages":{"type":"array","description":"The reference images used for the asset generation or editing","items":{"type":"string"}},"styleFidelity":{"type":"number","description":"The higher the value the more it will look like the style image(s)","minimum":0,"maximum":100},"resultContours":{"type":"boolean","description":"Boolean to output the contours.","default":false},"controlEnd":{"type":"number","description":"End step for control."},"polished":{"type":"number","description":"How polished is the surface? 0 is like a rough surface, 1 is like a mirror","minimum":0,"maximum":1},"scheduler":{"type":"string","description":"The scheduler used to generate this asset"},"overrideEmbeddings":{"type":"boolean","description":"Override the embeddings of the model. Only your prompt and negativePrompt will be used. 
Use with caution."},"progressPercent":{"type":"number"},"factor":{"type":"number","description":"Contrast factor for Grayscale detector"},"mask":{"type":"string","description":"The mask used for the asset generation or editing"},"lowThreshold":{"type":"number","description":"Low threshold for Canny detector"},"depthFidelity":{"type":"number","description":"The depth fidelity if a depth image provided","minimum":0,"maximum":100},"raised":{"type":"number","description":"How raised is the surface? 0 is flat like water, 1 is like a very rough rock","minimum":0,"maximum":1},"copiedAt":{"type":"string","description":"The date when the asset was copied to a project"},"contours":{"type":"array","items":{"type":"array","items":{"type":"array","items":{"type":"array","items":{"type":"number"}}}}},"negativePrompt":{"type":"string","description":"The negative prompt used to generate this asset"},"defaultParameters":{"type":"boolean","description":"If true, use the default parameters","default":false},"resizeOption":{"type":"number","description":"Size proportion of the input image in the output.","minimum":0.1,"maximum":1},"outputIndex":{"type":"number"},"saveFlipbook":{"type":"boolean","description":"Save a flipbook of the texture. Deactivated when the input texture is larger than 2048x2048px","default":true},"guidance":{"type":"number","description":"The guidance used to generate this asset"},"numInferenceSteps":{"type":"number","description":"The number of denoising steps for each image generation.","minimum":5,"maximum":50},"hdr":{"type":"number"},"clustering":{"type":"boolean","description":"Activate clustering.","default":false},"shiny":{"type":"number","description":"How shiny is the surface? 
0 is like a matte surface, 1 is like a diamond","minimum":0,"maximum":1},"colorCorrection":{"type":"boolean","description":"Ensure upscaled tile have the same color histogram as original tile.","default":true},"sketch":{"type":"boolean","description":"Activate sketch detection instead of canny.","default":false},"overlapPercentage":{"type":"number","description":"Overlap percentage for the output image.","minimum":0,"maximum":0.5},"prompt":{"type":"string","description":"The prompt that guided the asset generation or editing"},"resultImage":{"type":"boolean","description":"Boolean to able output the cut out object.","default":false},"cannyStructureImage":{"type":"string","description":"The control image already processed by canny detector. Must reference an existing AssetId."},"geometryEnforcement":{"type":"number","description":"Apply extra control to the Skybox 360 geometry.\nThe higher the value, the more the 360 geometry will influence the generated skybox image.\n\nUse with caution. 
Default is adapted to the other parameters.","minimum":0,"maximum":100},"targetWidth":{"type":"number","description":"Target width for the upscaled image, take priority over scaling factor","minimum":1024,"maximum":16000}},"required":["kind","type"]},"automaticCaptioning":{"type":"string","description":"Automatic captioning of the asset"},"description":{"type":"string","description":"The description, it will contain in priority:\n- the manual description\n- the advanced captioning when the asset is used in training flow\n- the automatic captioning"},"privacy":{"type":"string","description":"The privacy of the asset","enum":["private","public","unlisted"]},"mimeType":{"type":"string","description":"The mime type of the asset (example: \"image/png\")"},"source":{"type":"string","description":"source of the asset","enum":["3d23d","3d23d:texture","3d:texture","3d:texture:albedo","3d:texture:metallic","3d:texture:mtl","3d:texture:normal","3d:texture:roughness","audio2audio","background-removal","canvas","canvas-drawing","canvas-export","detection","generative-fill","image-prompt-editing","img23d","img2img","img2video","inference-control-net","inference-control-net-img","inference-control-net-inpainting","inference-control-net-inpainting-ip-adapter","inference-control-net-ip-adapter","inference-control-net-reference","inference-control-net-texture","inference-img","inference-img-ip-adapter","inference-img-texture","inference-in-paint","inference-in-paint-ip-adapter","inference-reference","inference-reference-texture","inference-txt","inference-txt-ip-adapter","inference-txt-texture","patch","pixelization","reframe","restyle","segment","segmentation-image","segmentation-mask","skybox-3d","skybox-base-360","skybox-hdri","texture","texture:albedo","texture:ao","texture:edge","texture:height","texture:metallic","texture:normal","texture:smoothness","txt23d","txt2audio","txt2img","txt2video","unknown","uploaded","uploaded-3d","uploaded-audio","uploaded-avatar","uploaded-video"
,"upscale","upscale-skybox","upscale-texture","upscale-video","vectorization","video2img","video2video"]},"ownerId":{"type":"string","description":"The owner (project) ID (example: \"proj_23tlk332lkht3kl2\" or \"team_dlkhgs23tlk3hlkth32lkht3kl2\" for old teams)"},"createdAt":{"type":"string","description":"The asset creation date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"firstFrame":{"type":"object","description":"The video asset's first frame.\n\nContains the assetId and the url of the first frame.","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"editCapabilities":{"type":"array","description":"List of edit capabilities","items":{"type":"string","enum":["DETECTION","GENERATIVE_FILL","PIXELATE","PROMPT_EDITING","REFINE","REFRAME","REMOVE_BACKGROUND","SEGMENTATION","UPSCALE","UPSCALE_360","VECTORIZATION"]}},"embedding":{"type":"array","description":"The embedding of the asset when requested.\n\nOnly available when an asset can be embedded (ie: not Detection maps)","items":{"type":"number"}},"id":{"type":"string","description":"The asset ID (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"updatedAt":{"type":"string","description":"The asset last update date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"thumbnail":{"type":"object","description":"The asset's thumbnail.\n\nContains the assetId and the url of the thumbnail.","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"nsfw":{"type":"array","description":"The NSFW labels","items":{"type":"string"}},"kind":{"type":"string","description":"The kind of asset","enum":["3d","audio","document","image","image-hdr","json","video"]},"lastFrame":{"type":"object","description":"The video asset's last frame.\n\nContains the assetId and the url of the last frame.","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"originalFileUrl":{"type":"string","description":"The original file url.\n\nContains the url of the original file, without any conversion. Only available for some specific video, audio and threeD assets.\nIs only specified if the given asset data has been replaced with a new file during the creation of the asset."},"authorId":{"type":"string","description":"The author user ID (example: \"dcf121faaa1a0a0bbbd9ca1b73d62aea\")"},"url":{"type":"string","description":"Signed URL to get the asset content"},"isHidden":{"type":"boolean","description":"Whether the asset is hidden."},"tags":{"type":"array","description":"The associated tags (example: [\"sci-fi\", \"landscape\"])","items":{"type":"string"}},"outputIndex":{"type":"number","description":"The output index of the asset within a job\nThis index is a positive integer that starts at 0\nIt is used to differentiate between multiple outputs of the same job\nIf the job has only one output, this index is 0"},"collectionIds":{"type":"array","description":"A list of CollectionId this asset belongs to","items":{"type":"string"}},"properties":{"type":"object","description":"The properties of the asset, content may depend on the kind of asset returned","properties":{"transcription":{"type":"object","description":"Transcription of the audio","properties":{"text":{"type":"string"}},"required":["text"]},"hasNormals":{"type":"boolean","description":"Whether the mesh has normal vectors"},"format":{"type":"string","description":"Format of the mesh file (e.g. 'glb', etc.)"},"description":{"type":"string","description":"Description of the audio"},"bitrate":{"type":"number","description":"Bitrate of the media in bits per second"},"boneCount":{"type":"number","description":"Number of bones if skeleton exists"},"classification":{"type":"string","description":"Classification of the audio","enum":["effect","interview","music","other","sound","speech","text","unknown"]},"sampleRate":{"type":"number","description":"Sample rate of the media in Hz"},"faceCount":{"type":"number","description":"Number of faces/triangles in the mesh"},"vertexCount":{"type":"number","description":"Number of vertices in the mesh"},"animationFrameCount":{"type":"number","description":"Number of animation frames if animations exist"},"duration":{"type":"number","description":"Duration of the media in seconds"},"frameRate":{"type":"number","description":"Frame rate of the video in frames per second"},"nbFrames":{"type":"number","description":"Number of frames in the video"},"channels":{"type":"number","description":"Number of channels of the audio"},"size":{"type":"number"},"hasSkeleton":{"type":"boolean","description":"Whether the mesh has bones/skeleton"},"width":{"type":"number"},"hasAnimations":{"type":"boolean","description":"Whether the mesh has animations"},"codecName":{"type":"string","description":"Codec name of the media"},"hasUVs":{"type":"boolean","description":"Whether the mesh has UV coordinates"},"dimensions":{"type":"array","description":"Bounding box dimensions [width, height, depth]","items":{}},"height":{"type":"number"}},"required":["size"]},"status":{"type":"string","description":"The actual status","enum":["error","pending","success"]}},"required":["authorId","collectionIds","createdAt","editCapabilities","id","kind","metadata","mimeType","ownerId","privacy","properties","source","status","tags","updatedAt","url"]}}},"PutAssetByAssetIdResponse":{"type":"object","required":["asset"],"properties":{"asset":{"type":"object","properties":{"preview":{"type":"object","description":"The asset's preview.\n\nContains the assetId and the url of the preview.","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"metadata":{"type":"object","description":"Metadata of the asset with some additional information","properties":{"maxIterations":{"type":"number"},"modality":{"type":"string","description":"Modality to detect","enum":["canny","depth","grayscale","lineart_anime","mlsd","normal","pose","scribble","segmentation","sketch"],"default":"canny"},"tileStyle":{"type":"boolean","description":"If set to true, during the upscaling process, the model will match tiles of the source image with tiles of the style image(s). This will result in a more coherent restyle. Works best with style images that have a similar composition.","default":false},"modelId":{"type":"string","description":"The modelId used to generate this asset"},"sourceProjectId":{"type":"string"},"bbox":{"type":"array","description":"A bounding box around the object of interest, in the format [x1, y1, x2, y2].","items":{}},"keypointThreshold":{"type":"number","description":"How polished is the surface? 0 is like a rough surface, 1 is like a mirror","minimum":0,"maximum":1},"type":{"type":"string","description":"The type of the asset. Ex: 'inference-txt2img' will represent an asset generated from a text to image model","enum":["3d-texture","3d-texture-albedo","3d-texture-metallic","3d-texture-mtl","3d-texture-normal","3d-texture-roughness","3d23d","3d23d-texture","audio2audio","background-removal","canvas","canvas-drawing","canvas-export","detection","generative-fill","image-prompt-editing","img23d","img2img","img2video","inference-controlnet","inference-controlnet-img2img","inference-controlnet-inpaint","inference-controlnet-inpaint-ip-adapter","inference-controlnet-ip-adapter","inference-controlnet-reference","inference-controlnet-texture","inference-img2img","inference-img2img-ip-adapter","inference-img2img-texture","inference-inpaint","inference-inpaint-ip-adapter","inference-reference","inference-reference-texture","inference-txt2img","inference-txt2img-ip-adapter","inference-txt2img-texture","patch","pixelization","reframe","restyle","segment","segmentation-image","segmentation-mask","skybox-3d","skybox-base-360","skybox-hdri","texture","texture-albedo","texture-ao","texture-edge","texture-height","texture-metallic","texture-normal","texture-smoothness","txt23d","txt2audio","txt2img","txt2video","unknown","uploaded","uploaded-3d","uploaded-audio","uploaded-avatar","uploaded-video","upscale","upscale-skybox","upscale-texture","upscale-video","vectorization","video2img","video2video"]},"text":{"type":"string","description":"A textual description / keywords describing the object of interest.","maxLength":100},"height":{"type":"number"},"nbMasks":{"type":"number"},"creativityDecay":{"type":"number","description":"Amount of decay in creativity over the upscale process. The lower the value, the less the creativity will be preserved over the upscale process.","minimum":0,"maximum":100},"layerDifference":{"type":"number"},"scalingFactor":{"type":"number","description":"Scaling factor (when `targetWidth` not specified)","minimum":1,"maximum":16},"inputFidelity":{"type":"string","description":"When set to `high`, allows to better preserve details from the input images in the output.\nThis is especially useful when using images that contain elements like faces or logos that\nrequire accurate preservation in the generated image.\n\nYou can provide multiple input images that will all be preserved with high fidelity, but keep\nin mind that the first image will be preserved with richer textures and finer details, so if\nyou include elements such as faces, consider placing them in the first image.\n\nOnly available for the `gpt-image-1` model.","enum":["high","low"],"default":"low"},"modelType":{"type":"string","description":"The type of the generator used","enum":["custom","flux.1","flux.1-composition","flux.1-kontext-dev","flux.1-kontext-lora","flux.1-krea-dev","flux.1-krea-lora","flux.1-lora","flux.1-pro","flux.1.1-pro-ultra","flux1.1-pro","gpt-image-1","sd-1_5","sd-1_5-composition","sd-1_5-lora","sd-xl","sd-xl-composition","sd-xl-lora"]},"numOutputs":{"type":"number","description":"The number of outputs to generate.","minimum":1,"maximum":8},"angular":{"type":"number","description":"How angular is the surface? 0 is like a sphere, 1 is like a mechanical object","minimum":0,"maximum":1},"targetHeight":{"type":"number","description":"The target height of the output image.","minimum":0,"maximum":2048},"concepts":{"type":"array","description":"Flux Kontext LoRA to style the image.\nFor Flux Kontext Prompt Editing.","items":{"type":"object","properties":{"modelId":{"type":"string","description":"The model ID (example: \"model_eyVcnFJcR92BxBkz7N6g5w\")"},"modelEpoch":{"type":"string","description":"The epoch of the model (example: \"000001\")\nOnly available for Flux Lora Trained models"},"scale":{"type":"number","description":"The scale of the model (example: 1.0)\nFor Flux Kontext Prompt Editing, the scale is between 0 and 2.","minimum":-2,"maximum":2}},"required":["modelId","scale"]}},"size":{"type":"number"},"spliceThreshold":{"type":"number"},"dilate":{"type":"number","description":"The number of pixels to dilate the result masks.","minimum":0,"maximum":30},"style":{"type":"string","enum":["3d-cartoon","3d-rendered","anime","cartoon","cinematic","claymation","cloud-skydome","comic","cyberpunk","enchanted","fantasy","ink","manga","manga-color","minimalist","neon-tron","oil-painting","pastel","photo","photography","psychedelic","retro-fantasy","scifi-concept-art","space","standard","whimsical"]},"imageFidelity":{"type":"number","description":"Strengthen the similarity to the original image during the upscale. Default: optimized for your preset and style.","minimum":0,"maximum":100},"inputLocation":{"type":"string","description":"Location of the input image in the output.","enum":["bottom","left","middle","right","top"],"default":"middle"},"seed":{"type":"string","description":"The seed used to generate this asset"},"preset":{"type":"string"},"trainingImage":{"type":"boolean"},"resultMask":{"type":"boolean","description":"Boolean to enable returning the masks (binary image) in the response.","default":true},"lockExpiresAt":{"type":"string","description":"The ISO timestamp when the lock on the canvas will expire"},"baseModelId":{"type":"string","description":"The baseModelId that may be changed at inference time"},"pathPrecision":{"type":"number"},"rootParentId":{"type":"string"},"sharpen":{"type":"boolean","description":"Sharpen tiles.","default":false},"cornerThreshold":{"type":"number"},"styleImages":{"type":"array","description":"List of style images. Most of the time, only one image is enough. It must be existing AssetIds.","items":{"type":"string"}},"texture":{"type":"string","description":"The asset to convert in texture maps. Must reference an existing AssetId."},"filterSpeckle":{"type":"number"},"minThreshold":{"type":"number","description":"Minimum threshold for Grayscale conversion"},"maxThreshold":{"type":"number","description":"Maximum threshold for Grayscale conversion"},"styleImagesFidelity":{"type":"number","description":"Condition the influence of the style image(s). The higher the value, the more the style images will influence the upscaled image.","minimum":0,"maximum":100},"depthImage":{"type":"string","description":"The control image processed by depth estimator. Must reference an existing AssetId."},"parentId":{"type":"string"},"parentJobId":{"type":"string"},"width":{"type":"number","description":"The width of the rendered image.","minimum":1024,"maximum":2048},"strength":{"type":"number","description":"The strength\n\nOnly available for the `flux-kontext` LoRA model."},"inferenceId":{"type":"string","description":"The id of the Inference describing how this image was generated"},"lengthThreshold":{"type":"number"},"originalAssetId":{"type":"string"},"backgroundOpacity":{"type":"number","description":"Int to set between 0 and 255 for the opacity of the background in the result images.","minimum":0,"maximum":255},"aspectRatio":{"type":"string","description":"The optional aspect ratio given for the generation, only applicable for some models"},"detailsLevel":{"type":"number","description":"Amount of details to remove or add","minimum":-50,"maximum":50},"fractality":{"type":"number","description":"Determine the scale at which the upscale process works.\n- With a small value, the upscale works at the largest scale, resulting in fewer added details and more coherent images. Ideal for portraits, for example.\n- With a large value, the upscale works at the smallest scale, resulting in more added details and more hallucinations. Ideal for landscapes, for example.\n\n(info): A small value is slower and more expensive to run.","minimum":0,"maximum":100},"horizontalExpansionRatio":{"type":"number","description":"(deprecated) Horizontal expansion ratio.","minimum":1,"maximum":2},"points":{"type":"array","description":"List of points (label, x, y) in the image where label = 0 for background and 1 for object.","items":{"type":"array","items":{}}},"mode":{"type":"string"},"colorPrecision":{"type":"number"},"promptFidelity":{"type":"number","description":"Increase the fidelity to the prompt during upscale. Default: optimized for your preset and style.","minimum":0,"maximum":100},"betterQuality":{"type":"boolean","description":"Remove small dark spots (i.e. “pepper”) and connect small bright cracks.","default":false},"removeBackground":{"type":"boolean","description":"Remove background for Grayscale detector","default":true},"creativity":{"type":"number","description":"Allow the generation of \"hallucinations\" during the upscale process, which adds additional details and deviates from the original image. Default: optimized for your preset and style.","minimum":0,"maximum":100},"structureFidelity":{"type":"number","description":"Strength for the input image structure preservation","minimum":0,"maximum":100},"imageType":{"type":"string","description":"Preserve the seamless properties of skybox or texture images. Input has to be of same type (seamless).","enum":["seamfull","skybox","texture"],"default":"seamfull"},"image":{"type":"string","description":"The input image to process. Must reference an existing AssetId or be a data URL."},"thumbnail":{"type":"object","description":"The thumbnail of the canvas","properties":{"assetId":{"type":"string","description":"The AssetId of the image used as a thumbnail for the canvas (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"url":{"type":"string","description":"The url of the image used as a thumbnail for the canvas"}},"required":["assetId","url"]},"structureImage":{"type":"string","description":"The control image for structure. A canny detector will be applied to this image. Must reference an existing AssetId."},"invert":{"type":"boolean","description":"To invert the relief","default":false},"kind":{"type":"string","enum":["3d","audio","document","image","image-hdr","json","video"]},"colorMode":{"type":"string"},"highThreshold":{"type":"number","description":"High threshold for Canny detector"},"verticalExpansionRatio":{"type":"number","description":"(deprecated) Vertical expansion ratio.","minimum":1,"maximum":2},"halfMode":{"type":"boolean"},"name":{"type":"string"},"negativePromptStrength":{"type":"number","description":"Controls the influence of the negative prompt. Default 0 means the negative prompt has no effect. Higher values increase negative prompt influence.\nMust be > 0 if negativePrompt is provided.","minimum":0,"maximum":10},"refinementSteps":{"type":"number","description":"Additional refinement steps before scaling.\n\nIf scalingFactor == 1, the refinement process will be applied (1 + refinementSteps) times.\nIf scalingFactor > 1, the refinement process will be applied refinementSteps times.","minimum":0,"maximum":4},"referenceImages":{"type":"array","description":"The reference images used for the asset generation or editing","items":{"type":"string"}},"styleFidelity":{"type":"number","description":"The higher the value the more it will look like the style image(s)","minimum":0,"maximum":100},"resultContours":{"type":"boolean","description":"Boolean to output the contours.","default":false},"controlEnd":{"type":"number","description":"End step for control."},"polished":{"type":"number","description":"How polished is the surface? 0 is like a rough surface, 1 is like a mirror","minimum":0,"maximum":1},"scheduler":{"type":"string","description":"The scheduler used to generate this asset"},"overrideEmbeddings":{"type":"boolean","description":"Override the embeddings of the model. Only your prompt and negativePrompt will be used. Use with caution."},"progressPercent":{"type":"number"},"factor":{"type":"number","description":"Contrast factor for Grayscale detector"},"mask":{"type":"string","description":"The mask used for the asset generation or editing"},"lowThreshold":{"type":"number","description":"Low threshold for Canny detector"},"depthFidelity":{"type":"number","description":"The depth fidelity if a depth image provided","minimum":0,"maximum":100},"raised":{"type":"number","description":"How raised is the surface? 0 is flat like water, 1 is like a very rough rock","minimum":0,"maximum":1},"copiedAt":{"type":"string","description":"The date when the asset was copied to a project"},"contours":{"type":"array","items":{"type":"array","items":{"type":"array","items":{"type":"array","items":{"type":"number"}}}}},"negativePrompt":{"type":"string","description":"The negative prompt used to generate this asset"},"defaultParameters":{"type":"boolean","description":"If true, use the default parameters","default":false},"resizeOption":{"type":"number","description":"Size proportion of the input image in the output.","minimum":0.1,"maximum":1},"outputIndex":{"type":"number"},"saveFlipbook":{"type":"boolean","description":"Save a flipbook of the texture. Deactivated when the input texture is larger than 2048x2048px","default":true},"guidance":{"type":"number","description":"The guidance used to generate this asset"},"numInferenceSteps":{"type":"number","description":"The number of denoising steps for each image generation.","minimum":5,"maximum":50},"hdr":{"type":"number"},"clustering":{"type":"boolean","description":"Activate clustering.","default":false},"shiny":{"type":"number","description":"How shiny is the surface? 0 is like a matte surface, 1 is like a diamond","minimum":0,"maximum":1},"colorCorrection":{"type":"boolean","description":"Ensure upscaled tiles have the same color histogram as the original tile.","default":true},"sketch":{"type":"boolean","description":"Activate sketch detection instead of canny.","default":false},"overlapPercentage":{"type":"number","description":"Overlap percentage for the output image.","minimum":0,"maximum":0.5},"prompt":{"type":"string","description":"The prompt that guided the asset generation or editing"},"resultImage":{"type":"boolean","description":"Boolean to enable outputting the cut out object.","default":false},"cannyStructureImage":{"type":"string","description":"The control image already processed by canny detector. Must reference an existing AssetId."},"geometryEnforcement":{"type":"number","description":"Apply extra control to the Skybox 360 geometry.\nThe higher the value, the more the 360 geometry will influence the generated skybox image.\n\nUse with caution. Default is adapted to the other parameters.","minimum":0,"maximum":100},"targetWidth":{"type":"number","description":"Target width for the upscaled image, take priority over scaling factor","minimum":1024,"maximum":16000}},"required":["kind","type"]},"automaticCaptioning":{"type":"string","description":"Automatic captioning of the asset"},"description":{"type":"string","description":"The description, it will contain in priority:\n- the manual description\n- the advanced captioning when the asset is used in training flow\n- the automatic captioning"},"privacy":{"type":"string","description":"The privacy of the asset","enum":["private","public","unlisted"]},"mimeType":{"type":"string","description":"The mime type of the asset (example: \"image/png\")"},"source":{"type":"string","description":"source of the asset","enum":["3d23d","3d23d:texture","3d:texture","3d:texture:albedo","3d:texture:metallic","3d:texture:mtl","3d:texture:normal","3d:texture:roughness","audio2audio","background-removal","canvas","canvas-drawing","canvas-export","detection","generative-fill","image-prompt-editing","img23d","img2img","img2video","inference-control-net","inference-control-net-img","inference-control-net-inpainting","inference-control-net-inpainting-ip-adapter","inference-control-net-ip-adapter","inference-control-net-reference","inference-control-net-texture","inference-img","inference-img-ip-adapter","inference-img-texture","inference-in-paint","inference-in-paint-ip-adapter","inference-reference","inference-reference-texture","inference-txt","inference-txt-ip-adapter","inference-txt-texture","patch","pixelization","reframe","restyle","segment","segmentation-image","segmentation-mask","skybox-3d","skybox-base-360","skybox-hdri","texture","texture:albedo","texture:ao","texture:edge","texture:height","texture:metallic","texture:normal","texture:smoothness","txt23d","txt2audio","txt2img","txt2video","unknown","uploaded","uploaded-3d","uploaded-audio","uploaded-avatar","uploaded-video"
,"upscale","upscale-skybox","upscale-texture","upscale-video","vectorization","video2img","video2video"]},"ownerId":{"type":"string","description":"The owner (project) ID (example: \"proj_23tlk332lkht3kl2\" or \"team_dlkhgs23tlk3hlkth32lkht3kl2\" for old teams)"},"createdAt":{"type":"string","description":"The asset creation date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"firstFrame":{"type":"object","description":"The video asset's first frame.\n\nContains the assetId and the url of the first frame.","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"editCapabilities":{"type":"array","description":"List of edit capabilities","items":{"type":"string","enum":["DETECTION","GENERATIVE_FILL","PIXELATE","PROMPT_EDITING","REFINE","REFRAME","REMOVE_BACKGROUND","SEGMENTATION","UPSCALE","UPSCALE_360","VECTORIZATION"]}},"embedding":{"type":"array","description":"The embedding of the asset when requested.\n\nOnly available when an asset can be embedded (ie: not Detection maps)","items":{"type":"number"}},"id":{"type":"string","description":"The asset ID (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"updatedAt":{"type":"string","description":"The asset last update date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"thumbnail":{"type":"object","description":"The asset's thumbnail.\n\nContains the assetId and the url of the thumbnail.","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"nsfw":{"type":"array","description":"The NSFW labels","items":{"type":"string"}},"kind":{"type":"string","description":"The kind of asset","enum":["3d","audio","document","image","image-hdr","json","video"]},"lastFrame":{"type":"object","description":"The video asset's last frame.\n\nContains the assetId and the url of the last 
frame.","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"originalFileUrl":{"type":"string","description":"The original file url.\n\nContains the url of the original file, without any conversion. Only available for some specific video, audio and threeD assets.\nIs only specified if the given asset data has been replaced with a new file during the creation of the asset."},"authorId":{"type":"string","description":"The author user ID (example: \"dcf121faaa1a0a0bbbd9ca1b73d62aea\")"},"url":{"type":"string","description":"Signed URL to get the asset content"},"isHidden":{"type":"boolean","description":"Whether the asset is hidden."},"tags":{"type":"array","description":"The associated tags (example: [\"sci-fi\", \"landscape\"])","items":{"type":"string"}},"outputIndex":{"type":"number","description":"The output index of the asset within a job\nThis index is a non-negative integer that starts at 0\nIt is used to differentiate between multiple outputs of the same job\nIf the job has only one output, this index is 0"},"collectionIds":{"type":"array","description":"A list of CollectionId this asset belongs to","items":{"type":"string"}},"properties":{"type":"object","description":"The properties of the asset, content may depend on the kind of asset returned","properties":{"transcription":{"type":"object","description":"Transcription of the audio","properties":{"text":{"type":"string"}},"required":["text"]},"hasNormals":{"type":"boolean","description":"Whether the mesh has normal vectors"},"format":{"type":"string","description":"Format of the mesh file (e.g. 
'glb', etc.)"},"description":{"type":"string","description":"Description of the audio"},"bitrate":{"type":"number","description":"Bitrate of the media in bits per second"},"boneCount":{"type":"number","description":"Number of bones if skeleton exists"},"classification":{"type":"string","description":"Classification of the audio","enum":["effect","interview","music","other","sound","speech","text","unknown"]},"sampleRate":{"type":"number","description":"Sample rate of the media in Hz"},"faceCount":{"type":"number","description":"Number of faces/triangles in the mesh"},"vertexCount":{"type":"number","description":"Number of vertices in the mesh"},"animationFrameCount":{"type":"number","description":"Number of animation frames if animations exist"},"duration":{"type":"number","description":"Duration of the media in seconds"},"frameRate":{"type":"number","description":"Frame rate of the video in frames per second"},"nbFrames":{"type":"number","description":"Number of frames in the video"},"channels":{"type":"number","description":"Number of channels of the audio"},"size":{"type":"number"},"hasSkeleton":{"type":"boolean","description":"Whether the mesh has bones/skeleton"},"width":{"type":"number"},"hasAnimations":{"type":"boolean","description":"Whether the mesh has animations"},"codecName":{"type":"string","description":"Codec name of the media"},"hasUVs":{"type":"boolean","description":"Whether the mesh has UV coordinates"},"dimensions":{"type":"array","description":"Bounding box dimensions [width, height, depth]","items":{}},"height":{"type":"number"}},"required":["size"]},"status":{"type":"string","description":"The actual 
status","enum":["error","pending","success"]}},"required":["authorId","collectionIds","createdAt","editCapabilities","id","kind","metadata","mimeType","ownerId","privacy","properties","source","status","tags","updatedAt","url"]}}},"PutAssetsTagsByAssetIdResponse":{"type":"object","required":["added","deleted"],"properties":{"deleted":{"type":"array","description":"The list of deleted tags","items":{"type":"string"}},"added":{"type":"array","description":"The list of added tags","items":{"type":"string"}}}},"PostWebhooksClerkSessionsRequest":{"type":"object","required":["object"],"properties":{"object":{"type":"string"}}},"PutModelsTrainByModelIdRequest":{"type":"object","properties":{"parameters":{"type":"object","properties":{"priorLossWeight":{"type":"number","description":"The weight of prior preservation loss\n\nOnly available for SD15 and SDXL LoRA training","minimum":0,"maximum":1.7976931348623157e+308,"exclusiveMinimum":true},"seed":{"type":"number","description":"Used to reproduce previous results. Default: randomly generated number.\n\nOnly available for SD15 and SDXL LoRA training","minimum":0,"maximum":9007199254740991},"numUNetTrainSteps":{"type":"number","description":"The number of training steps for the UNet\n\nOnly available for SDXL LoRA training","minimum":0,"maximum":40000},"sampleSourceImages":{"type":"array","description":"The sample prompt images (AssetIds) paired with samplePrompts\nOnly available for Flux LoRA training\nMust be the same length as samplePrompts","items":{"type":"string"}},"classPrompt":{"type":"string","description":"The prompt to specify images in the same class as provided instance images\n\nOnly available for SD15 training"},"wandbKey":{"type":"string","description":"The Weights And Bias key to use for logging. 
The maximum length is 40 characters"},"scaleLr":{"type":"boolean","description":"Whether to scale the learning rate\n\nNote: Legacy parameter, will be ignored\n\nOnly available for SD15 and SDXL LoRA training","default":false},"randomCropScale":{"type":"number","description":"Scale of random crops\n\nOnly available for SD15 and SDXL LoRA training","minimum":0,"maximum":1},"lrScheduler":{"type":"string","description":"The scheduler type to use (default: \"constant\")\n\nOnly available for SD15 and SDXL LoRA training","enum":["constant","constant-with-warmup","cosine","cosine-with-restarts","linear","polynomial"],"default":"constant"},"rank":{"type":"number","description":"The dimension of the LoRA update matrices\n\nOnly available for SDXL and Flux LoRA training\n\nDefault value varies depending on the model type:\n- For SDXL: 64\n- For Flux: 16","minimum":4,"maximum":64},"validationPrompt":{"type":"string","description":"Validation prompt\n\nOnly available for SD15 and SDXL LoRA training","default":""},"conceptPrompt":{"type":"string","description":"The prompt with identifier specifying the instance (or subject) of the class (example: \"a daiton dog\")\n\nDefault value varies depending on the model type:\n- For SD1.5: \"daiton\" if no class is associated with the model\n- For SDXL: \"daiton\"\n- For Flux: \"\""},"maxTrainSteps":{"type":"number","description":"Maximum number of training steps to execute (default: varies depending on the model type)\n\nFor SDXL LoRA training, please use `numTextTrainSteps` and `numUNetTrainSteps` instead\n\nDefault value varies depending on the model type:\n- For SD1.5: round((number of training images * 225) / 3)\n- For SDXL: number of training images * 175\n- For Flux: number of training images * 100\n\nMaximum value varies depending on the model type:\n- For SD1.5 and SDXL: [0, 40000]\n- For Flux: [0, 10000]","minimum":0,"maximum":40000},"nbEpochs":{"type":"number","description":"The number of epochs to train for\n\nOnly available 
for Flux LoRA training","minimum":1,"maximum":30},"textEncoderTrainingRatio":{"type":"number","description":"Whether to train the text encoder or not\n\nExample: For 100 steps and a value of 0.2, it means that the text encoder will be trained for 20 steps and then the UNet for 80 steps\n\nNote: Legacy parameter, please use `numTextTrainSteps` and `numUNetTrainSteps`\n\nOnly available for SD15 and SDXL LoRA training","minimum":0,"maximum":0.99},"validationFrequency":{"type":"number","description":"Validation frequency. Cannot be greater than maxTrainSteps value\n\nOnly available for SD15 and SDXL LoRA training","minimum":0},"nbRepeats":{"type":"number","description":"The number of times to repeat the training\n\nOnly available for Flux LoRA training","minimum":1,"maximum":30},"samplePrompts":{"type":"array","description":"The prompts to use for each epoch\nOnly available for Flux LoRA training","items":{"type":"string"}},"randomCropRatio":{"type":"number","description":"Ratio of random crops\n\nOnly available for SD15 and SDXL LoRA training","minimum":0,"maximum":1},"learningRate":{"type":"number","description":"Initial learning rate (after the potential warmup period)\n\nDefault value varies depending on the model type:\n- For SD1.5 and SDXL: 0.000005\n- For Flux: 0.0001","minimum":0,"exclusiveMinimum":true},"optimizeFor":{"type":"string","description":"Optimize the model training task for a specific type of input images. 
The available values are:\n- \"likeness\": optimize training for likeness or portrait (targets specific transformer blocks)\n- \"all\": train all transformer blocks\n- \"none\": train no specific transformer blocks\n\nThis parameter controls which double and single transformer blocks are trained\nduring the LoRA training process.\n\nOnly available for Flux LoRA training","enum":["all","likeness","none"]},"randomCrop":{"type":"boolean","description":"Whether to random crop or center crop images before resizing to the working resolution\n\nOnly available for SD15 and SDXL LoRA training","default":false},"numTextTrainSteps":{"type":"number","description":"The number of training steps for the text encoder\n\nOnly available for SDXL LoRA training","minimum":0,"maximum":40000},"learningRateTextEncoder":{"type":"number","description":"Initial learning rate (after the potential warmup period) for the text encoder\n\nMaximum [Flux LoRA: 0.001]\nDefault [SDXL: 0.00005 | Flux LoRA: 0.00001]\nMinimum [SDXL: 0 | Flux LoRA: 0.000001]","minimum":0,"maximum":0.001,"exclusiveMinimum":true},"learningRateUnet":{"type":"number","description":"Initial learning rate (after the potential warmup period) for the UNet\n\nOnly available for SDXL LoRA training","minimum":0,"exclusiveMinimum":true},"batchSize":{"type":"number","description":"The batch size\nA larger batch size reduces the number of steps and will increase the learning rate\n\nOnly available for Flux LoRA training","minimum":1,"maximum":4}}}}},"PostDownloadModelResponse":{"type":"object","required":["jobId"],"properties":{"jobId":{"type":"string","description":"The job id associated with the download request"}}},"PostModelTrainingActionByModelIdResponse":{"type":"object","required":["model"],"properties":{"model":{"type":"object","properties":{"trainingImages":{"type":"array","description":"The URLs of the first 3 training images of the model. 
To retrieve the full set of images, get it by modelId","items":{"type":"object","properties":{"createdAt":{"type":"string","description":"The training image upload date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"automaticCaptioning":{"type":"string","description":"Automatic captioning of the image"},"downloadUrl":{"type":"string","description":"The URL of the image"},"name":{"type":"string","description":"The original file name of the image (example: \"my-training-image.jpg\")"},"description":{"type":"string","description":"Description for the image"},"id":{"type":"string","description":"The training image ID (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"}},"required":["automaticCaptioning","createdAt","description","downloadUrl","id","name"]}},"inputs":{"type":"array","description":"The inputs of the model. Only used for custom models. To retrieve this list, get it by modelId with GET /models/{modelId}","items":{"type":"object","description":"Defines the input parameters for a model. Use this to understand the available input parameters\nfor a given `modelId` when calling `POST /generate/custom/{modelId}`.\nSee {@link https://docs.scenario.com/docs/video-generation} for examples.","properties":{"parent":{"type":"boolean","description":"Whether this input represents a parent asset to assign to the produced assets.\nOnly available for \\`file\\` and \\`file_array\\` input types.\n\nFor \\`file_array\\`, the parent asset is the first item in the array."},"color":{"type":"boolean","description":"Whether the input is a color or not. Only available for \\`string\\` input type."},"inputs":{"type":"array","description":"The list of inputs which form an object within a container array.\nAll inputs are the same as the current object.\nThis is only available for type inputs_array inputs.","items":{"type":"object","properties":{}}},"minLength":{"type":"number","description":"The minimum allowed length for string inputs. 
Also applies to each item in \\`string_array\\`."},"backgroundBehavior":{"type":"string","description":"Specifies the background behavior for the input. Only available for \\`file\\` and \\`file_array\\`\ninput types with kind \\`image\\`.","enum":["opaque","transparent"]},"description":{"type":"string","description":"Help text displayed in the UI to provide additional information about the input"},"type":{"type":"string","description":"The data type of the input","enum":["boolean","file","file_array","inputs_array","model","model_array","number","number_array","string","string_array"]},"required":{"type":"object","description":"Set of rules that describes when this input is required:\n- \\`always\\`: Input is always required\n- \\`ifNotDefined\\`: Input is required when another specified input is not defined\n- \\`ifDefined\\`: Input is required when another specified input is defined\n- \\`conditionalValues\\`: Input is required when another input has a specific value\n\nBy default, the input is not required.","properties":{"always":{"type":"boolean","description":"Whether the input is always required"},"ifNotDefined":{"type":"object","description":"Makes this input required when another input is not defined:\n- Key: name of the input that must be undefined\n- Value: message to display when this input is required","properties":{}},"ifDefined":{"type":"object","description":"Makes this input required when another input is defined:\n- Key: name of the input that must be defined\n- Value: message to display when this input is required","properties":{}},"conditionalValues":{"type":"object","description":"Makes this input required when another input has a specific value:\n- Key: name of the input to check\n- Value: operation and allowed values that trigger the requirement","properties":{}}}},"modelTypes":{"type":"array","description":"The allowed model types for this input. 
Example: \\`[\"flux.1-lora\"]\\`.\nOnly available for \\`model_array\\` input type.","items":{"type":"string","enum":["custom","flux.1","flux.1-composition","flux.1-kontext-dev","flux.1-kontext-lora","flux.1-krea-dev","flux.1-krea-lora","flux.1-lora","flux.1-pro","flux.1.1-pro-ultra","flux1.1-pro","gpt-image-1","sd-1_5","sd-1_5-composition","sd-1_5-lora","sd-xl","sd-xl-composition","sd-xl-lora"]}},"min":{"type":"number","description":"The minimum allowed value. Only available for \\`number\\` and array input types."},"promptSpark":{"type":"boolean","description":"Whether the input is used with prompt spark. Only available for \\`string\\` input type."},"placeholder":{"type":"string","description":"Placeholder text for the input. Only available for 'string' input type."},"group":{"type":"string","description":"Used to visually group inputs together in the UI. Inputs with the same group value appear\nconsecutively in the UI."},"allowedValues":{"type":"array","description":"The allowed values for the input. For \\`string\\` or \\`number\\` types, creates a single-select\ndropdown.\nFor \\`string_array\\` type, creates a multi-select dropdown.","items":{}},"costImpact":{"type":"boolean","description":"Whether this input affects the model's cost calculation"},"max":{"type":"number","description":"The maximum allowed value. Only available for \\`number\\` and \\`array\\` input types."},"kind":{"type":"string","description":"The asset kind of the input. 
Only taken into account for \\`file\\` and \\`file_array\\` input types.","enum":["3d","audio","document","image","image-hdr","json","video"]},"maskFrom":{"type":"string","description":"The name of the file input field to use as the mask source"},"label":{"type":"string","description":"The label displayed in the UI for this input"},"hint":{"type":"string","description":"Hint text displayed in the UI as a tooltip to guide the user"},"name":{"type":"string","description":"The name that must be used to call the model through the API"},"step":{"type":"number","description":"The step increment for numeric inputs. Only available for \\`number\\` input type.","minimum":1},"prompt":{"type":"boolean","description":"Whether the input is a prompt. When true, displays as a text area with prompt spark feature.\nOnly available for \\`string\\` input type."},"maxLength":{"type":"number","description":"The maximum allowed length for \\`string\\` inputs. Also applies to each item in \\`string_array\\`."}},"required":["name","type"]}},"exampleAssetIds":{"type":"array","description":"List of all example asset IDs setup by the model owner","items":{"type":"string"}},"privacy":{"type":"string","description":"The privacy of the model (default: private)","enum":["private","public","unlisted"],"default":"private"},"softDeletionOn":{"type":"string","description":"The date when the model will be soft deleted (only for Free plan)"},"epoch":{"type":"string","description":"The epoch of the model. 
Only available for Flux Lora Trained models.\nIf not set, uses the final model epoch (latest)"},"uiConfig":{"type":"object","description":"The UI configuration for the model","properties":{"selects":{"type":"object","description":"Configuration for the selects","additionalProperties":{"type":"object","properties":{}}},"inputProperties":{"type":"object","description":"Configuration for the input properties","additionalProperties":{"type":"object","properties":{"collapsed":{"type":"boolean"}}}},"presets":{"type":"array","description":"Configuration for the presets","items":{"type":"object","properties":{"presets":{"type":"object","properties":{}},"fields":{"type":"array","items":{"type":"string"}}},"required":["fields","presets"]}},"triggerGenerate":{"type":"object","description":"Configuration for the trigger generate button","properties":{"after":{"type":"string","description":"The 'name' of the input where the trigger generate button will be displayed (after the input).\nDo not specify both position and after."},"label":{"type":"string"},"position":{"type":"string","description":"The position of the trigger generate button. 
If position specified, the button will be displayed at the specified position.\nDo not specify both position and after.","enum":["bottom","top"]}},"required":["label"]},"lorasComponent":{"type":"object","description":"Configuration for the loras component","properties":{"modelIdInput":{"type":"string","description":"The input model id (example: a composition or a single LoRA modelId)\nIf specified, the model id will be attached to the output asset as a metadata\nIf the model-decomposer parser is specified on it, modelInput and scaleInput will be automatically populated"},"scaleInput":{"type":"string","description":"The input name of the scale (number_array)"},"label":{"type":"string","description":"The label of the component"},"modelInput":{"type":"string","description":"The input name of the model (model_array)"}},"required":["label","modelInput","scaleInput"]},"resolutionComponent":{"type":"object","description":"Configuration for the resolution component","properties":{"presets":{"type":"array","description":"The resolution presets","items":{"type":"object","properties":{"width":{"type":"number"},"label":{"type":"string"},"height":{"type":"number"}},"required":["height","label","width"]}},"widthInput":{"type":"string","description":"The input name of the width"},"label":{"type":"string","description":"The label of the component"},"heightInput":{"type":"string","description":"The input name of the height"}},"required":["heightInput","label","presets","widthInput"]}}},"source":{"type":"string","description":"The source of the model","enum":["civitai","huggingface","other","scenario"]},"ownerId":{"type":"string","description":"The owner ID (example: \"team_VFhihHKMRZyDDnZAJwLb2Q\")"},"type":{"type":"string","description":"The model type (example: 
\"flux.1-lora\")","enum":["custom","flux.1","flux.1-composition","flux.1-kontext-dev","flux.1-kontext-lora","flux.1-krea-dev","flux.1-krea-lora","flux.1-lora","flux.1-pro","flux.1.1-pro-ultra","flux1.1-pro","gpt-image-1","sd-1_5","sd-1_5-composition","sd-1_5-lora","sd-xl","sd-xl-composition","sd-xl-lora"]},"createdAt":{"type":"string","description":"The model creation date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"complianceMetadata":{"type":"object","description":"Compliance and regulatory metadata for the model","properties":{"subProcessor":{"type":"string","description":"The sub-processor used for the model","enum":["AWS","Fal","Modal","Provider","Replicate"]},"modelProvider":{"type":"string","description":"The provider of the model (examples: 'Google', 'AWS', 'BFL', 'Meshy', etc.)"},"licenseTerms":{"type":"string","description":"URL to license terms"},"dataProcessingComment":{"type":"string","description":"Data processing comment (e.g., zeroRetention, temporaryRetention, etc.)","enum":["researchOnly","serviceImprovement","temporaryRetention","zeroRetention"]},"maintainer":{"type":"string","description":"The maintainer of the model","enum":["Fal","Provider","Replicate","Scenario"]}}},"trainingStats":{"type":"object","description":"Additional information about the model's training","properties":{"trainDuration":{"type":"number","description":"The training duration in seconds"},"queueDuration":{"type":"number","description":"The training queued duration in seconds"},"endedAt":{"type":"string","description":"The training end time as an ISO date string"},"startedAt":{"type":"string","description":"The training start time as an ISO date string"}}},"promptEmbedding":{"type":"string","description":"Fine-tune the model's inferences with prompt embedding"},"trainingProgress":{"type":"object","description":"Additional information about the training progress of the model","properties":{"stage":{"type":"string","description":"The stage of the 
request","enum":["pending","queued-for-train","running-train","starting-train"]},"remainingTimeMs":{"type":"number","description":"The remaining time in milliseconds"},"progress":{"type":"number","description":"The progress of the job","minimum":0,"maximum":1},"startedAt":{"type":"number","description":"The timestamp in millisecond marking the start of the process"},"position":{"type":"number","description":"Position of the job in the queue (ie. the number of job in the queue before this one)"},"updatedAt":{"type":"number","description":"Timestamp in milliseconds of the last time the training progress was updated"}},"required":["stage","updatedAt"]},"id":{"type":"string","description":"The model ID (example: \"model_eyVcnFJcR92BxBkz7N6g5w\")"},"class":{"type":"object","description":"The class of the model","properties":{"modelId":{"type":"string","description":"The model ID of the class (example: \"stable-diffusion-v1-5\")"},"name":{"type":"string","description":"The class name (example: \"Character Design\")"},"category":{"type":"string","description":"The category slug of the class (example: \"art-style\")"},"thumbnails":{"type":"array","description":"Some example images URLs to showcase the class","items":{"type":"string"}},"prompt":{"type":"string","description":"The class prompt (example: \"a character design\")"},"conceptPrompt":{"type":"string","description":"The concept prompt of the class (example: \"a sks character design\")"},"slug":{"type":"string","description":"The class slug (example: \"art-style-character-design\")"},"status":{"type":"string","description":"The class status (only published classes are listed, but unpublished classes can still appear in existing models)","enum":["published","unpublished"]}},"required":["category","conceptPrompt","modelId","name","prompt","slug","status","thumbnails"]},"updatedAt":{"type":"string","description":"The model last update date as an ISO string (example: 
\"2023-02-03T11:19:41.579Z\")"},"thumbnail":{"type":"object","description":"A thumbnail for your model","properties":{"assetId":{"type":"string","description":"The AssetId of the image used as a thumbnail for your model (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"url":{"type":"string","description":"The url of the image used as a thumbnail for your model"}},"required":["assetId","url"]},"accessRestrictions":{"type":"number","description":"The access restrictions of the model\n0: Free plan\n25: Creator plan\n50: Pro plan\n75: Team plan\n100: Enterprise plan"},"capabilities":{"type":"array","description":"List of model capabilities (example: [\"txt2img\", \"img2img\", \"txt2img_ip_adapter\", ...])","items":{"type":"string","enum":["3d23d","audio2audio","controlnet","controlnet_img2img","controlnet_inpaint","controlnet_inpaint_ip_adapter","controlnet_ip_adapter","controlnet_reference","controlnet_texture","img23d","img2img","img2img_ip_adapter","img2img_texture","img2txt","img2video","inpaint","inpaint_ip_adapter","outpaint","reference","reference_texture","txt23d","txt2audio","txt2img","txt2img_ip_adapter","txt2img_texture","txt2txt","txt2video","video2img","video2video"]}},"parentModelId":{"type":"string","description":"The id of the parent model"},"trainingImagePairs":{"type":"array","description":"Array of training image pairs","items":{"type":"object","properties":{"sourceId":{"type":"string","description":"The source asset ID (must be a training asset)"},"targetId":{"type":"string","description":"The target asset ID (must be a training asset)"},"instruction":{"type":"string","description":"The instruction for the image pair, source to target"}}}},"trainingImagesNumber":{"type":"number","description":"The total number of training images"},"custom":{"type":"boolean","description":"Whether the model is a custom model and can be used only with POST /generate/custom/{modelId} endpoint"},"modelKeyword":{"type":"string","description":"The model keyword, this is a 
legacy parameter, please use conceptPrompt in parameters"},"shortDescription":{"type":"string","description":"The model short description (example: \"This model generates highly detailed cinematic scenes.\")"},"authorId":{"type":"string","description":"The author user ID (example: \"user_VFhihHKMRZyDDnZAJwLb2Q\")"},"userId":{"type":"string","description":"(Deprecated) The user ID (example: \"user_VFhihHKMRZyDDnZAJwLb2Q\")"},"tags":{"type":"array","description":"The associated tags (example: [\"sci-fi\", \"landscape\"])","items":{"type":"string"}},"negativePromptEmbedding":{"type":"string","description":"Fine-tune the model's inferences with negative prompt embedding"},"concepts":{"type":"array","description":"The concepts is required for the type model: composition","items":{"type":"object","properties":{"modelId":{"type":"string","description":"The model ID (example: \"model_eyVcnFJcR92BxBkz7N6g5w\")"},"modelEpoch":{"type":"string","description":"The epoch of the model (example: \"000001\")\nOnly available for Flux Lora Trained models"},"scale":{"type":"number","description":"The scale of the model (example: 1.0)\nFor Flux Kontext Prompt Editing, the scale is between 0 and 2.","minimum":-2,"maximum":2}},"required":["modelId","scale"]}},"collectionIds":{"type":"array","description":"A list of CollectionId this model belongs to","items":{"type":"string"}},"name":{"type":"string","description":"The model name (example: \"Cinematic Realism\")"},"epochs":{"type":"array","description":"The epochs of the model. 
Only available for Flux Lora Trained models.","items":{"type":"object","properties":{"assets":{"type":"array","description":"The assets of the epoch if sample prompts as been supplied during training","items":{"type":"object","properties":{"assetId":{"type":"string","description":"The AssetId of the image during training (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"url":{"type":"string","description":"The url of the asset"}},"required":["assetId","url"]}},"epoch":{"type":"string","description":"The epoch hash to identify the epoch"}},"required":["epoch"]}},"parameters":{"type":"object","description":"The parameters of the model","properties":{"priorLossWeight":{"type":"number","description":"The weight of prior preservation loss\n\nOnly available for SD15 and SDXL LoRA training","minimum":0,"maximum":1.7976931348623157e+308,"exclusiveMinimum":true},"seed":{"type":"number","description":"Used to reproduce previous results. Default: randomly generated number.\n\nOnly available for SD15 and SDXL LoRA training","minimum":0,"maximum":9007199254740991},"numUNetTrainSteps":{"type":"number","description":"The number of training steps for the UNet\n\nOnly available for SDXL LoRA training","minimum":0,"maximum":40000},"sampleSourceImages":{"type":"array","description":"The sample prompt images (AssetIds) paired with samplePrompts\nOnly available for Flux LoRA training\nMust be the same length as samplePrompts","items":{"type":"string"}},"classPrompt":{"type":"string","description":"The prompt to specify images in the same class as provided instance images\n\nOnly available for SD15 training"},"wandbKey":{"type":"string","description":"The Weights And Bias key to use for logging. 
The maximum length is 40 characters"},"scaleLr":{"type":"boolean","description":"Whether to scale the learning rate\n\nNote: Legacy parameter, will be ignored\n\nOnly available for SD15 and SDXL LoRA training","default":false},"randomCropScale":{"type":"number","description":"Scale of random crops\n\nOnly available for SD15 and SDXL LoRA training","minimum":0,"maximum":1},"lrScheduler":{"type":"string","description":"The scheduler type to use (default: \"constant\")\n\nOnly available for SD15 and SDXL LoRA training","enum":["constant","constant-with-warmup","cosine","cosine-with-restarts","linear","polynomial"],"default":"constant"},"rank":{"type":"number","description":"The dimension of the LoRA update matrices\n\nOnly available for SDXL and Flux LoRA training\n\nDefault value varies depending on the model type:\n- For SDXL: 64\n- For Flux: 16","minimum":4,"maximum":64},"validationPrompt":{"type":"string","description":"Validation prompt\n\nOnly available for SD15 and SDXL LoRA training","default":""},"conceptPrompt":{"type":"string","description":"The prompt with identifier specifying the instance (or subject) of the class (example: \"a daiton dog\")\n\nDefault value varies depending on the model type:\n- For SD1.5: \"daiton\" if no class is associated with the model\n- For SDXL: \"daiton\"\n- For Flux: \"\""},"maxTrainSteps":{"type":"number","description":"Maximum number of training steps to execute (default: varies depending on the model type)\n\nFor SDXL LoRA training, please use `numTextTrainSteps` and `numUNetTrainSteps` instead\n\nDefault value varies depending on the model type:\n- For SD1.5: round((number of training images * 225) / 3)\n- For SDXL: number of training images * 175\n- For Flux: number of training images * 100\n\nMaximum value varies depending on the model type:\n- For SD1.5 and SDXL: [0, 40000]\n- For Flux: [0, 10000]","minimum":0,"maximum":40000},"nbEpochs":{"type":"number","description":"The number of epochs to train for\n\nOnly available 
for Flux LoRA training","minimum":1,"maximum":30},"textEncoderTrainingRatio":{"type":"number","description":"Whether to train the text encoder or not\n\nExample: For 100 steps and a value of 0.2, it means that the text encoder will be trained for 20 steps and then the UNet for 80 steps\n\nNote: Legacy parameter, please use `numTextTrainSteps` and `numUNetTrainSteps`\n\nOnly available for SD15 and SDXL LoRA training","minimum":0,"maximum":0.99},"validationFrequency":{"type":"number","description":"Validation frequency. Cannot be greater than maxTrainSteps value\n\nOnly available for SD15 and SDXL LoRA training","minimum":0},"nbRepeats":{"type":"number","description":"The number of times to repeat the training\n\nOnly available for Flux LoRA training","minimum":1,"maximum":30},"samplePrompts":{"type":"array","description":"The prompts to use for each epoch\nOnly available for Flux LoRA training","items":{"type":"string"}},"randomCropRatio":{"type":"number","description":"Ratio of random crops\n\nOnly available for SD15 and SDXL LoRA training","minimum":0,"maximum":1},"learningRate":{"type":"number","description":"Initial learning rate (after the potential warmup period)\n\nDefault value varies depending on the model type:\n- For SD1.5 and SDXL: 0.000005\n- For Flux: 0.0001","minimum":0,"exclusiveMinimum":true},"optimizeFor":{"type":"string","description":"Optimize the model training task for a specific type of input images. 
The available values are:\n- \"likeness\": optimize training for likeness or portrait (targets specific transformer blocks)\n- \"all\": train all transformer blocks\n- \"none\": train no specific transformer blocks\n\nThis parameter controls which double and single transformer blocks are trained\nduring the LoRA training process.\n\nOnly available for Flux LoRA training","enum":["all","likeness","none"]},"randomCrop":{"type":"boolean","description":"Whether to random crop or center crop images before resizing to the working resolution\n\nOnly available for SD15 and SDXL LoRA training","default":false},"numTextTrainSteps":{"type":"number","description":"The number of training steps for the text encoder\n\nOnly available for SDXL LoRA training","minimum":0,"maximum":40000},"learningRateTextEncoder":{"type":"number","description":"Initial learning rate (after the potential warmup period) for the text encoder\n\nMaximum [Flux LoRA: 0.001]\nDefault [SDXL: 0.00005 | Flux LoRA: 0.00001]\nMinimum [SDXL: 0 | Flux LoRA: 0.000001]","minimum":0,"maximum":0.001,"exclusiveMinimum":true},"learningRateUnet":{"type":"number","description":"Initial learning rate (after the potential warmup period) for the UNet\n\nOnly available for SDXL LoRA training","minimum":0,"exclusiveMinimum":true},"batchSize":{"type":"number","description":"The batch size\nA larger batch size results in fewer steps and will increase the learning rate\n\nOnly available for Flux LoRA training","minimum":1,"maximum":4}}},"compliantModelIds":{"type":"array","description":"List of base model IDs compliant with the model (example: [\"flux.1-dev\", \"flux.1-schnell\"])\nThis attribute is mainly used for Flux LoRA models","items":{"type":"string"}},"status":{"type":"string","description":"The model 
status","enum":["copying","failed","new","trained","training","training-canceled"]}},"required":["capabilities","collectionIds","createdAt","custom","exampleAssetIds","id","privacy","source","status","tags","trainingImagesNumber","type","updatedAt"]}}},"PostTxt2imgInferencesResponse":{"type":"object","required":["inference","job"],"properties":{"creativeUnitsCost":{"type":"number","description":"The Creative Units cost for the request billed"},"creativeUnitsDiscount":{"type":"number","description":"The Creative Units discount for the request billed"},"inference":{"type":"object","properties":{"seed":{"type":"string","description":"Used to reproduce previous results. Default: randomly generated number."},"modelEpoch":{"type":"string","description":"The epoch of the model to use for the inference. Only available for Flux Lora Trained models."},"hideResults":{"type":"boolean","description":"If set, generated assets will be hidden and not returned in the list of images of the inference\nor when listing assets (default: false)","default":false},"aspectRatio":{"type":"string","description":"The aspect ratio of the generated images. Only used for the model flux.1.1-pro-ultra.\nThe aspect ratio is a string formatted as \"width:height\" (example: \"16:9\").","enum":["16:9","1:1","21:9","2:3","3:2","3:4","4:3","4:5","5:4","9:16","9:21"],"default":"1:1"},"type":{"type":"string","description":"The type of inference to use. 
Example: txt2img, img2img, etc.\n\nSelecting the right type will condition the expected parameters.\n\nNote: if model.type is `sd-xl*` or `sd-1_5*`, when using the `\"inpaint\"` inference type, Scenario determines the best available `baseModel` for a given `modelId`: one of `[\"stable-diffusion-inpainting\", \"stable-diffusion-xl-1.0-inpainting-0.1\"] will be used.","enum":["txt2img","txt2img_ip_adapter","txt2img_texture","img2img","img2img_ip_adapter","img2img_texture","inpaint","outpaint","inpaint_ip_adapter","controlnet","controlnet_ip_adapter","reference","reference_texture","controlnet_reference","controlnet_img2img","controlnet_inpaint","controlnet_inpaint_ip_adapter","controlnet_texture"]},"negativePrompt":{"type":"string","description":"The prompt not to guide the image generation, ignored when guidance < 1 (example: \"((ugly face))\")\nFor Flux based model (not Fast-Flux): requires negativePromptStrength > 0 and active only for inference types txt2img / img2img / controlnet."},"scheduler":{"type":"string","description":"The scheduler to use to override the default configured for the model. 
See detailed documentation for more details.","enum":["DDIMScheduler","DDPMScheduler","DEISMultistepScheduler","DPMSolverMultistepScheduler","DPMSolverSinglestepScheduler","EulerAncestralDiscreteScheduler","EulerDiscreteScheduler","HeunDiscreteScheduler","KDPM2AncestralDiscreteScheduler","KDPM2DiscreteScheduler","LCMScheduler","LMSDiscreteScheduler","PNDMScheduler","TCDScheduler","UniPCMultistepScheduler"]},"intermediateImages":{"type":"boolean","description":"Enable or disable the intermediate images generation (default: false)","default":false},"concepts":{"type":"array","items":{"type":"object","properties":{"modelId":{"type":"string","description":"The model ID (example: \"model_eyVcnFJcR92BxBkz7N6g5w\")"},"modelEpoch":{"type":"string","description":"The epoch of the model (example: \"000001\")\nOnly available for Flux Lora Trained models"},"scale":{"type":"number","description":"The scale of the model (example: 1.0)\nFor Flux Kontext Prompt Editing, the scale is between 0 and 2.","minimum":-2,"maximum":2}},"required":["modelId","scale"]}},"guidance":{"type":"number","description":"Controls how closely the generated image follows the prompt. Higher values result in stronger adherence to the prompt. 
Default and allowed values depend on the model type:\n- For Flux dev models, the default is 3.5 and allowed values are within [0, 10]\n- For Flux pro models, the default is 3 and allowed values are within [2, 5]\n- For SDXL models, the default is 6 and allowed values are within [0, 20]\n- For SD1.5 models, the default is 7.5 and allowed values are within [0, 20]","minimum":0,"maximum":20},"numInferenceSteps":{"type":"number","description":"The number of denoising steps for each image generation (within [1, 150], default: 30)","minimum":1,"maximum":150},"numSamples":{"type":"number","description":"The number of images to generate (within [1, 128], default: 4)","minimum":1,"maximum":128},"width":{"type":"number","description":"The width of the generated images, must be a 8 multiple (within [64, 2048], default: 512)\nIf model.type is `sd-xl`, `sd-xl-lora`, `sd-xl-composition` the width must be within [512, 2048]\nIf model.type is `sd-1_5`, the width must be within [64, 1024]\nIf model.type is `flux.1.1-pro-ultra`, you can use the aspectRatio parameter instead","minimum":64,"maximum":2048},"negativePromptStrength":{"type":"number","description":"Only applicable for flux-dev based models for `txt2img`, `img2img`, and `controlnet` inference types.\n\nControls the influence of the negative prompt. Default 0 means the negative prompt has no effect. Higher values increase negative prompt influence.\nMust be > 0 if negativePrompt is provided.","minimum":0,"maximum":10},"baseModelId":{"type":"string","description":"The base model to use for the inference. Only Flux LoRA models can use this parameter.\nAllowed values are available in the model's attribute: `compliantModelIds`"},"prompt":{"type":"string","description":"Full text prompt including the model placeholder. 
(example: \"an illustration of phoenix in a fantasy world, flying over a mountain, 8k, bokeh effect\")"},"height":{"type":"number","description":"The height of the generated images, must be a 8 multiple (within [64, 2048], default: 512)\nIf model.type is `sd-xl`, `sd-xl-lora`, `sd-xl-composition` the height must be within [512, 2048]\nIf model.type is `sd-1_5`, the height must be within [64, 1024]\nIf model.type is `flux.1.1-pro-ultra`, you can use the aspectRatio parameter instead","minimum":64,"maximum":2048}},"required":["prompt","type"]},"job":{"type":"object","properties":{"createdAt":{"type":"string","description":"The job creation date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"jobId":{"type":"string","description":"The job ID (example: \"job_ocZCnG1Df35XRL1QyCZSRxAG8\")"},"metadata":{"type":"object","description":"Metadata of the job with some additional information","properties":{"output":{"type":"object","description":"The output of the job","properties":{}},"input":{"type":"object","description":"The inputs for the job","properties":{}},"assetIds":{"type":"array","description":"List of produced assets for this job","items":{"type":"string"}},"flow":{"type":"array","description":"The flow of the job. Only available for workflow jobs.","items":{"type":"object","properties":{"jobId":{"type":"string","description":"If the flow is part of a WorkflowJob, this is the jobId for the node.\njobId is only available for nodes started. 
A node \"Pending\" for a running workflow job is not started."},"assets":{"type":"array","description":"List of produced assets for this node.","items":{"type":"object","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]}},"logicType":{"type":"string","description":"The type of the logic for the node.\nOnly available for logic nodes.","enum":["if-else"]},"dependsOn":{"type":"array","description":"The nodes that this node depends on.\nOnly available for nodes that have dependencies. Mainly used for user approval nodes.","items":{"type":"string"}},"modelId":{"type":"string","description":"The model id for the node. Mainly used for custom model tasks."},"inputs":{"type":"array","description":"The inputs of the node.","items":{"type":"object","description":"This is used to run the flow.","properties":{"parent":{"type":"boolean","description":"Whether this input represents a parent asset to assign to the produced assets.\nOnly available for \\`file\\` and \\`file_array\\` input types.\n\nFor \\`file_array\\`, the parent asset is the first item in the array."},"color":{"type":"boolean","description":"Whether the input is a color or not. Only available for \\`string\\` input type."},"inputs":{"type":"array","description":"The list of inputs which form an object within a container array.\nAll inputs are the same as the current object.\nThis is only available for type inputs_array inputs.","items":{"type":"object","properties":{}}},"minLength":{"type":"number","description":"The minimum allowed length for string inputs. Also applies to each item in \\`string_array\\`."},"backgroundBehavior":{"type":"string","description":"Specifies the background behavior for the input. 
Only available for \\`file\\` and \\`file_array\\`\ninput types with kind \\`image\\`.","enum":["opaque","transparent"]},"description":{"type":"string","description":"Help text displayed in the UI to provide additional information about the input"},"type":{"type":"string","description":"The data type of the input","enum":["boolean","file","file_array","inputs_array","model","model_array","number","number_array","string","string_array"]},"required":{"type":"object","description":"Set of rules that describes when this input is required:\n- \\`always\\`: Input is always required\n- \\`ifNotDefined\\`: Input is required when another specified input is not defined\n- \\`ifDefined\\`: Input is required when another specified input is defined\n- \\`conditionalValues\\`: Input is required when another input has a specific value\n\nBy default, the input is not required.","properties":{"always":{"type":"boolean","description":"Whether the input is always required"},"ifNotDefined":{"type":"object","description":"Makes this input required when another input is not defined:\n- Key: name of the input that must be undefined\n- Value: message to display when this input is required","properties":{}},"ifDefined":{"type":"object","description":"Makes this input required when another input is defined:\n- Key: name of the input that must be defined\n- Value: message to display when this input is required","properties":{}},"conditionalValues":{"type":"object","description":"Makes this input required when another input has a specific value:\n- Key: name of the input to check\n- Value: operation and allowed values that trigger the requirement","properties":{}}}},"modelTypes":{"type":"array","description":"The allowed model types for this input. 
Example: \\`[\"flux.1-lora\"]\\`.\nOnly available for \\`model_array\\` input type.","items":{"type":"string","enum":["custom","flux.1","flux.1-composition","flux.1-kontext-dev","flux.1-kontext-lora","flux.1-krea-dev","flux.1-krea-lora","flux.1-lora","flux.1-pro","flux.1.1-pro-ultra","flux1.1-pro","gpt-image-1","sd-1_5","sd-1_5-composition","sd-1_5-lora","sd-xl","sd-xl-composition","sd-xl-lora"]}},"ref":{"type":"object","description":"The reference to another input or output of the same workflow.\nMust have at least one of node or conditional.","properties":{"equal":{"type":"string","description":"This is the desired node output value if ref is an if/else node."},"node":{"type":"string","description":"The node id or 'workflow' if the source is a workflow input."},"conditional":{"type":"array","description":"The conditional nodes to reference.\nIf the conditional nodes are successful, the node will be successful.\nIf the conditional nodes are skipped, the node will be skipped.\nContains an array of node ids used to check the status of the nodes.","items":{"type":"string"}},"name":{"type":"string","description":"The name of the input or output to reference.\nIf the type is 'workflow', the name is the name of the input of the workflow is required\nIf the type is 'node', the name is not mandatory, except if you want all outputs of the node.\nTo get all outputs of a node, you can use the name 'all'."}}},"min":{"type":"number","description":"The minimum allowed value. Only available for \\`number\\` and array input types."},"promptSpark":{"type":"boolean","description":"Whether the input is used with prompt spark. Only available for \\`string\\` input type."},"placeholder":{"type":"string","description":"Placeholder text for the input. Only available for 'string' input type."},"group":{"type":"string","description":"Used to visually group inputs together in the UI. 
Inputs with the same group value appear\nconsecutively in the UI."},"allowedValues":{"type":"array","description":"The allowed values for the input. For \\`string\\` or \\`number\\` types, creates a single-select\ndropdown.\nFor \\`string_array\\` type, creates a multi-select dropdown.","items":{}},"costImpact":{"type":"boolean","description":"Whether this input affects the model's cost calculation"},"max":{"type":"number","description":"The maximum allowed value. Only available for \\`number\\` and \\`array\\` input types."},"kind":{"type":"string","description":"The asset kind of the input. Only taken into account for \\`file\\` and \\`file_array\\` input types.","enum":["3d","audio","document","image","image-hdr","json","video"]},"maskFrom":{"type":"string","description":"The name of the file input field to use as the mask source"},"label":{"type":"string","description":"The label displayed in the UI for this input"},"hint":{"type":"string","description":"Hint text displayed in the UI as a tooltip to guide the user"},"name":{"type":"string","description":"The name that must be used to call the model through the API"},"step":{"type":"number","description":"The step increment for numeric inputs. Only available for \\`number\\` input type.","minimum":1},"prompt":{"type":"boolean","description":"Whether the input is a prompt. When true, displays as a text area with prompt spark feature.\nOnly available for \\`string\\` input type."},"maxLength":{"type":"number","description":"The maximum allowed length for \\`string\\` inputs. 
Also applies to each item in \\`string_array\\`."}},"required":["name","type"]}},"id":{"type":"string","description":"The id of the node."},"logic":{"type":"object","description":"The logic of the node.\nOnly available for logic nodes.","properties":{"default":{"type":"string","description":"The default case of the logic.\nContains the id/output of the node to execute if no case is matched.\nOnly available for if/else nodes."},"transform":{"type":"string","description":"The transform of the logic.\nOnly available for transform nodes."},"cases":{"type":"array","description":"The cases of the logic.\nOnly available for if/else nodes.","items":{"type":"object","properties":{"condition":{"type":"string"},"value":{"type":"string"}},"required":["condition","value"]}}}},"type":{"type":"string","description":"The type of the job for the node.","enum":["custom-model","generate-prompt","logic","remove-background","transform","user-approval","workflow"]},"workflowId":{"type":"string","description":"The workflow id for the node. Mainly used for workflow tasks."},"status":{"type":"string","description":"The status of the node. 
Only available for WorkflowJob nodes.","enum":["failure","pending","processing","skipped","success"]}},"required":["id","status","type"]}},"workflowId":{"type":"string","description":"The workflow ID of the job if job is part of a workflow."},"workflowJobId":{"type":"string","description":"The workflow job ID of the job if job is part of a workflow job."}}},"statusHistory":{"type":"array","description":"The history of the different statuses the job went through with the ISO string date\nof when the job reached each statuses.","items":{"type":"object","properties":{"date":{"type":"string"},"status":{"type":"string","enum":["canceled","failure","finalizing","in-progress","pending","queued","success","warming-up"]}},"required":["date","status"]}},"progress":{"type":"number","description":"Progress of the job (between 0 and 1)"},"authorId":{"type":"string","description":"The author user ID (example: \"dcf121faaa1a0a0bbbd9ca1b73d62aea\")"},"jobType":{"type":"string","description":"The type of job","enum":["assets-download","canvas-export","caption","caption-llava","custom","describe-style","detection","embed","flux","flux-model-training","generate-prompt","image-generation","image-prompt-editing","inference","mesh-preview-rendering","model-download","model-import","model-training","openai-image-generation","patch-image","pixelate","reframe","remove-background","repaint","restyle","segment","skybox-3d","skybox-base-360","skybox-hdri","skybox-upscale-360","texture","translate","upload","upscale","upscale-skybox","upscale-texture","vectorize","workflow"]},"ownerId":{"type":"string","description":"The owner ID (example: \"team_U3Qmc8PCdWXwAQJ4Dvw4tV6D\")"},"billing":{"type":"object","description":"The billing of the job","properties":{"cuDiscount":{"type":"number"},"cuCost":{"type":"number"}},"required":["cuCost","cuDiscount"]},"status":{"type":"string","description":"The current status of the 
job","enum":["canceled","failure","finalizing","in-progress","pending","queued","success","warming-up"]},"updatedAt":{"type":"string","description":"The job last update date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"}},"required":["createdAt","jobId","jobType","metadata","progress","status","statusHistory","updatedAt"]}}},"PutModelsExamplesByModelIdRequest":{"type":"object","required":["assetIds"],"properties":{"assetIds":{"type":"array","description":"The list of asset ids to use as examples of the model","items":{"type":"string"}}}},"GetPublicAssetsResponse":{"type":"object","required":["assets"],"properties":{"nextPaginationToken":{"type":"string","description":"A token to query the next page of assets"},"assets":{"type":"array","items":{"type":"object","properties":{"preview":{"type":"object","description":"The asset's preview.\n\nContains the assetId and the url of the preview.","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"metadata":{"type":"object","description":"Metadata of the asset with some additional information","properties":{"maxIterations":{"type":"number"},"modality":{"type":"string","description":"Modality to detect","enum":["canny","depth","grayscale","lineart_anime","mlsd","normal","pose","scribble","segmentation","sketch"],"default":"canny"},"tileStyle":{"type":"boolean","description":"If set to true, during the upscaling process, the model will match tiles of the source image with tiles of the style image(s). This will result in a more coherent restyle. Works best with style images that have a similar composition.","default":false},"modelId":{"type":"string","description":"The modelId used to generate this asset"},"sourceProjectId":{"type":"string"},"bbox":{"type":"array","description":"A bounding box around the object of interest, in the format [x1, y1, x2, y2].","items":{}},"keypointThreshold":{"type":"number","description":"How polished is the surface? 
0 is like a rough surface, 1 is like a mirror","minimum":0,"maximum":1},"type":{"type":"string","description":"The type of the asset. Ex: 'inference-txt2img' will represent an asset generated from a text to image model","enum":["3d-texture","3d-texture-albedo","3d-texture-metallic","3d-texture-mtl","3d-texture-normal","3d-texture-roughness","3d23d","3d23d-texture","audio2audio","background-removal","canvas","canvas-drawing","canvas-export","detection","generative-fill","image-prompt-editing","img23d","img2img","img2video","inference-controlnet","inference-controlnet-img2img","inference-controlnet-inpaint","inference-controlnet-inpaint-ip-adapter","inference-controlnet-ip-adapter","inference-controlnet-reference","inference-controlnet-texture","inference-img2img","inference-img2img-ip-adapter","inference-img2img-texture","inference-inpaint","inference-inpaint-ip-adapter","inference-reference","inference-reference-texture","inference-txt2img","inference-txt2img-ip-adapter","inference-txt2img-texture","patch","pixelization","reframe","restyle","segment","segmentation-image","segmentation-mask","skybox-3d","skybox-base-360","skybox-hdri","texture","texture-albedo","texture-ao","texture-edge","texture-height","texture-metallic","texture-normal","texture-smoothness","txt23d","txt2audio","txt2img","txt2video","unknown","uploaded","uploaded-3d","uploaded-audio","uploaded-avatar","uploaded-video","upscale","upscale-skybox","upscale-texture","upscale-video","vectorization","video2img","video2video"]},"text":{"type":"string","description":"A textual description / keywords describing the object of interest.","maxLength":100},"height":{"type":"number"},"nbMasks":{"type":"number"},"creativityDecay":{"type":"number","description":"Amount of decay in creativity over the upscale process. 
The lowest the value, the less the creativity will be preserved over the upscale process.","minimum":0,"maximum":100},"layerDifference":{"type":"number"},"scalingFactor":{"type":"number","description":"Scaling factor (when `targetWidth` not specified)","minimum":1,"maximum":16},"inputFidelity":{"type":"string","description":"When set to `high`, allows to better preserve details from the input images in the output.\nThis is especially useful when using images that contain elements like faces or logos that\nrequire accurate preservation in the generated image.\n\nYou can provide multiple input images that will all be preserved with high fidelity, but keep\nin mind that the first image will be preserved with richer textures and finer details, so if\nyou include elements such as faces, consider placing them in the first image.\n\nOnly available for the `gpt-image-1` model.","enum":["high","low"],"default":"low"},"modelType":{"type":"string","description":"The type of the generator used","enum":["custom","flux.1","flux.1-composition","flux.1-kontext-dev","flux.1-kontext-lora","flux.1-krea-dev","flux.1-krea-lora","flux.1-lora","flux.1-pro","flux.1.1-pro-ultra","flux1.1-pro","gpt-image-1","sd-1_5","sd-1_5-composition","sd-1_5-lora","sd-xl","sd-xl-composition","sd-xl-lora"]},"numOutputs":{"type":"number","description":"The number of outputs to generate.","minimum":1,"maximum":8},"angular":{"type":"number","description":"How angular is the surface? 
0 is like a sphere, 1 is like a mechanical object","minimum":0,"maximum":1},"targetHeight":{"type":"number","description":"The target height of the output image.","minimum":0,"maximum":2048},"concepts":{"type":"array","description":"Flux Kontext LoRA to style the image.\nFor Flux Kontext Prompt Editing.","items":{"type":"object","properties":{"modelId":{"type":"string","description":"The model ID (example: \"model_eyVcnFJcR92BxBkz7N6g5w\")"},"modelEpoch":{"type":"string","description":"The epoch of the model (example: \"000001\")\nOnly available for Flux Lora Trained models"},"scale":{"type":"number","description":"The scale of the model (example: 1.0)\nFor Flux Kontext Prompt Editing, the scale is between 0 and 2.","minimum":-2,"maximum":2}},"required":["modelId","scale"]}},"size":{"type":"number"},"spliceThreshold":{"type":"number"},"dilate":{"type":"number","description":"The number of pixels to dilate the result masks.","minimum":0,"maximum":30},"style":{"type":"string","enum":["3d-cartoon","3d-rendered","anime","cartoon","cinematic","claymation","cloud-skydome","comic","cyberpunk","enchanted","fantasy","ink","manga","manga-color","minimalist","neon-tron","oil-painting","pastel","photo","photography","psychedelic","retro-fantasy","scifi-concept-art","space","standard","whimsical"]},"imageFidelity":{"type":"number","description":"Strengthen the similarity to the original image during the upscale. 
Default: optimized for your preset and style.","minimum":0,"maximum":100},"inputLocation":{"type":"string","description":"Location of the input image in the output.","enum":["bottom","left","middle","right","top"],"default":"middle"},"seed":{"type":"string","description":"The seed used to generate this asset"},"preset":{"type":"string"},"trainingImage":{"type":"boolean"},"resultMask":{"type":"boolean","description":"Boolean to enable returning the masks (binary image) in the response.","default":true},"lockExpiresAt":{"type":"string","description":"The ISO timestamp when the lock on the canvas will expire"},"baseModelId":{"type":"string","description":"The baseModelId that may be changed at inference time"},"pathPrecision":{"type":"number"},"rootParentId":{"type":"string"},"sharpen":{"type":"boolean","description":"Sharpen tiles.","default":false},"cornerThreshold":{"type":"number"},"styleImages":{"type":"array","description":"List of style images. Most of the time, only one image is enough. They must be existing AssetIds.","items":{"type":"string"}},"texture":{"type":"string","description":"The asset to convert into texture maps. Must reference an existing AssetId."},"filterSpeckle":{"type":"number"},"minThreshold":{"type":"number","description":"Minimum threshold for Grayscale conversion"},"maxThreshold":{"type":"number","description":"Maximum threshold for Grayscale conversion"},"styleImagesFidelity":{"type":"number","description":"Condition the influence of the style image(s). The higher the value, the more the style images will influence the upscaled image.","minimum":0,"maximum":100},"depthImage":{"type":"string","description":"The control image processed by depth estimator. 
Must reference an existing AssetId."},"parentId":{"type":"string"},"parentJobId":{"type":"string"},"width":{"type":"number","description":"The width of the rendered image.","minimum":1024,"maximum":2048},"strength":{"type":"number","description":"The strength\n\nOnly available for the `flux-kontext` LoRA model."},"inferenceId":{"type":"string","description":"The id of the Inference describing how this image was generated"},"lengthThreshold":{"type":"number"},"originalAssetId":{"type":"string"},"backgroundOpacity":{"type":"number","description":"Int to set between 0 and 255 for the opacity of the background in the result images.","minimum":0,"maximum":255},"aspectRatio":{"type":"string","description":"The optional aspect ratio given for the generation, only applicable for some models"},"detailsLevel":{"type":"number","description":"Amount of details to remove or add","minimum":-50,"maximum":50},"fractality":{"type":"number","description":"Determine the scale at which the upscale process works.\n- With a small value, the upscale works at the largest scale, resulting in fewer added details and more coherent images. Ideal for portraits, for example.\n- With a large value, the upscale works at the smallest scale, resulting in more added details and more hallucinations. Ideal for landscapes, for example.\n\n(info): A small value is slower and more expensive to run.","minimum":0,"maximum":100},"horizontalExpansionRatio":{"type":"number","description":"(deprecated) Horizontal expansion ratio.","minimum":1,"maximum":2},"points":{"type":"array","description":"List of points (label, x, y) in the image where label = 0 for background and 1 for object.","items":{"type":"array","items":{}}},"mode":{"type":"string"},"colorPrecision":{"type":"number"},"promptFidelity":{"type":"number","description":"Increase the fidelity to the prompt during upscale. 
Default: optimized for your preset and style.","minimum":0,"maximum":100},"betterQuality":{"type":"boolean","description":"Remove small dark spots (i.e. “pepper”) and connect small bright cracks.","default":false},"removeBackground":{"type":"boolean","description":"Remove background for Grayscale detector","default":true},"creativity":{"type":"number","description":"Allow the generation of \"hallucinations\" during the upscale process, which adds additional details and deviates from the original image. Default: optimized for your preset and style.","minimum":0,"maximum":100},"structureFidelity":{"type":"number","description":"Strength for the input image structure preservation","minimum":0,"maximum":100},"imageType":{"type":"string","description":"Preserve the seamless properties of skybox or texture images. Input has to be of same type (seamless).","enum":["seamfull","skybox","texture"],"default":"seamfull"},"image":{"type":"string","description":"The input image to process. Must reference an existing AssetId or be a data URL."},"thumbnail":{"type":"object","description":"The thumbnail of the canvas","properties":{"assetId":{"type":"string","description":"The AssetId of the image used as a thumbnail for the canvas (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"url":{"type":"string","description":"The url of the image used as a thumbnail for the canvas"}},"required":["assetId","url"]},"structureImage":{"type":"string","description":"The control image for structure. A canny detector will be applied to this image. 
Must reference an existing AssetId."},"invert":{"type":"boolean","description":"To invert the relief","default":false},"kind":{"type":"string","enum":["3d","audio","document","image","image-hdr","json","video"]},"colorMode":{"type":"string"},"highThreshold":{"type":"number","description":"High threshold for Canny detector"},"verticalExpansionRatio":{"type":"number","description":"(deprecated) Vertical expansion ratio.","minimum":1,"maximum":2},"halfMode":{"type":"boolean"},"name":{"type":"string"},"negativePromptStrength":{"type":"number","description":"Controls the influence of the negative prompt. Default 0 means the negative prompt has no effect. Higher values increase negative prompt influence.\nMust be > 0 if negativePrompt is provided.","minimum":0,"maximum":10},"refinementSteps":{"type":"number","description":"Additional refinement steps before scaling.\n\nIf scalingFactor == 1, the refinement process will be applied (1 + refinementSteps) times.\nIf scalingFactor > 1, the refinement process will be applied refinementSteps times.","minimum":0,"maximum":4},"referenceImages":{"type":"array","description":"The reference images used for the asset generation or editing","items":{"type":"string"}},"styleFidelity":{"type":"number","description":"The higher the value the more it will look like the style image(s)","minimum":0,"maximum":100},"resultContours":{"type":"boolean","description":"Boolean to output the contours.","default":false},"controlEnd":{"type":"number","description":"End step for control."},"polished":{"type":"number","description":"How polished is the surface? 0 is like a rough surface, 1 is like a mirror","minimum":0,"maximum":1},"scheduler":{"type":"string","description":"The scheduler used to generate this asset"},"overrideEmbeddings":{"type":"boolean","description":"Override the embeddings of the model. Only your prompt and negativePrompt will be used. 
Use with caution."},"progressPercent":{"type":"number"},"factor":{"type":"number","description":"Contrast factor for Grayscale detector"},"mask":{"type":"string","description":"The mask used for the asset generation or editing"},"lowThreshold":{"type":"number","description":"Low threshold for Canny detector"},"depthFidelity":{"type":"number","description":"The depth fidelity if a depth image provided","minimum":0,"maximum":100},"raised":{"type":"number","description":"How raised is the surface? 0 is flat like water, 1 is like a very rough rock","minimum":0,"maximum":1},"copiedAt":{"type":"string","description":"The date when the asset was copied to a project"},"contours":{"type":"array","items":{"type":"array","items":{"type":"array","items":{"type":"array","items":{"type":"number"}}}}},"negativePrompt":{"type":"string","description":"The negative prompt used to generate this asset"},"defaultParameters":{"type":"boolean","description":"If true, use the default parameters","default":false},"resizeOption":{"type":"number","description":"Size proportion of the input image in the output.","minimum":0.1,"maximum":1},"outputIndex":{"type":"number"},"saveFlipbook":{"type":"boolean","description":"Save a flipbook of the texture. Deactivated when the input texture is larger than 2048x2048px","default":true},"guidance":{"type":"number","description":"The guidance used to generate this asset"},"numInferenceSteps":{"type":"number","description":"The number of denoising steps for each image generation.","minimum":5,"maximum":50},"hdr":{"type":"number"},"clustering":{"type":"boolean","description":"Activate clustering.","default":false},"shiny":{"type":"number","description":"How shiny is the surface? 
0 is like a matte surface, 1 is like a diamond","minimum":0,"maximum":1},"colorCorrection":{"type":"boolean","description":"Ensure upscaled tiles have the same color histogram as original tiles.","default":true},"sketch":{"type":"boolean","description":"Activate sketch detection instead of canny.","default":false},"overlapPercentage":{"type":"number","description":"Overlap percentage for the output image.","minimum":0,"maximum":0.5},"prompt":{"type":"string","description":"The prompt that guided the asset generation or editing"},"resultImage":{"type":"boolean","description":"Boolean to enable output of the cut out object.","default":false},"cannyStructureImage":{"type":"string","description":"The control image already processed by canny detector. Must reference an existing AssetId."},"geometryEnforcement":{"type":"number","description":"Apply extra control to the Skybox 360 geometry.\nThe higher the value, the more the 360 geometry will influence the generated skybox image.\n\nUse with caution. 
Default is adapted to the other parameters.","minimum":0,"maximum":100},"targetWidth":{"type":"number","description":"Target width for the upscaled image, take priority over scaling factor","minimum":1024,"maximum":16000}},"required":["kind","type"]},"automaticCaptioning":{"type":"string","description":"Automatic captioning of the asset"},"description":{"type":"string","description":"The description, it will contain in priority:\n- the manual description\n- the advanced captioning when the asset is used in training flow\n- the automatic captioning"},"privacy":{"type":"string","description":"The privacy of the asset","enum":["private","public","unlisted"]},"mimeType":{"type":"string","description":"The mime type of the asset (example: \"image/png\")"},"source":{"type":"string","description":"source of the asset","enum":["3d23d","3d23d:texture","3d:texture","3d:texture:albedo","3d:texture:metallic","3d:texture:mtl","3d:texture:normal","3d:texture:roughness","audio2audio","background-removal","canvas","canvas-drawing","canvas-export","detection","generative-fill","image-prompt-editing","img23d","img2img","img2video","inference-control-net","inference-control-net-img","inference-control-net-inpainting","inference-control-net-inpainting-ip-adapter","inference-control-net-ip-adapter","inference-control-net-reference","inference-control-net-texture","inference-img","inference-img-ip-adapter","inference-img-texture","inference-in-paint","inference-in-paint-ip-adapter","inference-reference","inference-reference-texture","inference-txt","inference-txt-ip-adapter","inference-txt-texture","patch","pixelization","reframe","restyle","segment","segmentation-image","segmentation-mask","skybox-3d","skybox-base-360","skybox-hdri","texture","texture:albedo","texture:ao","texture:edge","texture:height","texture:metallic","texture:normal","texture:smoothness","txt23d","txt2audio","txt2img","txt2video","unknown","uploaded","uploaded-3d","uploaded-audio","uploaded-avatar","uploaded-video"
,"upscale","upscale-skybox","upscale-texture","upscale-video","vectorization","video2img","video2video"]},"ownerId":{"type":"string","description":"The owner (project) ID (example: \"proj_23tlk332lkht3kl2\" or \"team_dlkhgs23tlk3hlkth32lkht3kl2\" for old teams)"},"createdAt":{"type":"string","description":"The asset creation date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"firstFrame":{"type":"object","description":"The video asset's first frame.\n\nContains the assetId and the url of the first frame.","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"editCapabilities":{"type":"array","description":"List of edit capabilities","items":{"type":"string","enum":["DETECTION","GENERATIVE_FILL","PIXELATE","PROMPT_EDITING","REFINE","REFRAME","REMOVE_BACKGROUND","SEGMENTATION","UPSCALE","UPSCALE_360","VECTORIZATION"]}},"embedding":{"type":"array","description":"The embedding of the asset when requested.\n\nOnly available when an asset can be embedded (ie: not Detection maps)","items":{"type":"number"}},"id":{"type":"string","description":"The asset ID (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"updatedAt":{"type":"string","description":"The asset last update date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"thumbnail":{"type":"object","description":"The asset's thumbnail.\n\nContains the assetId and the url of the thumbnail.","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"nsfw":{"type":"array","description":"The NSFW labels","items":{"type":"string"}},"kind":{"type":"string","description":"The kind of asset","enum":["3d","audio","document","image","image-hdr","json","video"]},"lastFrame":{"type":"object","description":"The video asset's last frame.\n\nContains the assetId and the url of the last 
frame.","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"originalFileUrl":{"type":"string","description":"The original file url.\n\nContains the url of the original file, without any conversion. Only available for some specific video, audio and threeD assets.\nIs only specified if the given asset data has been replaced with a new file during the creation of the asset."},"authorId":{"type":"string","description":"The author user ID (example: \"dcf121faaa1a0a0bbbd9ca1b73d62aea\")"},"url":{"type":"string","description":"Signed URL to get the asset content"},"isHidden":{"type":"boolean","description":"Whether the asset is hidden."},"tags":{"type":"array","description":"The associated tags (example: [\"sci-fi\", \"landscape\"])","items":{"type":"string"}},"outputIndex":{"type":"number","description":"The output index of the asset within a job\nThis index is a positive integer that starts at 0\nIt is used to differentiate between multiple outputs of the same job\nIf the job has only one output, this index is 0"},"collectionIds":{"type":"array","description":"A list of CollectionId this asset belongs to","items":{"type":"string"}},"properties":{"type":"object","description":"The properties of the asset, content may depend on the kind of asset returned","properties":{"transcription":{"type":"object","description":"Transcription of the audio","properties":{"text":{"type":"string"}},"required":["text"]},"hasNormals":{"type":"boolean","description":"Whether the mesh has normal vectors"},"format":{"type":"string","description":"Format of the mesh file (e.g. 
'glb', etc.)"},"description":{"type":"string","description":"Description of the audio"},"bitrate":{"type":"number","description":"Bitrate of the media in bits per second"},"boneCount":{"type":"number","description":"Number of bones if skeleton exists"},"classification":{"type":"string","description":"Classification of the audio","enum":["effect","interview","music","other","sound","speech","text","unknown"]},"sampleRate":{"type":"number","description":"Sample rate of the media in Hz"},"faceCount":{"type":"number","description":"Number of faces/triangles in the mesh"},"vertexCount":{"type":"number","description":"Number of vertices in the mesh"},"animationFrameCount":{"type":"number","description":"Number of animation frames if animations exist"},"duration":{"type":"number","description":"Duration of the media in seconds"},"frameRate":{"type":"number","description":"Frame rate of the video in frames per second"},"nbFrames":{"type":"number","description":"Number of frames in the video"},"channels":{"type":"number","description":"Number of channels of the audio"},"size":{"type":"number"},"hasSkeleton":{"type":"boolean","description":"Whether the mesh has bones/skeleton"},"width":{"type":"number"},"hasAnimations":{"type":"boolean","description":"Whether the mesh has animations"},"codecName":{"type":"string","description":"Codec name of the media"},"hasUVs":{"type":"boolean","description":"Whether the mesh has UV coordinates"},"dimensions":{"type":"array","description":"Bounding box dimensions [width, height, depth]","items":{}},"height":{"type":"number"}},"required":["size"]},"status":{"type":"string","description":"The actual 
status","enum":["error","pending","success"]}},"required":["authorId","collectionIds","createdAt","editCapabilities","id","kind","metadata","mimeType","ownerId","privacy","properties","source","status","tags","updatedAt","url"]}}}},"DeleteAssetsByCollectionIdRequest":{"type":"object","required":["assetIds"],"properties":{"assetIds":{"type":"array","description":"The ids of the assets to remove from the collection. (Max 49 at once)","items":{"type":"string"}}}},"PostPixelateInferencesRequest":{"type":"object","required":["image","pixelGridSize","removeNoise"],"properties":{"image":{"type":"string","description":"The asset ID (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\") to pixelate."},"pixelGridSize":{"type":"number","description":"The size of the pixel grid in the output image. Should be 16, 32, 64, 128, or 256."},"removeNoise":{"type":"boolean","description":"Reduce pixel art artifacts."},"removeBackground":{"type":"boolean","description":"Remove the background from the image.","default":false},"colorPalette":{"type":"array","description":"The color palette to use for the pixel art.","items":{"type":"array","description":"A color palette item is an array of integers with a length of 3 (example: [140, 143, 174]).\nThe integers must be within [0, 255]","items":{"type":"number"}}},"colorPaletteSize":{"type":"number","description":"If no colorPalette is provided, you can provide a palette size. 
Value should be between 2 and 256."}}},"PutWorkflowsByWorkflowIdResponse":{"type":"object","required":["workflow"],"properties":{"workflow":{"type":"object","properties":{"thumbnail":{"type":"object","description":"Currently the thumbnail is identical to the after asset.","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"before":{"type":"object","description":"A representation of an asset before being processed by the workflow","properties":{"assetId":{"type":"string","description":"The AssetId of the image used as a thumbnail for your model (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"url":{"type":"string","description":"The url of the image used as a thumbnail for your model"}},"required":["assetId","url"]},"inputs":{"type":"array","description":"The inputs of the workflow.","items":{"type":"object","description":"Defines the input parameters for a model. Use this to understand the available input parameters\nfor a given `modelId` when calling `POST /generate/custom/{modelId}`.\nSee {@link https://docs.scenario.com/docs/video-generation} for examples.","properties":{"parent":{"type":"boolean","description":"Whether this input represents a parent asset to assign to the produced assets.\nOnly available for \\`file\\` and \\`file_array\\` input types.\n\nFor \\`file_array\\`, the parent asset is the first item in the array."},"color":{"type":"boolean","description":"Whether the input is a color or not. Only available for \\`string\\` input type."},"inputs":{"type":"array","description":"The list of inputs which form an object within a container array.\nAll inputs are the same as the current object.\nThis is only available for type inputs_array inputs.","items":{"type":"object","properties":{}}},"minLength":{"type":"number","description":"The minimum allowed length for string inputs. 
Also applies to each item in \\`string_array\\`."},"backgroundBehavior":{"type":"string","description":"Specifies the background behavior for the input. Only available for \\`file\\` and \\`file_array\\`\ninput types with kind \\`image\\`.","enum":["opaque","transparent"]},"description":{"type":"string","description":"Help text displayed in the UI to provide additional information about the input"},"type":{"type":"string","description":"The data type of the input","enum":["boolean","file","file_array","inputs_array","model","model_array","number","number_array","string","string_array"]},"required":{"type":"object","description":"Set of rules that describes when this input is required:\n- \\`always\\`: Input is always required\n- \\`ifNotDefined\\`: Input is required when another specified input is not defined\n- \\`ifDefined\\`: Input is required when another specified input is defined\n- \\`conditionalValues\\`: Input is required when another input has a specific value\n\nBy default, the input is not required.","properties":{"always":{"type":"boolean","description":"Whether the input is always required"},"ifNotDefined":{"type":"object","description":"Makes this input required when another input is not defined:\n- Key: name of the input that must be undefined\n- Value: message to display when this input is required","properties":{}},"ifDefined":{"type":"object","description":"Makes this input required when another input is defined:\n- Key: name of the input that must be defined\n- Value: message to display when this input is required","properties":{}},"conditionalValues":{"type":"object","description":"Makes this input required when another input has a specific value:\n- Key: name of the input to check\n- Value: operation and allowed values that trigger the requirement","properties":{}}}},"modelTypes":{"type":"array","description":"The allowed model types for this input. 
Example: \\`[\"flux.1-lora\"]\\`.\nOnly available for \\`model_array\\` input type.","items":{"type":"string","enum":["custom","flux.1","flux.1-composition","flux.1-kontext-dev","flux.1-kontext-lora","flux.1-krea-dev","flux.1-krea-lora","flux.1-lora","flux.1-pro","flux.1.1-pro-ultra","flux1.1-pro","gpt-image-1","sd-1_5","sd-1_5-composition","sd-1_5-lora","sd-xl","sd-xl-composition","sd-xl-lora"]}},"min":{"type":"number","description":"The minimum allowed value. Only available for \\`number\\` and array input types."},"promptSpark":{"type":"boolean","description":"Whether the input is used with prompt spark. Only available for \\`string\\` input type."},"placeholder":{"type":"string","description":"Placeholder text for the input. Only available for 'string' input type."},"group":{"type":"string","description":"Used to visually group inputs together in the UI. Inputs with the same group value appear\nconsecutively in the UI."},"allowedValues":{"type":"array","description":"The allowed values for the input. For \\`string\\` or \\`number\\` types, creates a single-select\ndropdown.\nFor \\`string_array\\` type, creates a multi-select dropdown.","items":{}},"costImpact":{"type":"boolean","description":"Whether this input affects the model's cost calculation"},"max":{"type":"number","description":"The maximum allowed value. Only available for \\`number\\` and \\`array\\` input types."},"kind":{"type":"string","description":"The asset kind of the input. 
Only taken into account for \\`file\\` and \\`file_array\\` input types.","enum":["3d","audio","document","image","image-hdr","json","video"]},"maskFrom":{"type":"string","description":"The name of the file input field to use as the mask source"},"label":{"type":"string","description":"The label displayed in the UI for this input"},"hint":{"type":"string","description":"Hint text displayed in the UI as a tooltip to guide the user"},"name":{"type":"string","description":"The name that must be used to call the model through the API"},"step":{"type":"number","description":"The step increment for numeric inputs. Only available for \\`number\\` input type.","minimum":1},"prompt":{"type":"boolean","description":"Whether the input is a prompt. When true, displays as a text area with prompt spark feature.\nOnly available for \\`string\\` input type."},"maxLength":{"type":"number","description":"The maximum allowed length for \\`string\\` inputs. Also applies to each item in \\`string_array\\`."}},"required":["name","type"]}},"description":{"type":"string","description":"The description of the workflow."},"privacy":{"type":"string","enum":["private","public","unlisted"]},"uiConfig":{"type":"object","description":"The UI configuration for the workflow. 
This is managed by scenario webapp.","properties":{"selects":{"type":"object","description":"Configuration for the selects","additionalProperties":{"type":"object","properties":{}}},"inputProperties":{"type":"object","description":"Configuration for the input properties","additionalProperties":{"type":"object","properties":{"collapsed":{"type":"boolean"}}}},"presets":{"type":"array","description":"Configuration for the presets","items":{"type":"object","properties":{"presets":{"type":"object","properties":{}},"fields":{"type":"array","items":{"type":"string"}}},"required":["fields","presets"]}},"triggerGenerate":{"type":"object","description":"Configuration for the trigger generate button","properties":{"after":{"type":"string","description":"The 'name' of the input where the trigger generate button will be displayed (after the input).\nDo not specify both position and after."},"label":{"type":"string"},"position":{"type":"string","description":"The position of the trigger generate button. 
If position specified, the button will be displayed at the specified position.\nDo not specify both position and after.","enum":["bottom","top"]}},"required":["label"]},"lorasComponent":{"type":"object","description":"Configuration for the loras component","properties":{"modelIdInput":{"type":"string","description":"The input model id (example: a composition or a single LoRA modelId)\nIf specified, the model id will be attached to the output asset as a metadata\nIf the model-decomposer parser is specified on it, modelInput and scaleInput will be automatically populated"},"scaleInput":{"type":"string","description":"The input name of the scale (number_array)"},"label":{"type":"string","description":"The label of the component"},"modelInput":{"type":"string","description":"The input name of the model (model_array)"}},"required":["label","modelInput","scaleInput"]},"resolutionComponent":{"type":"object","description":"Configuration for the resolution component","properties":{"presets":{"type":"array","description":"The resolution presets","items":{"type":"object","properties":{"width":{"type":"number"},"label":{"type":"string"},"height":{"type":"number"}},"required":["height","label","width"]}},"widthInput":{"type":"string","description":"The input name of the width"},"label":{"type":"string","description":"The label of the component"},"heightInput":{"type":"string","description":"The input name of the height"}},"required":["heightInput","label","presets","widthInput"]}}},"shortDescription":{"type":"string"},"authorId":{"type":"string"},"ownerId":{"type":"string"},"editorInfo":{"type":"object","description":"The UI data about the workflow. 
This is managed by scenario webapp.","properties":{}},"createdAt":{"type":"string","description":"ISO string"},"tagSet":{"type":"array","description":"The tag set of the workflow.","items":{"type":"string"}},"name":{"type":"string"},"after":{"type":"object","description":"A representation of an asset after being processed by the workflow","properties":{"assetId":{"type":"string","description":"The AssetId of the image used as a thumbnail for your model (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"url":{"type":"string","description":"The url of the image used as a thumbnail for your model"}},"required":["assetId","url"]},"id":{"type":"string"},"flow":{"type":"array","description":"The flow of the workflow.","items":{"type":"object","properties":{"logicType":{"type":"string","description":"The type of the logic for the node.\nOnly available for logic nodes.","enum":["if-else"]},"dependsOn":{"type":"array","description":"The nodes that this node depends on.\nOnly available for nodes that have dependencies. Mainly used for user approval nodes.","items":{"type":"string"}},"modelId":{"type":"string","description":"The model id for the node. Mainly used for custom model tasks."},"inputs":{"type":"array","description":"The inputs of the node.","items":{"type":"object","description":"This is used to run the flow.","properties":{"parent":{"type":"boolean","description":"Whether this input represents a parent asset to assign to the produced assets.\nOnly available for \\`file\\` and \\`file_array\\` input types.\n\nFor \\`file_array\\`, the parent asset is the first item in the array."},"color":{"type":"boolean","description":"Whether the input is a color or not. 
Only available for \\`string\\` input type."},"inputs":{"type":"array","description":"The list of inputs which form an object within a container array.\nAll inputs are the same as the current object.\nThis is only available for type inputs_array inputs.","items":{"type":"object","properties":{}}},"minLength":{"type":"number","description":"The minimum allowed length for string inputs. Also applies to each item in \\`string_array\\`."},"backgroundBehavior":{"type":"string","description":"Specifies the background behavior for the input. Only available for \\`file\\` and \\`file_array\\`\ninput types with kind \\`image\\`.","enum":["opaque","transparent"]},"description":{"type":"string","description":"Help text displayed in the UI to provide additional information about the input"},"type":{"type":"string","description":"The data type of the input","enum":["boolean","file","file_array","inputs_array","model","model_array","number","number_array","string","string_array"]},"required":{"type":"object","description":"Set of rules that describes when this input is required:\n- \\`always\\`: Input is always required\n- \\`ifNotDefined\\`: Input is required when another specified input is not defined\n- \\`ifDefined\\`: Input is required when another specified input is defined\n- \\`conditionalValues\\`: Input is required when another input has a specific value\n\nBy default, the input is not required.","properties":{"always":{"type":"boolean","description":"Whether the input is always required"},"ifNotDefined":{"type":"object","description":"Makes this input required when another input is not defined:\n- Key: name of the input that must be undefined\n- Value: message to display when this input is required","properties":{}},"ifDefined":{"type":"object","description":"Makes this input required when another input is defined:\n- Key: name of the input that must be defined\n- Value: message to display when this input is 
required","properties":{}},"conditionalValues":{"type":"object","description":"Makes this input required when another input has a specific value:\n- Key: name of the input to check\n- Value: operation and allowed values that trigger the requirement","properties":{}}}},"modelTypes":{"type":"array","description":"The allowed model types for this input. Example: \\`[\"flux.1-lora\"]\\`.\nOnly available for \\`model_array\\` input type.","items":{"type":"string","enum":["custom","flux.1","flux.1-composition","flux.1-kontext-dev","flux.1-kontext-lora","flux.1-krea-dev","flux.1-krea-lora","flux.1-lora","flux.1-pro","flux.1.1-pro-ultra","flux1.1-pro","gpt-image-1","sd-1_5","sd-1_5-composition","sd-1_5-lora","sd-xl","sd-xl-composition","sd-xl-lora"]}},"ref":{"type":"object","description":"The reference to another input or output of the same workflow.\nMust have at least one of node or conditional.","properties":{"equal":{"type":"string","description":"This is the desired node output value if ref is an if/else node."},"node":{"type":"string","description":"The node id or 'workflow' if the source is a workflow input."},"conditional":{"type":"array","description":"The conditional nodes to reference.\nIf the conditional nodes are successful, the node will be successful.\nIf the conditional nodes are skipped, the node will be skipped.\nContains an array of node ids used to check the status of the nodes.","items":{"type":"string"}},"name":{"type":"string","description":"The name of the input or output to reference.\nIf the type is 'workflow', the name of the workflow input is required.\nIf the type is 'node', the name is not mandatory, except if you want all outputs of the node.\nTo get all outputs of a node, you can use the name 'all'."}}},"min":{"type":"number","description":"The minimum allowed value. Only available for \\`number\\` and array input types."},"promptSpark":{"type":"boolean","description":"Whether the input is used with prompt spark. 
Only available for \\`string\\` input type."},"placeholder":{"type":"string","description":"Placeholder text for the input. Only available for 'string' input type."},"group":{"type":"string","description":"Used to visually group inputs together in the UI. Inputs with the same group value appear\nconsecutively in the UI."},"allowedValues":{"type":"array","description":"The allowed values for the input. For \\`string\\` or \\`number\\` types, creates a single-select\ndropdown.\nFor \\`string_array\\` type, creates a multi-select dropdown.","items":{}},"costImpact":{"type":"boolean","description":"Whether this input affects the model's cost calculation"},"max":{"type":"number","description":"The maximum allowed value. Only available for \\`number\\` and \\`array\\` input types."},"kind":{"type":"string","description":"The asset kind of the input. Only taken into account for \\`file\\` and \\`file_array\\` input types.","enum":["3d","audio","document","image","image-hdr","json","video"]},"maskFrom":{"type":"string","description":"The name of the file input field to use as the mask source"},"label":{"type":"string","description":"The label displayed in the UI for this input"},"hint":{"type":"string","description":"Hint text displayed in the UI as a tooltip to guide the user"},"name":{"type":"string","description":"The name that must be used to call the model through the API"},"step":{"type":"number","description":"The step increment for numeric inputs. Only available for \\`number\\` input type.","minimum":1},"prompt":{"type":"boolean","description":"Whether the input is a prompt. When true, displays as a text area with prompt spark feature.\nOnly available for \\`string\\` input type."},"maxLength":{"type":"number","description":"The maximum allowed length for \\`string\\` inputs. 
Also applies to each item in \\`string_array\\`."}},"required":["name","type"]}},"id":{"type":"string","description":"The id of the node."},"logic":{"type":"object","description":"The logic of the node.\nOnly available for logic nodes.","properties":{"default":{"type":"string","description":"The default case of the logic.\nContains the id/output of the node to execute if no case is matched.\nOnly available for if/else nodes."},"transform":{"type":"string","description":"The transform of the logic.\nOnly available for transform nodes."},"cases":{"type":"array","description":"The cases of the logic.\nOnly available for if/else nodes.","items":{"type":"object","properties":{"condition":{"type":"string"},"value":{"type":"string"}},"required":["condition","value"]}}}},"type":{"type":"string","description":"The type of the job for the node.","enum":["custom-model","generate-prompt","logic","remove-background","transform","user-approval","workflow"]},"workflowId":{"type":"string","description":"The workflow id for the node. Mainly used for workflow tasks."}},"required":["id","type"]}},"outputAssetKinds":{"type":"array","items":{"type":"string","enum":["3d","audio","document","image","image-hdr","json","video"]}},"status":{"type":"string","enum":["deleted","draft","ready"]},"updatedAt":{"type":"string","description":"ISO string"}},"required":["authorId","createdAt","description","editorInfo","flow","id","inputs","name","ownerId","privacy","status","tagSet","updatedAt"]}}},"GetPublicModelsByModelIdResponse":{"type":"object","required":["model"],"properties":{"model":{"type":"object","properties":{"trainingImages":{"type":"array","description":"The URLs of the first 3 training images of the model. 
To retrieve the full set of images, get it by modelId","items":{"type":"object","properties":{"createdAt":{"type":"string","description":"The training image upload date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"automaticCaptioning":{"type":"string","description":"Automatic captioning of the image"},"downloadUrl":{"type":"string","description":"The URL of the image"},"name":{"type":"string","description":"The original file name of the image (example: \"my-training-image.jpg\")"},"description":{"type":"string","description":"Description for the image"},"id":{"type":"string","description":"The training image ID (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"}},"required":["automaticCaptioning","createdAt","description","downloadUrl","id","name"]}},"inputs":{"type":"array","description":"The inputs of the model. Only used for custom models. To retrieve this list, get it by modelId with GET /models/{modelId}","items":{"type":"object","description":"Defines the input parameters for a model. Use this to understand the available input parameters\nfor a given `modelId` when calling `POST /generate/custom/{modelId}`.\nSee {@link https://docs.scenario.com/docs/video-generation} for examples.","properties":{"parent":{"type":"boolean","description":"Whether this input represents a parent asset to assign to the produced assets.\nOnly available for \\`file\\` and \\`file_array\\` input types.\n\nFor \\`file_array\\`, the parent asset is the first item in the array."},"color":{"type":"boolean","description":"Whether the input is a color or not. Only available for \\`string\\` input type."},"inputs":{"type":"array","description":"The list of inputs which form an object within a container array.\nAll inputs are the same as the current object.\nThis is only available for type inputs_array inputs.","items":{"type":"object","properties":{}}},"minLength":{"type":"number","description":"The minimum allowed length for string inputs. 
Also applies to each item in \\`string_array\\`."},"backgroundBehavior":{"type":"string","description":"Specifies the background behavior for the input. Only available for \\`file\\` and \\`file_array\\`\ninput types with kind \\`image\\`.","enum":["opaque","transparent"]},"description":{"type":"string","description":"Help text displayed in the UI to provide additional information about the input"},"type":{"type":"string","description":"The data type of the input","enum":["boolean","file","file_array","inputs_array","model","model_array","number","number_array","string","string_array"]},"required":{"type":"object","description":"Set of rules that describes when this input is required:\n- \\`always\\`: Input is always required\n- \\`ifNotDefined\\`: Input is required when another specified input is not defined\n- \\`ifDefined\\`: Input is required when another specified input is defined\n- \\`conditionalValues\\`: Input is required when another input has a specific value\n\nBy default, the input is not required.","properties":{"always":{"type":"boolean","description":"Whether the input is always required"},"ifNotDefined":{"type":"object","description":"Makes this input required when another input is not defined:\n- Key: name of the input that must be undefined\n- Value: message to display when this input is required","properties":{}},"ifDefined":{"type":"object","description":"Makes this input required when another input is defined:\n- Key: name of the input that must be defined\n- Value: message to display when this input is required","properties":{}},"conditionalValues":{"type":"object","description":"Makes this input required when another input has a specific value:\n- Key: name of the input to check\n- Value: operation and allowed values that trigger the requirement","properties":{}}}},"modelTypes":{"type":"array","description":"The allowed model types for this input. 
Example: \\`[\"flux.1-lora\"]\\`.\nOnly available for \\`model_array\\` input type.","items":{"type":"string","enum":["custom","flux.1","flux.1-composition","flux.1-kontext-dev","flux.1-kontext-lora","flux.1-krea-dev","flux.1-krea-lora","flux.1-lora","flux.1-pro","flux.1.1-pro-ultra","flux1.1-pro","gpt-image-1","sd-1_5","sd-1_5-composition","sd-1_5-lora","sd-xl","sd-xl-composition","sd-xl-lora"]}},"min":{"type":"number","description":"The minimum allowed value. Only available for \\`number\\` and array input types."},"promptSpark":{"type":"boolean","description":"Whether the input is used with prompt spark. Only available for \\`string\\` input type."},"placeholder":{"type":"string","description":"Placeholder text for the input. Only available for 'string' input type."},"group":{"type":"string","description":"Used to visually group inputs together in the UI. Inputs with the same group value appear\nconsecutively in the UI."},"allowedValues":{"type":"array","description":"The allowed values for the input. For \\`string\\` or \\`number\\` types, creates a single-select\ndropdown.\nFor \\`string_array\\` type, creates a multi-select dropdown.","items":{}},"costImpact":{"type":"boolean","description":"Whether this input affects the model's cost calculation"},"max":{"type":"number","description":"The maximum allowed value. Only available for \\`number\\` and \\`array\\` input types."},"kind":{"type":"string","description":"The asset kind of the input. 
Only taken into account for \\`file\\` and \\`file_array\\` input types.","enum":["3d","audio","document","image","image-hdr","json","video"]},"maskFrom":{"type":"string","description":"The name of the file input field to use as the mask source"},"label":{"type":"string","description":"The label displayed in the UI for this input"},"hint":{"type":"string","description":"Hint text displayed in the UI as a tooltip to guide the user"},"name":{"type":"string","description":"The name that must be used to call the model through the API"},"step":{"type":"number","description":"The step increment for numeric inputs. Only available for \\`number\\` input type.","minimum":1},"prompt":{"type":"boolean","description":"Whether the input is a prompt. When true, displays as a text area with prompt spark feature.\nOnly available for \\`string\\` input type."},"maxLength":{"type":"number","description":"The maximum allowed length for \\`string\\` inputs. Also applies to each item in \\`string_array\\`."}},"required":["name","type"]}},"exampleAssetIds":{"type":"array","description":"List of all example asset IDs setup by the model owner","items":{"type":"string"}},"privacy":{"type":"string","description":"The privacy of the model (default: private)","enum":["private","public","unlisted"],"default":"private"},"softDeletionOn":{"type":"string","description":"The date when the model will be soft deleted (only for Free plan)"},"epoch":{"type":"string","description":"The epoch of the model. 
Only available for Flux Lora Trained models.\nIf not set, uses the final model epoch (latest)"},"uiConfig":{"type":"object","description":"The UI configuration for the model","properties":{"selects":{"type":"object","description":"Configuration for the selects","additionalProperties":{"type":"object","properties":{}}},"inputProperties":{"type":"object","description":"Configuration for the input properties","additionalProperties":{"type":"object","properties":{"collapsed":{"type":"boolean"}}}},"presets":{"type":"array","description":"Configuration for the presets","items":{"type":"object","properties":{"presets":{"type":"object","properties":{}},"fields":{"type":"array","items":{"type":"string"}}},"required":["fields","presets"]}},"triggerGenerate":{"type":"object","description":"Configuration for the trigger generate button","properties":{"after":{"type":"string","description":"The 'name' of the input where the trigger generate button will be displayed (after the input).\nDo not specify both position and after."},"label":{"type":"string"},"position":{"type":"string","description":"The position of the trigger generate button. 
If position specified, the button will be displayed at the specified position.\nDo not specify both position and after.","enum":["bottom","top"]}},"required":["label"]},"lorasComponent":{"type":"object","description":"Configuration for the loras component","properties":{"modelIdInput":{"type":"string","description":"The input model id (example: a composition or a single LoRA modelId)\nIf specified, the model id will be attached to the output asset as a metadata\nIf the model-decomposer parser is specified on it, modelInput and scaleInput will be automatically populated"},"scaleInput":{"type":"string","description":"The input name of the scale (number_array)"},"label":{"type":"string","description":"The label of the component"},"modelInput":{"type":"string","description":"The input name of the model (model_array)"}},"required":["label","modelInput","scaleInput"]},"resolutionComponent":{"type":"object","description":"Configuration for the resolution component","properties":{"presets":{"type":"array","description":"The resolution presets","items":{"type":"object","properties":{"width":{"type":"number"},"label":{"type":"string"},"height":{"type":"number"}},"required":["height","label","width"]}},"widthInput":{"type":"string","description":"The input name of the width"},"label":{"type":"string","description":"The label of the component"},"heightInput":{"type":"string","description":"The input name of the height"}},"required":["heightInput","label","presets","widthInput"]}}},"source":{"type":"string","description":"The source of the model","enum":["civitai","huggingface","other","scenario"]},"ownerId":{"type":"string","description":"The owner ID (example: \"team_VFhihHKMRZyDDnZAJwLb2Q\")"},"type":{"type":"string","description":"The model type (example: 
\"flux.1-lora\")","enum":["custom","flux.1","flux.1-composition","flux.1-kontext-dev","flux.1-kontext-lora","flux.1-krea-dev","flux.1-krea-lora","flux.1-lora","flux.1-pro","flux.1.1-pro-ultra","flux1.1-pro","gpt-image-1","sd-1_5","sd-1_5-composition","sd-1_5-lora","sd-xl","sd-xl-composition","sd-xl-lora"]},"createdAt":{"type":"string","description":"The model creation date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"complianceMetadata":{"type":"object","description":"Compliance and regulatory metadata for the model","properties":{"subProcessor":{"type":"string","description":"The sub-processor used for the model","enum":["AWS","Fal","Modal","Provider","Replicate"]},"modelProvider":{"type":"string","description":"The provider of the model (examples: 'Google', 'AWS', 'BFL', 'Meshy', etc.)"},"licenseTerms":{"type":"string","description":"URL to license terms"},"dataProcessingComment":{"type":"string","description":"Data processing comment (e.g., zeroRetention, temporaryRetention, etc.)","enum":["researchOnly","serviceImprovement","temporaryRetention","zeroRetention"]},"maintainer":{"type":"string","description":"The maintainer of the model","enum":["Fal","Provider","Replicate","Scenario"]}}},"trainingStats":{"type":"object","description":"Additional information about the model's training","properties":{"trainDuration":{"type":"number","description":"The training duration in seconds"},"queueDuration":{"type":"number","description":"The training queued duration in seconds"},"endedAt":{"type":"string","description":"The training end time as an ISO date string"},"startedAt":{"type":"string","description":"The training start time as an ISO date string"}}},"promptEmbedding":{"type":"string","description":"Fine-tune the model's inferences with prompt embedding"},"trainingProgress":{"type":"object","description":"Additional information about the training progress of the model","properties":{"stage":{"type":"string","description":"The stage of the 
request","enum":["pending","queued-for-train","running-train","starting-train"]},"remainingTimeMs":{"type":"number","description":"The remaining time in milliseconds"},"progress":{"type":"number","description":"The progress of the job","minimum":0,"maximum":1},"startedAt":{"type":"number","description":"The timestamp in millisecond marking the start of the process"},"position":{"type":"number","description":"Position of the job in the queue (ie. the number of job in the queue before this one)"},"updatedAt":{"type":"number","description":"Timestamp in milliseconds of the last time the training progress was updated"}},"required":["stage","updatedAt"]},"id":{"type":"string","description":"The model ID (example: \"model_eyVcnFJcR92BxBkz7N6g5w\")"},"class":{"type":"object","description":"The class of the model","properties":{"modelId":{"type":"string","description":"The model ID of the class (example: \"stable-diffusion-v1-5\")"},"name":{"type":"string","description":"The class name (example: \"Character Design\")"},"category":{"type":"string","description":"The category slug of the class (example: \"art-style\")"},"thumbnails":{"type":"array","description":"Some example images URLs to showcase the class","items":{"type":"string"}},"prompt":{"type":"string","description":"The class prompt (example: \"a character design\")"},"conceptPrompt":{"type":"string","description":"The concept prompt of the class (example: \"a sks character design\")"},"slug":{"type":"string","description":"The class slug (example: \"art-style-character-design\")"},"status":{"type":"string","description":"The class status (only published classes are listed, but unpublished classes can still appear in existing models)","enum":["published","unpublished"]}},"required":["category","conceptPrompt","modelId","name","prompt","slug","status","thumbnails"]},"updatedAt":{"type":"string","description":"The model last update date as an ISO string (example: 
\"2023-02-03T11:19:41.579Z\")"},"thumbnail":{"type":"object","description":"A thumbnail for your model","properties":{"assetId":{"type":"string","description":"The AssetId of the image used as a thumbnail for your model (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"url":{"type":"string","description":"The url of the image used as a thumbnail for your model"}},"required":["assetId","url"]},"accessRestrictions":{"type":"number","description":"The access restrictions of the model\n0: Free plan\n25: Creator plan\n50: Pro plan\n75: Team plan\n100: Enterprise plan"},"capabilities":{"type":"array","description":"List of model capabilities (example: [\"txt2img\", \"img2img\", \"txt2img_ip_adapter\", ...])","items":{"type":"string","enum":["3d23d","audio2audio","controlnet","controlnet_img2img","controlnet_inpaint","controlnet_inpaint_ip_adapter","controlnet_ip_adapter","controlnet_reference","controlnet_texture","img23d","img2img","img2img_ip_adapter","img2img_texture","img2txt","img2video","inpaint","inpaint_ip_adapter","outpaint","reference","reference_texture","txt23d","txt2audio","txt2img","txt2img_ip_adapter","txt2img_texture","txt2txt","txt2video","video2img","video2video"]}},"parentModelId":{"type":"string","description":"The id of the parent model"},"trainingImagePairs":{"type":"array","description":"Array of training image pairs","items":{"type":"object","properties":{"sourceId":{"type":"string","description":"The source asset ID (must be a training asset)"},"targetId":{"type":"string","description":"The target asset ID (must be a training asset)"},"instruction":{"type":"string","description":"The instruction for the image pair, source to target"}}}},"trainingImagesNumber":{"type":"number","description":"The total number of training images"},"custom":{"type":"boolean","description":"Whether the model is a custom model and can be used only with POST /generate/custom/{modelId} endpoint"},"modelKeyword":{"type":"string","description":"The model keyword, this is a 
legacy parameter, please use conceptPrompt in parameters"},"shortDescription":{"type":"string","description":"The model short description (example: \"This model generates highly detailed cinematic scenes.\")"},"authorId":{"type":"string","description":"The author user ID (example: \"user_VFhihHKMRZyDDnZAJwLb2Q\")"},"userId":{"type":"string","description":"(Deprecated) The user ID (example: \"user_VFhihHKMRZyDDnZAJwLb2Q\")"},"tags":{"type":"array","description":"The associated tags (example: [\"sci-fi\", \"landscape\"])","items":{"type":"string"}},"negativePromptEmbedding":{"type":"string","description":"Fine-tune the model's inferences with negative prompt embedding"},"concepts":{"type":"array","description":"The concepts is required for the type model: composition","items":{"type":"object","properties":{"modelId":{"type":"string","description":"The model ID (example: \"model_eyVcnFJcR92BxBkz7N6g5w\")"},"modelEpoch":{"type":"string","description":"The epoch of the model (example: \"000001\")\nOnly available for Flux Lora Trained models"},"scale":{"type":"number","description":"The scale of the model (example: 1.0)\nFor Flux Kontext Prompt Editing, the scale is between 0 and 2.","minimum":-2,"maximum":2}},"required":["modelId","scale"]}},"collectionIds":{"type":"array","description":"A list of CollectionId this model belongs to","items":{"type":"string"}},"name":{"type":"string","description":"The model name (example: \"Cinematic Realism\")"},"epochs":{"type":"array","description":"The epochs of the model. 
Only available for Flux Lora Trained models.","items":{"type":"object","properties":{"assets":{"type":"array","description":"The assets of the epoch if sample prompts have been supplied during training","items":{"type":"object","properties":{"assetId":{"type":"string","description":"The AssetId of the image during training (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"url":{"type":"string","description":"The url of the asset"}},"required":["assetId","url"]}},"epoch":{"type":"string","description":"The epoch hash to identify the epoch"}},"required":["epoch"]}},"parameters":{"type":"object","description":"The parameters of the model","properties":{"priorLossWeight":{"type":"number","description":"The weight of prior preservation loss\n\nOnly available for SD15 and SDXL LoRA training","minimum":0,"maximum":1.7976931348623157e+308,"exclusiveMinimum":true},"seed":{"type":"number","description":"Used to reproduce previous results. Default: randomly generated number.\n\nOnly available for SD15 and SDXL LoRA training","minimum":0,"maximum":9007199254740991},"numUNetTrainSteps":{"type":"number","description":"The number of training steps for the UNet\n\nOnly available for SDXL LoRA training","minimum":0,"maximum":40000},"sampleSourceImages":{"type":"array","description":"The sample prompt images (AssetIds) paired with samplePrompts\nOnly available for Flux LoRA training\nMust be the same length as samplePrompts","items":{"type":"string"}},"classPrompt":{"type":"string","description":"The prompt to specify images in the same class as provided instance images\n\nOnly available for SD15 training"},"wandbKey":{"type":"string","description":"The Weights And Bias key to use for logging. 
The maximum length is 40 characters"},"scaleLr":{"type":"boolean","description":"Whether to scale the learning rate\n\nNote: Legacy parameter, will be ignored\n\nOnly available for SD15 and SDXL LoRA training","default":false},"randomCropScale":{"type":"number","description":"Scale of random crops\n\nOnly available for SD15 and SDXL LoRA training","minimum":0,"maximum":1},"lrScheduler":{"type":"string","description":"The scheduler type to use (default: \"constant\")\n\nOnly available for SD15 and SDXL LoRA training","enum":["constant","constant-with-warmup","cosine","cosine-with-restarts","linear","polynomial"],"default":"constant"},"rank":{"type":"number","description":"The dimension of the LoRA update matrices\n\nOnly available for SDXL and Flux LoRA training\n\nDefault value varies depending on the model type:\n- For SDXL: 64\n- For Flux: 16","minimum":4,"maximum":64},"validationPrompt":{"type":"string","description":"Validation prompt\n\nOnly available for SD15 and SDXL LoRA training","default":""},"conceptPrompt":{"type":"string","description":"The prompt with identifier specifying the instance (or subject) of the class (example: \"a daiton dog\")\n\nDefault value varies depending on the model type:\n- For SD1.5: \"daiton\" if no class is associated with the model\n- For SDXL: \"daiton\"\n- For Flux: \"\""},"maxTrainSteps":{"type":"number","description":"Maximum number of training steps to execute (default: varies depending on the model type)\n\nFor SDXL LoRA training, please use `numTextTrainSteps` and `numUNetTrainSteps` instead\n\nDefault value varies depending on the model type:\n- For SD1.5: round((number of training images * 225) / 3)\n- For SDXL: number of training images * 175\n- For Flux: number of training images * 100\n\nMaximum value varies depending on the model type:\n- For SD1.5 and SDXL: [0, 40000]\n- For Flux: [0, 10000]","minimum":0,"maximum":40000},"nbEpochs":{"type":"number","description":"The number of epochs to train for\n\nOnly available 
for Flux LoRA training","minimum":1,"maximum":30},"textEncoderTrainingRatio":{"type":"number","description":"Whether to train the text encoder or not\n\nExample: For 100 steps and a value of 0.2, it means that the text encoder will be trained for 20 steps and then the UNet for 80 steps\n\nNote: Legacy parameter, please use `numTextTrainSteps` and `numUNetTrainSteps`\n\nOnly available for SD15 and SDXL LoRA training","minimum":0,"maximum":0.99},"validationFrequency":{"type":"number","description":"Validation frequency. Cannot be greater than maxTrainSteps value\n\nOnly available for SD15 and SDXL LoRA training","minimum":0},"nbRepeats":{"type":"number","description":"The number of times to repeat the training\n\nOnly available for Flux LoRA training","minimum":1,"maximum":30},"samplePrompts":{"type":"array","description":"The prompts to use for each epoch\nOnly available for Flux LoRA training","items":{"type":"string"}},"randomCropRatio":{"type":"number","description":"Ratio of random crops\n\nOnly available for SD15 and SDXL LoRA training","minimum":0,"maximum":1},"learningRate":{"type":"number","description":"Initial learning rate (after the potential warmup period)\n\nDefault value varies depending on the model type:\n- For SD1.5 and SDXL: 0.000005\n- For Flux: 0.0001","minimum":0,"exclusiveMinimum":true},"optimizeFor":{"type":"string","description":"Optimize the model training task for a specific type of input images. 
The available values are:\n- \"likeness\": optimize training for likeness or portrait (targets specific transformer blocks)\n- \"all\": train all transformer blocks\n- \"none\": train no specific transformer blocks\n\nThis parameter controls which double and single transformer blocks are trained\nduring the LoRA training process.\n\nOnly available for Flux LoRA training","enum":["likeness"],"default":"undefined"},"randomCrop":{"type":"boolean","description":"Whether to random crop or center crop images before resizing to the working resolution\n\nOnly available for SD15 and SDXL LoRA training","default":false},"numTextTrainSteps":{"type":"number","description":"The number of training steps for the text encoder\n\nOnly available for SDXL LoRA training","minimum":0,"maximum":40000},"learningRateTextEncoder":{"type":"number","description":"Initial learning rate (after the potential warmup period) for the text encoder\n\nMaximum [Flux LoRA: 0.001]\nDefault [SDXL: 0.00005 | Flux LoRA: 0.00001]\nMinimum [SDXL: 0 | Flux LoRA: 0.000001]","minimum":0,"maximum":0.001,"exclusiveMinimum":true},"learningRateUnet":{"type":"number","description":"Initial learning rate (after the potential warmup period) for the UNet\n\nOnly available for SDXL LoRA training","minimum":0,"exclusiveMinimum":true},"batchSize":{"type":"number","description":"The batch size\nLess steps, and will increase the learning rate\n\nOnly available for Flux LoRA training","minimum":1,"maximum":4}}},"compliantModelIds":{"type":"array","description":"List of base model IDs compliant with the model (example: [\"flux.1-dev\", \"flux.1-schnell\"])\nThis attribute is mainly used for Flux LoRA models","items":{"type":"string"}},"status":{"type":"string","description":"The model 
status","enum":["copying","failed","new","trained","training","training-canceled"]}},"required":["capabilities","collectionIds","createdAt","custom","exampleAssetIds","id","privacy","source","status","tags","trainingImagesNumber","type","updatedAt"]}}},"GetUserConfirmInvitationResponse":{},"GetModelsDescriptionByModelIdResponse":{"type":"object","required":["description"],"properties":{"description":{"type":"object","properties":{"models":{"type":"array","description":"The list of models referenced by the Markdown `{model}` tag in the description.","items":{"type":"object","properties":{"name":{"type":"string","description":"The model name (example: \"Cinematic Realism\")"},"privacy":{"type":"string","description":"The privacy of the model (default: private)","enum":["private","public","unlisted"],"default":"private"},"id":{"type":"string","description":"The model ID (example: \"model_eyVcnFJcR92BxBkz7N6g5w\")"},"shortDescription":{"type":"string","description":"The model short description (example: \"This model generates highly detailed cinematic scenes.\")"},"authorId":{"type":"string","description":"The author user ID (example: \"user_VFhihHKMRZyDDnZAJwLb2Q\")"},"ownerId":{"type":"string","description":"The owner ID (example: \"team_VFhihHKMRZyDDnZAJwLb2Q\")"},"type":{"type":"string","description":"The model type (example: \"flux.1-lora\")","enum":["custom","flux.1","flux.1-composition","flux.1-kontext-dev","flux.1-kontext-lora","flux.1-krea-dev","flux.1-krea-lora","flux.1-lora","flux.1-pro","flux.1.1-pro-ultra","flux1.1-pro","gpt-image-1","sd-1_5","sd-1_5-composition","sd-1_5-lora","sd-xl","sd-xl-composition","sd-xl-lora"]}},"required":["id","privacy","type"]}},"assets":{"type":"array","description":"The list of assets referenced by the Markdown `{asset}` tag in the description.","items":{"type":"object","properties":{"preview":{"type":"object","description":"The asset's preview.\n\nContains the assetId and the url of the 
preview.","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"thumbnail":{"type":"object","description":"The asset's thumbnail.\n\nContains the assetId and the url of the thumbnail.","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"kind":{"type":"string","description":"The kind of asset","enum":["3d","audio","document","image","image-hdr","json","video"]},"privacy":{"type":"string","description":"The privacy of the asset","enum":["private","public","unlisted"]},"id":{"type":"string","description":"The asset ID (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"mimeType":{"type":"string","description":"The mime type of the asset (example: \"image/png\")"},"originalFileUrl":{"type":"string","description":"The original file url.\n\nContains the url of the original file, without any conversion. Only available for some specific video, audio and threeD assets.\nIs only specified if the given asset data has been replaced with a new file during the creation of the asset."},"source":{"type":"string","description":"Source of the 
asset","enum":["3d23d","3d23d:texture","3d:texture","3d:texture:albedo","3d:texture:metallic","3d:texture:mtl","3d:texture:normal","3d:texture:roughness","audio2audio","background-removal","canvas","canvas-drawing","canvas-export","detection","generative-fill","image-prompt-editing","img23d","img2img","img2video","inference-control-net","inference-control-net-img","inference-control-net-inpainting","inference-control-net-inpainting-ip-adapter","inference-control-net-ip-adapter","inference-control-net-reference","inference-control-net-texture","inference-img","inference-img-ip-adapter","inference-img-texture","inference-in-paint","inference-in-paint-ip-adapter","inference-reference","inference-reference-texture","inference-txt","inference-txt-ip-adapter","inference-txt-texture","patch","pixelization","reframe","restyle","segment","segmentation-image","segmentation-mask","skybox-3d","skybox-base-360","skybox-hdri","texture","texture:albedo","texture:ao","texture:edge","texture:height","texture:metallic","texture:normal","texture:smoothness","txt23d","txt2audio","txt2img","txt2video","unknown","uploaded","uploaded-3d","uploaded-audio","uploaded-avatar","uploaded-video","upscale","upscale-skybox","upscale-texture","upscale-video","vectorization","video2img","video2video"]},"authorId":{"type":"string","description":"The author user ID (example: \"dcf121faaa1a0a0bbbd9ca1b73d62aea\")"},"ownerId":{"type":"string","description":"The owner (project) ID (example: \"proj_23tlk332lkht3kl2\" or \"team_dlkhgs23tlk3hlkth32lkht3kl2\" for old teams)"},"properties":{"type":"object","description":"The properties of the asset, content may depend on the kind of asset returned","properties":{"transcription":{"type":"object","description":"Transcription of the audio","properties":{"text":{"type":"string"}},"required":["text"]},"hasNormals":{"type":"boolean","description":"Whether the mesh has normal vectors"},"format":{"type":"string","description":"Format of the mesh file (e.g. 
'glb', etc.)"},"description":{"type":"string","description":"Description of the audio"},"bitrate":{"type":"number","description":"Bitrate of the media in bits per second"},"boneCount":{"type":"number","description":"Number of bones if skeleton exists"},"classification":{"type":"string","description":"Classification of the audio","enum":["effect","interview","music","other","sound","speech","text","unknown"]},"sampleRate":{"type":"number","description":"Sample rate of the media in Hz"},"faceCount":{"type":"number","description":"Number of faces/triangles in the mesh"},"vertexCount":{"type":"number","description":"Number of vertices in the mesh"},"animationFrameCount":{"type":"number","description":"Number of animation frames if animations exist"},"duration":{"type":"number","description":"Duration of the media in seconds"},"frameRate":{"type":"number","description":"Frame rate of the video in frames per second"},"nbFrames":{"type":"number","description":"Number of frames in the video"},"channels":{"type":"number","description":"Number of channels of the audio"},"size":{"type":"number"},"hasSkeleton":{"type":"boolean","description":"Whether the mesh has bones/skeleton"},"width":{"type":"number"},"hasAnimations":{"type":"boolean","description":"Whether the mesh has animations"},"codecName":{"type":"string","description":"Codec name of the media"},"hasUVs":{"type":"boolean","description":"Whether the mesh has UV coordinates"},"dimensions":{"type":"array","description":"Bounding box dimensions [width, height, depth]","items":{}},"height":{"type":"number"}},"required":["size"]},"url":{"type":"string","description":"Signed URL to get the asset content"}},"required":["authorId","id","kind","mimeType","ownerId","privacy","properties","source","url"]}},"value":{"type":"string","description":"The markdown description of the model (ex: `# My model`).\nWe allow the `{asset:<assetId>}` and `{model:<modelId>}` 
tags."}},"required":["assets","models","value"]}}},"PostGenerativeFillInferencesRequest":{"type":"object","required":["image"],"properties":{"image":{"type":"string","description":"The image from which the mask will be refilled generatively. Must reference an existing AssetId or be a data URL."},"targetHeight":{"type":"number","description":"The target height of the output image.","minimum":0,"maximum":2048},"seed":{"type":"number","description":"Used to reproduce previous results. Default: randomly generated number.","minimum":0,"maximum":2147483647},"numInferenceSteps":{"type":"number","description":"The number of denoising steps for each image generation.","minimum":5,"maximum":50},"promptFidelity":{"type":"number","description":"Increase the fidelity to the prompt during the restyle.","minimum":1,"maximum":15},"maskId":{"type":"string","description":"The mask as an AssetId, used to determine the area to refill generatively. Will be ignored if the `mask` parameter is provided. Must reference an existing AssetId."},"prompt":{"type":"string","description":"A full text prompt to guide the repaint process."},"mask":{"type":"string","description":"The mask as a data URL, used to determine the area of change. The mask is a binary mask made out of white and black pixels. The white area is the one that will be replaced. 
(example: \"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAgAAAAIACAMAAADDpiTIAAABiVBMVEUAAADw8PDCwsLExMT5+fn19fX8/Pz////+/v79/f2hoaH6+vrc3NxnZ2dDQ0P4+PhkZGTs7OzOzs6Ojo709PRiYmLd3d1paWkoKCji4uI9PT3n5+fe3t7z8/NISEiysrLg4ODk5OSYmJh/f3/u7u5lZWVRUVHS0tKIiIg+Pj7p6emXl5dUVFQYGBjKysqtra1TU1PT09M8PDwcHBzR0dHq6uoEBAQmJiZ8fHzm5ub7+/swMDCrq6uKioqpqalHR0c3NzdOTk6BgYF7e3uwsLCAgIB3d3empqaNjY06OjrW1tZhYWG0tLQgICBxcXEICAhPT0/o6OgkJCRzc3N5eXnV1dXj4+NKSkobGxtaWlpfX1/a2trBwcF2dnYlJSV9fX3Hx8eSkpJNTU1sbGyWlpYRERGCgoIMDAzPz8+MjIy4uLiTk5PNzc3X19cxMTGDg4MpKSm8vLxGRkavr69QUFAKCgoqKiq2trbt7e329vaGhobl5eVra2tZWVk4ODgzMzNcXFyurq63t7dzhmTOAAAFeElEQVR4nO3dZXMUaRQF4EBIOgkQdFncFhZfZPF1d3d3d3f95TthC6iQTE9kuk+Y93m+MpW6t8+p7mGkZ2gIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACK8MDyR784eTw9BRkj1RU70rPQuvFqmjXpeWjVPdUMn6VnokUz86+qjemhaM3obAWoqvRYtGX2/DWgFA93K8DK9GS0Yku3AlTr06PRht+6FqB6Mj0bLVjfvQCeBpRgb00BRtLD0YKaAjgFlKCuAF4ULsAKp4CyHakrgLcGC1BXAKeAAihA4cbqCnAyPR2N211XgNH0dDRup2tA4RSgcLUFOJaejsbVFuC59HQ0bltdAW5KT0fjVtcV4GB6OppXV4C30sPRvLoCHE0PR/PqCvBEejiaV1eA9Gy0QAEKV5P/WHo2WlBTgHXp2WhBTQFuSM9GCzwFKNv3ngKU7WL3ApxOz0YLXAHKNqwAZavJf0V6NlpQU4Bd6dloXpd7BLkClKImfwUogO+Glu3VuvwVYPDV5q8AA2+lAhTtVH3+CjDoeuRf3ZgekEYt61WAi+kJaVLt98JdAwbe/b3z14ABdn4u+VfVY+k5acbNc8u/qjalJ6UJd881/47t6WHpu2PzyL/yCzID56755T/llvTM9M8H88+/Y5+XhQbE2QXlP2XVg5/cfnrPmvHLtxYcG3nhu+dXp/dhfmpvCLIw4+mdmLtn+59/xzPptZirRvL3AeLrRkP5uwxcJxrL3y8MLSVP/XHr7P/QYP5VtbbdJelm/7RYduza+ebXmzdfakSj+XvrcIn4tOGYu9uQXp2O92P5u5vAUjDnt3mbkF6exi/z9X5Mb89r0QLcl16faP7uLR9X+1XP5qXXJ5u/AqT1/KZHs35J71+8bP5OAGl7svn/nd6/eNn8t6TXJ5r/4fT27Evm/1N6e6InALeVXwKC+b+c3p2h8FOA29Lbc2+0ANXe9P7FO5MtQPVN+gCULpx/VQ2nj0Dh0vl7KTgsHb/vhoSl46+cArLS6XcsTx+Dkv2QTr/jxfRBKNnT6fQ7VqUPQsneS6dfuZ9Y1IZ0+h3b0gehZHU//9eWC+mDULLwB0Iv8b+AoLHe+TQufQyKlg6/8kpgVjr9ygkgK51+Vd2ZPgRlS8dffZ4+AoVL5+9u0mHh/Hem9y9eMv0xNxLPG0k2IL08Q0PLkwXwCkBe9s2gX9PrcyJaABeBvGwBjqfXJ1sAp4A4BShc+BZxbhaf9ki2AI+n9ydbgNfT65MtwD/p9fk5WoAP0+uTPQVsTm9PtgDp5cm+H3
QmvTxD0VNAenWmrIvlfzC9OpeE4h87ml6c/2XuF74svTZXBOIfOZVemqv29w6sv/79KL0y00y0m/+59L5ca0u7DUivywyrekQ2+vGhPj5VWJdelxkOdAtrbNvE5ceM960ByU2Z3UMzY1q56cj0x1xQgEG26WpA4wfWfjnrY/p0SxG/FbI0vTP1EcHDq7fWPOTtvhTgfGsr0Xf9uLHUV+klWIQ/F38deCO9A4uz2E+TH0ovwGIt7ovl6enpg4nRBefvi+EDYniBLw29lB6cvhlewNMBN4gfMOcmT9yxfe4XhInef5Hr0dmtk5NbJ799Ze36uvg3/pWek+btXdkl/jW/p0ejLbuufXYwtvvd9EwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAPfwHLuRXafg0SKQAAAAASUVORK5CYII=\")"},"negativePrompt":{"type":"string","description":"A negative full text prompt that discourages the repaint from generating certain characteristics. It is recommended to test without using a negative prompt."},"targetWidth":{"type":"number","description":"The target width of the output image.","minimum":0,"maximum":2048}}},"PostTxt2imgIpAdapterInferencesRequest":{"type":"object","required":["modelId","prompt"],"properties":{"ipAdapterImageIds":{"type":"array","description":"The IpAdapter images as an AssetId. Will be ignored if the `ipAdapterImages` parameter is provided","items":{"type":"string"}},"ipAdapterImageId":{"type":"string","description":"Deprecated for type txt2img-ip-adapter and img2img-ip-adapter, use `ipAdapterImageIds` instead.\nThe IpAdapter image as an AssetId. Cannot be set if `ipAdapterImage` is provided. Will be ignored if the `ipAdapterImageIds` parameter is provided."},"ipAdapterScale":{"type":"number","description":"Deprecated for type txt2img-ip-adapter and img2img-ip-adapter, use `ipAdapterScales` instead.\nIpAdapter scale factor (within [0.0, 1.0], default: 0.9). Will be ignored if the `ipAdapterScales` parameter is provided","minimum":0,"maximum":1},"seed":{"type":"string","description":"Used to reproduce previous results. Default: randomly generated number."},"modelId":{"type":"string","description":"The model id to use for the inference"},"ipAdapterType":{"type":"string","description":"The type of IP Adapter model to use. 
Must be one of [`style`, `character`], defaults to `style`","enum":["character","style"],"default":"style"},"modelEpoch":{"type":"string","description":"The epoch of the model to use for the inference. Only available for Flux Lora Trained models."},"hideResults":{"type":"boolean","description":"If set, generated assets will be hidden and not returned in the list of images of the inference\nor when listing assets (default: false)","default":false},"aspectRatio":{"type":"string","description":"The aspect ratio of the generated images. Only used for the model flux.1.1-pro-ultra.\nThe aspect ratio is a string formatted as \"width:height\" (example: \"16:9\").","enum":["16:9","1:1","21:9","2:3","3:2","3:4","4:3","4:5","5:4","9:16","9:21"],"default":"1:1"},"ipAdapterScales":{"type":"array","description":"IpAdapter scale factors (within [0.0, 1.0], default: 0.9).","items":{"type":"number"}},"ipAdapterImage":{"type":"string","description":"Deprecated for type txt2img-ip-adapter and img2img-ip-adapter, use `ipAdapterImages` instead.\nThe IpAdapter image as a data url. Will be ignored if the `ipAdapterImages` parameter is provided."},"negativePrompt":{"type":"string","description":"The prompt not to guide the image generation, ignored when guidance < 1 (example: \"((ugly face))\")\nFor Flux based model (not Fast-Flux): requires negativePromptStrength > 0 and active only for inference types txt2img / img2img / controlnet."},"scheduler":{"type":"string","description":"The scheduler to use to override the default configured for the model. 
See detailed documentation for more details.","enum":["DDIMScheduler","DDPMScheduler","DEISMultistepScheduler","DPMSolverMultistepScheduler","DPMSolverSinglestepScheduler","EulerAncestralDiscreteScheduler","EulerDiscreteScheduler","HeunDiscreteScheduler","KDPM2AncestralDiscreteScheduler","KDPM2DiscreteScheduler","LCMScheduler","LMSDiscreteScheduler","PNDMScheduler","TCDScheduler","UniPCMultistepScheduler"]},"intermediateImages":{"type":"boolean","description":"Enable or disable the intermediate images generation (default: false)","default":false},"concepts":{"type":"array","items":{"type":"object","properties":{"modelId":{"type":"string","description":"The model ID (example: \"model_eyVcnFJcR92BxBkz7N6g5w\")"},"modelEpoch":{"type":"string","description":"The epoch of the model (example: \"000001\")\nOnly available for Flux Lora Trained models"},"scale":{"type":"number","description":"The scale of the model (example: 1.0)\nFor Flux Kontext Prompt Editing, the scale is between 0 and 2.","minimum":-2,"maximum":2}},"required":["modelId","scale"]}},"guidance":{"type":"number","description":"Controls how closely the generated image follows the prompt. Higher values result in stronger adherence to the prompt. 
Default and allowed values depend on the model type:\n- For Flux dev models, the default is 3.5 and allowed values are within [0, 10]\n- For Flux pro models, the default is 3 and allowed values are within [2, 5]\n- For SDXL models, the default is 6 and allowed values are within [0, 20]\n- For SD1.5 models, the default is 7.5 and allowed values are within [0, 20]","minimum":0,"maximum":20},"numInferenceSteps":{"type":"number","description":"The number of denoising steps for each image generation (within [1, 150], default: 30)","minimum":1,"maximum":150},"numSamples":{"type":"number","description":"The number of images to generate (within [1, 128], default: 4)","minimum":1,"maximum":128},"ipAdapterImages":{"type":"array","description":"The IpAdapter images as a data url.","items":{"type":"string"}},"width":{"type":"number","description":"The width of the generated images, must be a 8 multiple (within [64, 2048], default: 512)\nIf model.type is `sd-xl`, `sd-xl-lora`, `sd-xl-composition` the width must be within [512, 2048]\nIf model.type is `sd-1_5`, the width must be within [64, 1024]\nIf model.type is `flux.1.1-pro-ultra`, you can use the aspectRatio parameter instead","minimum":64,"maximum":2048},"negativePromptStrength":{"type":"number","description":"Only applicable for flux-dev based models for `txt2img`, `img2img`, and `controlnet` inference types.\n\nControls the influence of the negative prompt. Default 0 means the negative prompt has no effect. Higher values increase negative prompt influence.\nMust be > 0 if negativePrompt is provided.","minimum":0,"maximum":10},"baseModelId":{"type":"string","description":"The base model to use for the inference. Only Flux LoRA models can use this parameter.\nAllowed values are available in the model's attribute: `compliantModelIds`"},"prompt":{"type":"string","description":"Full text prompt including the model placeholder. 
(example: \"an illustration of phoenix in a fantasy world, flying over a mountain, 8k, bokeh effect\")"},"height":{"type":"number","description":"The height of the generated images, must be a 8 multiple (within [64, 2048], default: 512)\nIf model.type is `sd-xl`, `sd-xl-lora`, `sd-xl-composition` the height must be within [512, 2048]\nIf model.type is `sd-1_5`, the height must be within [64, 1024]\nIf model.type is `flux.1.1-pro-ultra`, you can use the aspectRatio parameter instead","minimum":64,"maximum":2048}}},"PostControlnetTextureInferencesRequest":{"type":"object","required":["modality","modelId","prompt"],"properties":{"controlEnd":{"type":"number","description":"Specifies how long the ControlNet guidance should be applied during the inference process.\n\nOnly available for Flux.1-dev based models.\n\nThe value represents the percentage of total inference steps where the ControlNet guidance is active.\nFor example:\n- 1.0: ControlNet guidance is applied during all inference steps\n- 0.5: ControlNet guidance is only applied during the first half of inference steps\n\nDefault values:\n- 0.5 for Canny modality\n- 0.6 for all other modalities","minimum":0.1,"maximum":1},"modality":{"type":"string","description":"The modality associated with the control image used for the generation: it can either be an object with a combination of maximum\n\nFor models of SD1.5 family:\n - up to 3 modalities from `canny`, `pose`, `depth`, `lines`, `seg`, `scribble`, `lineart`, `normal-map`, `illusion`\n - or one of the following presets: `character`, `landscape`, `city`, `interior`.\n\nFor models of the SDXL family:\n - up to 3 modalities from `canny`, `pose`, `depth`, `seg`, `illusion`, `scribble`\n - or one of the following presets: `character`, `landscape`.\n\nFor models of the FLUX schnell or dev families:\n- one modality from: `canny`, `tile`, `depth`, `blur`, `pose`, `gray`, `low-quality`\n\nOptionally, you can associate a value to these modalities or presets. 
The value must be within `]0.0, 1.0]`.\n\nExamples:\n- `canny`\n- `depth:0.5,pose:1.0`\n- `canny:0.5,depth:0.5,lines:0.3`\n- `landscape`\n- `character:0.5`\n- `illusion:1`\n\nNote: if you use a value that is not supported by the model family, this will result in an error.","enum":["blur","canny","depth","gray","illusion","lineart","lines","low-quality","normal-map","pose","scribble","seg","sketch","tile"]},"seed":{"type":"string","description":"Used to reproduce previous results. Default: randomly generated number."},"modelId":{"type":"string","description":"The model id to use for the inference"},"scheduler":{"type":"string","description":"The scheduler to use to override the default configured for the model. See detailed documentation for more details.","enum":["DDIMScheduler","DDPMScheduler","DEISMultistepScheduler","DPMSolverMultistepScheduler","DPMSolverSinglestepScheduler","EulerAncestralDiscreteScheduler","EulerDiscreteScheduler","HeunDiscreteScheduler","KDPM2AncestralDiscreteScheduler","KDPM2DiscreteScheduler","LCMScheduler","LMSDiscreteScheduler","PNDMScheduler","TCDScheduler","UniPCMultistepScheduler"]},"disableModalityDetection":{"type":"boolean","description":"If false, the process uses the given image to detect the modality.\nIf true (default), the process will not try to detect the modality of the given image.\n\nFor example:\nwith `pose` modality and `false` value, the process will detect the pose of people in the given image\nwith `depth` modality and `false` value, the process will detect the depth of the given image\nwith `scribble` modality and `true` value, the process will use the given image as a scribble\n\n⚠️ For models of the FLUX schnell or dev families, this parameter is ignored. The modality detection is always disabled. 
⚠️","default":true},"imageParentId":{"type":"string","description":"Specifies the parent asset Id for the image when provided as a dataurl."},"baseModelId":{"type":"string","description":"The base model to use for the inference. Only Flux LoRA models can use this parameter.\nAllowed values are available in the model's attribute: `compliantModelIds`"},"controlStart":{"type":"number","description":"Specifies the starting point of the ControlNet guidance during the inference process.\n\nOnly available for Flux.1-dev based models.\n\nThe value represents the percentage of total inference steps where the ControlNet guidance starts.\nFor example:\n- 0.0: ControlNet guidance starts at the beginning of the inference steps\n- 0.5: ControlNet guidance starts at the middle of the inference steps","minimum":0,"maximum":0.9},"height":{"type":"number","description":"The height of the generated images, must be a 8 multiple (within [64, 2048], default: 512)\nIf model.type is `sd-xl`, `sd-xl-lora`, `sd-xl-composition` the height must be within [512, 2048]\nIf model.type is `sd-1_5`, the height must be within [64, 1024]\nIf model.type is `flux.1.1-pro-ultra`, you can use the aspectRatio parameter instead","minimum":64,"maximum":2048},"imageHide":{"type":"boolean","description":"Toggles the hidden status of the image when provided as a dataurl.","default":false},"controlImageId":{"type":"string","description":"The controlnet input image as an AssetId. Will be ignored if the `controlnet` parameter is provided"},"image":{"type":"string","description":"The input image as a data URL (example: \"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVQYV2NgYAAAAAMAAWgmWQ0AAAAASUVORK5CYII=\") or the asset ID (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"imageId":{"type":"string","description":"Deprecated: The input image as an AssetId. 
Prefer to use image with the asset ID instead."},"modelEpoch":{"type":"string","description":"The epoch of the model to use for the inference. Only available for Flux Lora Trained models."},"hideResults":{"type":"boolean","description":"If set, generated assets will be hidden and not returned in the list of images of the inference\nor when listing assets (default: false)","default":false},"negativePrompt":{"type":"string","description":"The prompt not to guide the image generation, ignored when guidance < 1 (example: \"((ugly face))\")\nFor Flux based model (not Fast-Flux): requires negativePromptStrength > 0 and active only for inference types txt2img / img2img / controlnet."},"intermediateImages":{"type":"boolean","description":"Enable or disable the intermediate images generation (default: false)","default":false},"concepts":{"type":"array","items":{"type":"object","properties":{"modelId":{"type":"string","description":"The model ID (example: \"model_eyVcnFJcR92BxBkz7N6g5w\")"},"modelEpoch":{"type":"string","description":"The epoch of the model (example: \"000001\")\nOnly available for Flux Lora Trained models"},"scale":{"type":"number","description":"The scale of the model (example: 1.0)\nFor Flux Kontext Prompt Editing, the scale is between 0 and 2.","minimum":-2,"maximum":2}},"required":["modelId","scale"]}},"controlImage":{"type":"string","description":"The controlnet input image as a data URL (example: \"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVQYV2NgYAAAAAMAAWgmWQ0AAAAASUVORK5CYII=\")"},"guidance":{"type":"number","description":"Controls how closely the generated image follows the prompt. Higher values result in stronger adherence to the prompt. 
Default and allowed values depend on the model type:\n- For Flux dev models, the default is 3.5 and allowed values are within [0, 10]\n- For Flux pro models, the default is 3 and allowed values are within [2, 5]\n- For SDXL models, the default is 6 and allowed values are within [0, 20]\n- For SD1.5 models, the default is 7.5 and allowed values are within [0, 20]","minimum":0,"maximum":20},"numInferenceSteps":{"type":"number","description":"The number of denoising steps for each image generation (within [1, 150], default: 30)","minimum":1,"maximum":150},"numSamples":{"type":"number","description":"The number of images to generate (within [1, 128], default: 4)","minimum":1,"maximum":128},"width":{"type":"number","description":"The width of the generated images, must be a 8 multiple (within [64, 2048], default: 512)\nIf model.type is `sd-xl`, `sd-xl-lora`, `sd-xl-composition` the width must be within [512, 2048]\nIf model.type is `sd-1_5`, the width must be within [64, 1024]\nIf model.type is `flux.1.1-pro-ultra`, you can use the aspectRatio parameter instead","minimum":64,"maximum":2048},"negativePromptStrength":{"type":"number","description":"Only applicable for flux-dev based models for `txt2img`, `img2img`, and `controlnet` inference types.\n\nControls the influence of the negative prompt. Default 0 means the negative prompt has no effect. Higher values increase negative prompt influence.\nMust be > 0 if negativePrompt is provided.","minimum":0,"maximum":10},"prompt":{"type":"string","description":"Full text prompt including the model placeholder. (example: \"an illustration of phoenix in a fantasy world, flying over a mountain, 8k, bokeh effect\")"}}},"PostRemoveBackgroundInferencesRequest":{"type":"object","required":["image"],"properties":{"image":{"type":"string","description":"The asset ID (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\"). 
If provided, image and name will be ignored."},"backgroundColor":{"type":"string","description":"The background color as an hexadecimal code (ex: \"#FFFFFF\"), an html color (ex: \"red\") or \"transparent\" if \"format\" is \"png\""},"format":{"type":"string","description":"The output format. Default is 'png'","enum":["jpeg","png"],"default":"png"}}},"ServiceUnavailableResponse":{"type":"object","required":["apiError","createdAt","error","id","infos","updatedAt"],"properties":{"createdAt":{"type":"string"},"apiError":{"type":"string"},"id":{"type":"string"},"error":{"type":"string"},"infos":{"type":"object","properties":{}},"updatedAt":{"type":"string"}}},"DeleteProjectWebhookEndpointByIdResponse":{},"PutCollectionsByCollectionIdResponse":{"type":"object","required":["collection"],"properties":{"collection":{"type":"object","properties":{"createdAt":{"type":"string","description":"The collection creation date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"thumbnail":{"type":"object","description":"The thumbnail for the collection (if any)","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"name":{"type":"string","description":"The collection name"},"assetCount":{"type":"number"},"id":{"type":"string","description":"The collection ID (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"ownerId":{"type":"string","description":"The owner ID (example: \"dcf121faaa1a0a0bbbd9ca1b73d62aea\")"},"modelCount":{"type":"number"},"itemCount":{"type":"number"},"updatedAt":{"type":"string","description":"The collection last update date as an ISO string (example: 
\"2023-02-03T11:19:41.579Z\")"}},"required":["assetCount","createdAt","id","itemCount","modelCount","name","ownerId","updatedAt"]}}},"NotImplementedResponse":{"type":"object","required":["apiError","createdAt","error","id","infos","updatedAt"],"properties":{"createdAt":{"type":"string"},"apiError":{"type":"string"},"id":{"type":"string"},"error":{"type":"string"},"infos":{"type":"object","properties":{}},"updatedAt":{"type":"string"}}},"PostControlnetInpaintInferencesRequest":{"type":"object","required":["modality","modelId","prompt"],"properties":{"controlEnd":{"type":"number","description":"Specifies how long the ControlNet guidance should be applied during the inference process.\n\nOnly available for Flux.1-dev based models.\n\nThe value represents the percentage of total inference steps where the ControlNet guidance is active.\nFor example:\n- 1.0: ControlNet guidance is applied during all inference steps\n- 0.5: ControlNet guidance is only applied during the first half of inference steps\n\nDefault values:\n- 0.5 for Canny modality\n- 0.6 for all other modalities","minimum":0.1,"maximum":1},"modality":{"type":"string","description":"The modality associated with the control image used for the generation: it can either be an object with a combination of maximum\n\nFor models of SD1.5 family:\n - up to 3 modalities from `canny`, `pose`, `depth`, `lines`, `seg`, `scribble`, `lineart`, `normal-map`, `illusion`\n - or one of the following presets: `character`, `landscape`, `city`, `interior`.\n\nFor models of the SDXL family:\n - up to 3 modalities from `canny`, `pose`, `depth`, `seg`, `illusion`, `scribble`\n - or one of the following presets: `character`, `landscape`.\n\nFor models of the FLUX schnell or dev families:\n- one modality from: `canny`, `tile`, `depth`, `blur`, `pose`, `gray`, `low-quality`\n\nOptionally, you can associate a value to these modalities or presets. 
The value must be within `]0.0, 1.0]`.\n\nExamples:\n- `canny`\n- `depth:0.5,pose:1.0`\n- `canny:0.5,depth:0.5,lines:0.3`\n- `landscape`\n- `character:0.5`\n- `illusion:1`\n\nNote: if you use a value that is not supported by the model family, this will result in an error.","enum":["blur","canny","depth","gray","illusion","lineart","lines","low-quality","normal-map","pose","scribble","seg","sketch","tile"]},"seed":{"type":"string","description":"Used to reproduce previous results. Default: randomly generated number."},"strength":{"type":"number","description":"Controls the noise intensity introduced to the input image, where a value of 1.0 completely erases the original image's details. Available for img2img and inpainting. (within [0.01, 1.0], default: 0.75)","minimum":0.01,"maximum":1},"modelId":{"type":"string","description":"The model id to use for the inference"},"scheduler":{"type":"string","description":"The scheduler to use to override the default configured for the model. See detailed documentation for more details.","enum":["DDIMScheduler","DDPMScheduler","DEISMultistepScheduler","DPMSolverMultistepScheduler","DPMSolverSinglestepScheduler","EulerAncestralDiscreteScheduler","EulerDiscreteScheduler","HeunDiscreteScheduler","KDPM2AncestralDiscreteScheduler","KDPM2DiscreteScheduler","LCMScheduler","LMSDiscreteScheduler","PNDMScheduler","TCDScheduler","UniPCMultistepScheduler"]},"disableMerging":{"type":"boolean","description":"If set to true, the entire input image will likely change during inpainting. 
This results in faster inferences, but the output image will be harder to integrate if the input is just a small part of a larger image.","default":false},"disableModalityDetection":{"type":"boolean","description":"If false, the process uses the given image to detect the modality.\nIf true (default), the process will not try to detect the modality of the given image.\n\nFor example:\nwith `pose` modality and `false` value, the process will detect the pose of people in the given image\nwith `depth` modality and `false` value, the process will detect the depth of the given image\nwith `scribble` modality and `true` value, the process will use the given image as a scribble\n\n⚠️ For models of the FLUX schnell or dev families, this parameter is ignored. The modality detection is always disabled. ⚠️","default":true},"imageParentId":{"type":"string","description":"Specifies the parent asset Id for the image when provided as a dataurl."},"baseModelId":{"type":"string","description":"The base model to use for the inference. 
Only Flux LoRA models can use this parameter.\nAllowed values are available in the model's attribute: `compliantModelIds`"},"controlStart":{"type":"number","description":"Specifies the starting point of the ControlNet guidance during the inference process.\n\nOnly available for Flux.1-dev based models.\n\nThe value represents the percentage of total inference steps where the ControlNet guidance starts.\nFor example:\n- 0.0: ControlNet guidance starts at the beginning of the inference steps\n- 0.5: ControlNet guidance starts at the middle of the inference steps","minimum":0,"maximum":0.9},"height":{"type":"number","description":"The height of the generated images, must be a 8 multiple (within [64, 2048], default: 512)\nIf model.type is `sd-xl`, `sd-xl-lora`, `sd-xl-composition` the height must be within [512, 2048]\nIf model.type is `sd-1_5`, the height must be within [64, 1024]\nIf model.type is `flux.1.1-pro-ultra`, you can use the aspectRatio parameter instead","minimum":64,"maximum":2048},"imageHide":{"type":"boolean","description":"Toggles the hidden status of the image when provided as a dataurl.","default":false},"mask":{"type":"string","description":"The mask as a data URL, used to determine the area of change. The mask is a binary mask made out of white and black pixels. The white area is the one that will be replaced. 
(example: \"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAgAAAAIACAMAAADDpiTIAAABiVBMVEUAAADw8PDCwsLExMT5+fn19fX8/Pz////+/v79/f2hoaH6+vrc3NxnZ2dDQ0P4+PhkZGTs7OzOzs6Ojo709PRiYmLd3d1paWkoKCji4uI9PT3n5+fe3t7z8/NISEiysrLg4ODk5OSYmJh/f3/u7u5lZWVRUVHS0tKIiIg+Pj7p6emXl5dUVFQYGBjKysqtra1TU1PT09M8PDwcHBzR0dHq6uoEBAQmJiZ8fHzm5ub7+/swMDCrq6uKioqpqalHR0c3NzdOTk6BgYF7e3uwsLCAgIB3d3empqaNjY06OjrW1tZhYWG0tLQgICBxcXEICAhPT0/o6OgkJCRzc3N5eXnV1dXj4+NKSkobGxtaWlpfX1/a2trBwcF2dnYlJSV9fX3Hx8eSkpJNTU1sbGyWlpYRERGCgoIMDAzPz8+MjIy4uLiTk5PNzc3X19cxMTGDg4MpKSm8vLxGRkavr69QUFAKCgoqKiq2trbt7e329vaGhobl5eVra2tZWVk4ODgzMzNcXFyurq63t7dzhmTOAAAFeElEQVR4nO3dZXMUaRQF4EBIOgkQdFncFhZfZPF1d3d3d3f95TthC6iQTE9kuk+Y93m+MpW6t8+p7mGkZ2gIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACK8MDyR784eTw9BRkj1RU70rPQuvFqmjXpeWjVPdUMn6VnokUz86+qjemhaM3obAWoqvRYtGX2/DWgFA93K8DK9GS0Yku3AlTr06PRht+6FqB6Mj0bLVjfvQCeBpRgb00BRtLD0YKaAjgFlKCuAF4ULsAKp4CyHakrgLcGC1BXAKeAAihA4cbqCnAyPR2N211XgNH0dDRup2tA4RSgcLUFOJaejsbVFuC59HQ0bltdAW5KT0fjVtcV4GB6OppXV4C30sPRvLoCHE0PR/PqCvBEejiaV1eA9Gy0QAEKV5P/WHo2WlBTgHXp2WhBTQFuSM9GCzwFKNv3ngKU7WL3ApxOz0YLXAHKNqwAZavJf0V6NlpQU4Bd6dloXpd7BLkClKImfwUogO+Glu3VuvwVYPDV5q8AA2+lAhTtVH3+CjDoeuRf3ZgekEYt61WAi+kJaVLt98JdAwbe/b3z14ABdn4u+VfVY+k5acbNc8u/qjalJ6UJd881/47t6WHpu2PzyL/yCzID56755T/llvTM9M8H88+/Y5+XhQbE2QXlP2XVg5/cfnrPmvHLtxYcG3nhu+dXp/dhfmpvCLIw4+mdmLtn+59/xzPptZirRvL3AeLrRkP5uwxcJxrL3y8MLSVP/XHr7P/QYP5VtbbdJelm/7RYduza+ebXmzdfakSj+XvrcIn4tOGYu9uQXp2O92P5u5vAUjDnt3mbkF6exi/z9X5Mb89r0QLcl16faP7uLR9X+1XP5qXXJ5u/AqT1/KZHs35J71+8bP5OAGl7svn/nd6/eNn8t6TXJ5r/4fT27Evm/1N6e6InALeVXwKC+b+c3p2h8FOA29Lbc2+0ANXe9P7FO5MtQPVN+gCULpx/VQ2nj0Dh0vl7KTgsHb/vhoSl46+cArLS6XcsTx+Dkv2QTr/jxfRBKNnT6fQ7VqUPQsneS6dfuZ9Y1IZ0+h3b0gehZHU//9eWC+mDULLwB0Iv8b+AoLHe+TQufQyKlg6/8kpgVjr9ygkgK51+Vd2ZPgRlS8dffZ4+AoVL5+9u0mHh/Hem9y9eMv0xNxLPG0k2IL08Q0PLkwXwCkBe9s2gX9PrcyJaABeBvGwBjqfXJ1sAp4A4BShc+BZxbhaf9ki2AI+n9ydbgNfT65MtwD/p9fk5WoAP0+uTPQVsTm9PtgDp5cm+H3
QmvTxD0VNAenWmrIvlfzC9OpeE4h87ml6c/2XuF74svTZXBOIfOZVemqv29w6sv/79KL0y00y0m/+59L5ca0u7DUivywyrekQ2+vGhPj5VWJdelxkOdAtrbNvE5ceM960ByU2Z3UMzY1q56cj0x1xQgEG26WpA4wfWfjnrY/p0SxG/FbI0vTP1EcHDq7fWPOTtvhTgfGsr0Xf9uLHUV+klWIQ/F38deCO9A4uz2E+TH0ovwGIt7ovl6enpg4nRBefvi+EDYniBLw29lB6cvhlewNMBN4gfMOcmT9yxfe4XhInef5Hr0dmtk5NbJ799Ze36uvg3/pWek+btXdkl/jW/p0ejLbuufXYwtvvd9EwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAPfwHLuRXafg0SKQAAAAASUVORK5CYII=\")"},"controlImageId":{"type":"string","description":"The controlnet input image as an AssetId. Will be ignored if the `controlnet` parameter is provided"},"image":{"type":"string","description":"The input image as a data URL (example: \"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVQYV2NgYAAAAAMAAWgmWQ0AAAAASUVORK5CYII=\") or the asset ID (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"imageId":{"type":"string","description":"Deprecated: The input image as an AssetId. Prefer to use image with the asset ID instead."},"modelEpoch":{"type":"string","description":"The epoch of the model to use for the inference. Only available for Flux Lora Trained models."},"hideResults":{"type":"boolean","description":"If set, generated assets will be hidden and not returned in the list of images of the inference\nor when listing assets (default: false)","default":false},"maskId":{"type":"string","description":"The mask as an AssetId. 
Will be ignored if the `mask` parameter is provided"},"negativePrompt":{"type":"string","description":"The prompt not to guide the image generation, ignored when guidance < 1 (example: \"((ugly face))\")\nFor Flux based model (not Fast-Flux): requires negativePromptStrength > 0 and active only for inference types txt2img / img2img / controlnet."},"intermediateImages":{"type":"boolean","description":"Enable or disable the intermediate images generation (default: false)","default":false},"concepts":{"type":"array","items":{"type":"object","properties":{"modelId":{"type":"string","description":"The model ID (example: \"model_eyVcnFJcR92BxBkz7N6g5w\")"},"modelEpoch":{"type":"string","description":"The epoch of the model (example: \"000001\")\nOnly available for Flux Lora Trained models"},"scale":{"type":"number","description":"The scale of the model (example: 1.0)\nFor Flux Kontext Prompt Editing, the scale is between 0 and 2.","minimum":-2,"maximum":2}},"required":["modelId","scale"]}},"controlImage":{"type":"string","description":"The controlnet input image as a data URL (example: \"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVQYV2NgYAAAAAMAAWgmWQ0AAAAASUVORK5CYII=\")"},"guidance":{"type":"number","description":"Controls how closely the generated image follows the prompt. Higher values result in stronger adherence to the prompt. 
Default and allowed values depend on the model type:\n- For Flux dev models, the default is 3.5 and allowed values are within [0, 10]\n- For Flux pro models, the default is 3 and allowed values are within [2, 5]\n- For SDXL models, the default is 6 and allowed values are within [0, 20]\n- For SD1.5 models, the default is 7.5 and allowed values are within [0, 20]","minimum":0,"maximum":20},"numInferenceSteps":{"type":"number","description":"The number of denoising steps for each image generation (within [1, 150], default: 30)","minimum":1,"maximum":150},"numSamples":{"type":"number","description":"The number of images to generate (within [1, 128], default: 4)","minimum":1,"maximum":128},"width":{"type":"number","description":"The width of the generated images, must be a 8 multiple (within [64, 2048], default: 512)\nIf model.type is `sd-xl`, `sd-xl-lora`, `sd-xl-composition` the width must be within [512, 2048]\nIf model.type is `sd-1_5`, the width must be within [64, 1024]\nIf model.type is `flux.1.1-pro-ultra`, you can use the aspectRatio parameter instead","minimum":64,"maximum":2048},"negativePromptStrength":{"type":"number","description":"Only applicable for flux-dev based models for `txt2img`, `img2img`, and `controlnet` inference types.\n\nControls the influence of the negative prompt. Default 0 means the negative prompt has no effect. Higher values increase negative prompt influence.\nMust be > 0 if negativePrompt is provided.","minimum":0,"maximum":10},"prompt":{"type":"string","description":"Full text prompt including the model placeholder. 
(example: \"an illustration of phoenix in a fantasy world, flying over a mountain, 8k, bokeh effect\")"}}},"PostImg2imgInferencesResponse":{"type":"object","required":["inference","job"],"properties":{"creativeUnitsCost":{"type":"number","description":"The Creative Units cost for the request billed"},"creativeUnitsDiscount":{"type":"number","description":"The Creative Units discount for the request billed"},"inference":{"type":"object","properties":{"image":{"type":"string","description":"Signed URL to display the input image"},"imageId":{"type":"string","description":"Asset id of the input image"},"seed":{"type":"string","description":"Used to reproduce previous results. Default: randomly generated number."},"strength":{"type":"number","description":"Controls the noise intensity introduced to the input image, where a value of 1.0 completely erases the original image's details. Available for img2img and inpainting. (within [0.01, 1.0], default: 0.75)","minimum":0.01,"maximum":1},"modelEpoch":{"type":"string","description":"The epoch of the model to use for the inference. Only available for Flux Lora Trained models."},"hideResults":{"type":"boolean","description":"If set, generated assets will be hidden and not returned in the list of images of the inference\nor when listing assets (default: false)","default":false},"maskId":{"type":"string","description":"Asset id of the mask image"},"type":{"type":"string","description":"The type of inference to use. 
Example: txt2img, img2img, etc.\n\nSelecting the right type will condition the expected parameters.\n\nNote: if model.type is `sd-xl*` or `sd-1_5*`, when using the `\"inpaint\"` inference type, Scenario determines the best available `baseModel` for a given `modelId`: one of `[\"stable-diffusion-inpainting\", \"stable-diffusion-xl-1.0-inpainting-0.1\"]` will be used.","enum":["txt2img","txt2img_ip_adapter","txt2img_texture","img2img","img2img_ip_adapter","img2img_texture","inpaint","outpaint","inpaint_ip_adapter","controlnet","controlnet_ip_adapter","reference","reference_texture","controlnet_reference","controlnet_img2img","controlnet_inpaint","controlnet_inpaint_ip_adapter","controlnet_texture"]},"negativePrompt":{"type":"string","description":"The prompt not to guide the image generation, ignored when guidance < 1 (example: \"((ugly face))\")\nFor Flux based model (not Fast-Flux): requires negativePromptStrength > 0 and active only for inference types txt2img / img2img / controlnet."},"scheduler":{"type":"string","description":"The scheduler to use to override the default configured for the model. 
See detailed documentation for more details.","enum":["DDIMScheduler","DDPMScheduler","DEISMultistepScheduler","DPMSolverMultistepScheduler","DPMSolverSinglestepScheduler","EulerAncestralDiscreteScheduler","EulerDiscreteScheduler","HeunDiscreteScheduler","KDPM2AncestralDiscreteScheduler","KDPM2DiscreteScheduler","LCMScheduler","LMSDiscreteScheduler","PNDMScheduler","TCDScheduler","UniPCMultistepScheduler"]},"intermediateImages":{"type":"boolean","description":"Enable or disable the intermediate images generation (default: false)","default":false},"concepts":{"type":"array","items":{"type":"object","properties":{"modelId":{"type":"string","description":"The model ID (example: \"model_eyVcnFJcR92BxBkz7N6g5w\")"},"modelEpoch":{"type":"string","description":"The epoch of the model (example: \"000001\")\nOnly available for Flux Lora Trained models"},"scale":{"type":"number","description":"The scale of the model (example: 1.0)\nFor Flux Kontext Prompt Editing, the scale is between 0 and 2.","minimum":-2,"maximum":2}},"required":["modelId","scale"]}},"guidance":{"type":"number","description":"Controls how closely the generated image follows the prompt. Higher values result in stronger adherence to the prompt. 
Default and allowed values depend on the model type:\n- For Flux dev models, the default is 3.5 and allowed values are within [0, 10]\n- For Flux pro models, the default is 3 and allowed values are within [2, 5]\n- For SDXL models, the default is 6 and allowed values are within [0, 20]\n- For SD1.5 models, the default is 7.5 and allowed values are within [0, 20]","minimum":0,"maximum":20},"numInferenceSteps":{"type":"number","description":"The number of denoising steps for each image generation (within [1, 150], default: 30)","minimum":1,"maximum":150},"numSamples":{"type":"number","description":"The number of images to generate (within [1, 128], default: 4)","minimum":1,"maximum":128},"width":{"type":"number","description":"The width of the generated images, must be a 8 multiple (within [64, 2048], default: 512)\nIf model.type is `sd-xl`, `sd-xl-lora`, `sd-xl-composition` the width must be within [512, 2048]\nIf model.type is `sd-1_5`, the width must be within [64, 1024]\nIf model.type is `flux.1.1-pro-ultra`, you can use the aspectRatio parameter instead","minimum":64,"maximum":2048},"negativePromptStrength":{"type":"number","description":"Only applicable for flux-dev based models for `txt2img`, `img2img`, and `controlnet` inference types.\n\nControls the influence of the negative prompt. Default 0 means the negative prompt has no effect. Higher values increase negative prompt influence.\nMust be > 0 if negativePrompt is provided.","minimum":0,"maximum":10},"baseModelId":{"type":"string","description":"The base model to use for the inference. Only Flux LoRA models can use this parameter.\nAllowed values are available in the model's attribute: `compliantModelIds`"},"prompt":{"type":"string","description":"Full text prompt including the model placeholder. 
(example: \"an illustration of phoenix in a fantasy world, flying over a mountain, 8k, bokeh effect\")"},"height":{"type":"number","description":"The height of the generated images, must be a 8 multiple (within [64, 2048], default: 512)\nIf model.type is `sd-xl`, `sd-xl-lora`, `sd-xl-composition` the height must be within [512, 2048]\nIf model.type is `sd-1_5`, the height must be within [64, 1024]\nIf model.type is `flux.1.1-pro-ultra`, you can use the aspectRatio parameter instead","minimum":64,"maximum":2048},"mask":{"type":"string","description":"Signed URL to display the mask image"}},"required":["image","imageId","prompt","type"]},"job":{"type":"object","properties":{"createdAt":{"type":"string","description":"The job creation date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"jobId":{"type":"string","description":"The job ID (example: \"job_ocZCnG1Df35XRL1QyCZSRxAG8\")"},"metadata":{"type":"object","description":"Metadata of the job with some additional information","properties":{"output":{"type":"object","description":"The output of the job","properties":{}},"input":{"type":"object","description":"The inputs for the job","properties":{}},"assetIds":{"type":"array","description":"List of produced assets for this job","items":{"type":"string"}},"flow":{"type":"array","description":"The flow of the job. Only available for workflow jobs.","items":{"type":"object","properties":{"jobId":{"type":"string","description":"If the flow is part of a WorkflowJob, this is the jobId for the node.\njobId is only available for nodes started. 
A node \"Pending\" for a running workflow job is not started."},"assets":{"type":"array","description":"List of produced assets for this node.","items":{"type":"object","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]}},"logicType":{"type":"string","description":"The type of the logic for the node.\nOnly available for logic nodes.","enum":["if-else"]},"dependsOn":{"type":"array","description":"The nodes that this node depends on.\nOnly available for nodes that have dependencies. Mainly used for user approval nodes.","items":{"type":"string"}},"modelId":{"type":"string","description":"The model id for the node. Mainly used for custom model tasks."},"inputs":{"type":"array","description":"The inputs of the node.","items":{"type":"object","description":"This is used to run the flow.","properties":{"parent":{"type":"boolean","description":"Whether this input represents a parent asset to assign to the produced assets.\nOnly available for \\`file\\` and \\`file_array\\` input types.\n\nFor \\`file_array\\`, the parent asset is the first item in the array."},"color":{"type":"boolean","description":"Whether the input is a color or not. Only available for \\`string\\` input type."},"inputs":{"type":"array","description":"The list of inputs which form an object within a container array.\nAll inputs are the same as the current object.\nThis is only available for type inputs_array inputs.","items":{"type":"object","properties":{}}},"minLength":{"type":"number","description":"The minimum allowed length for string inputs. Also applies to each item in \\`string_array\\`."},"backgroundBehavior":{"type":"string","description":"Specifies the background behavior for the input. 
Only available for \\`file\\` and \\`file_array\\`\ninput types with kind \\`image\\`.","enum":["opaque","transparent"]},"description":{"type":"string","description":"Help text displayed in the UI to provide additional information about the input"},"type":{"type":"string","description":"The data type of the input","enum":["boolean","file","file_array","inputs_array","model","model_array","number","number_array","string","string_array"]},"required":{"type":"object","description":"Set of rules that describes when this input is required:\n- \\`always\\`: Input is always required\n- \\`ifNotDefined\\`: Input is required when another specified input is not defined\n- \\`ifDefined\\`: Input is required when another specified input is defined\n- \\`conditionalValues\\`: Input is required when another input has a specific value\n\nBy default, the input is not required.","properties":{"always":{"type":"boolean","description":"Whether the input is always required"},"ifNotDefined":{"type":"object","description":"Makes this input required when another input is not defined:\n- Key: name of the input that must be undefined\n- Value: message to display when this input is required","properties":{}},"ifDefined":{"type":"object","description":"Makes this input required when another input is defined:\n- Key: name of the input that must be defined\n- Value: message to display when this input is required","properties":{}},"conditionalValues":{"type":"object","description":"Makes this input required when another input has a specific value:\n- Key: name of the input to check\n- Value: operation and allowed values that trigger the requirement","properties":{}}}},"modelTypes":{"type":"array","description":"The allowed model types for this input. 
Example: \\`[\"flux.1-lora\"]\\`.\nOnly available for \\`model_array\\` input type.","items":{"type":"string","enum":["custom","flux.1","flux.1-composition","flux.1-kontext-dev","flux.1-kontext-lora","flux.1-krea-dev","flux.1-krea-lora","flux.1-lora","flux.1-pro","flux.1.1-pro-ultra","flux1.1-pro","gpt-image-1","sd-1_5","sd-1_5-composition","sd-1_5-lora","sd-xl","sd-xl-composition","sd-xl-lora"]}},"ref":{"type":"object","description":"The reference to another input or output of the same workflow.\nMust have at least one of node or conditional.","properties":{"equal":{"type":"string","description":"This is the desired node output value if ref is an if/else node."},"node":{"type":"string","description":"The node id or 'workflow' if the source is a workflow input."},"conditional":{"type":"array","description":"The conditional nodes to reference.\nIf the conditional nodes are successful, the node will be successful.\nIf the conditional nodes are skipped, the node will be skipped.\nContains an array of node ids used to check the status of the nodes.","items":{"type":"string"}},"name":{"type":"string","description":"The name of the input or output to reference.\nIf the type is 'workflow', the name is the name of the input of the workflow is required\nIf the type is 'node', the name is not mandatory, except if you want all outputs of the node.\nTo get all outputs of a node, you can use the name 'all'."}}},"min":{"type":"number","description":"The minimum allowed value. Only available for \\`number\\` and array input types."},"promptSpark":{"type":"boolean","description":"Whether the input is used with prompt spark. Only available for \\`string\\` input type."},"placeholder":{"type":"string","description":"Placeholder text for the input. Only available for 'string' input type."},"group":{"type":"string","description":"Used to visually group inputs together in the UI. 
Inputs with the same group value appear\nconsecutively in the UI."},"allowedValues":{"type":"array","description":"The allowed values for the input. For \\`string\\` or \\`number\\` types, creates a single-select\ndropdown.\nFor \\`string_array\\` type, creates a multi-select dropdown.","items":{}},"costImpact":{"type":"boolean","description":"Whether this input affects the model's cost calculation"},"max":{"type":"number","description":"The maximum allowed value. Only available for \\`number\\` and \\`array\\` input types."},"kind":{"type":"string","description":"The asset kind of the input. Only taken into account for \\`file\\` and \\`file_array\\` input types.","enum":["3d","audio","document","image","image-hdr","json","video"]},"maskFrom":{"type":"string","description":"The name of the file input field to use as the mask source"},"label":{"type":"string","description":"The label displayed in the UI for this input"},"hint":{"type":"string","description":"Hint text displayed in the UI as a tooltip to guide the user"},"name":{"type":"string","description":"The name that must be user to call the model through the API"},"step":{"type":"number","description":"The step increment for numeric inputs. Only available for \\`number\\` input type.","minimum":1},"prompt":{"type":"boolean","description":"Whether the input is a prompt. When true, displays as a text area with prompt spark feature.\nOnly available for \\`string\\` input type."},"maxLength":{"type":"number","description":"The maximum allowed length for \\`string\\` inputs. 
Also applies to each item in \\`string_array\\`."}},"required":["name","type"]}},"id":{"type":"string","description":"The id of the node."},"logic":{"type":"object","description":"The logic of the node.\nOnly available for logic nodes.","properties":{"default":{"type":"string","description":"The default case of the logic.\nContains the id/output of the node to execute if no case is matched.\nOnly available for if/else nodes."},"transform":{"type":"string","description":"The transform of the logic.\nOnly available for transform nodes."},"cases":{"type":"array","description":"The cases of the logic.\nOnly available for if/else nodes.","items":{"type":"object","properties":{"condition":{"type":"string"},"value":{"type":"string"}},"required":["condition","value"]}}}},"type":{"type":"string","description":"The type of the job for the node.","enum":["custom-model","generate-prompt","logic","remove-background","transform","user-approval","workflow"]},"workflowId":{"type":"string","description":"The workflow id for the node. Mainly used for workflow tasks."},"status":{"type":"string","description":"The status of the node. 
Only available for WorkflowJob nodes.","enum":["failure","pending","processing","skipped","success"]}},"required":["id","status","type"]}},"workflowId":{"type":"string","description":"The workflow ID of the job if job is part of a workflow."},"workflowJobId":{"type":"string","description":"The workflow job ID of the job if job is part of a workflow job."}}},"statusHistory":{"type":"array","description":"The history of the different statuses the job went through with the ISO string date\nof when the job reached each statuses.","items":{"type":"object","properties":{"date":{"type":"string"},"status":{"type":"string","enum":["canceled","failure","finalizing","in-progress","pending","queued","success","warming-up"]}},"required":["date","status"]}},"progress":{"type":"number","description":"Progress of the job (between 0 and 1)"},"authorId":{"type":"string","description":"The author user ID (example: \"dcf121faaa1a0a0bbbd9ca1b73d62aea\")"},"jobType":{"type":"string","description":"The type of job","enum":["assets-download","canvas-export","caption","caption-llava","custom","describe-style","detection","embed","flux","flux-model-training","generate-prompt","image-generation","image-prompt-editing","inference","mesh-preview-rendering","model-download","model-import","model-training","openai-image-generation","patch-image","pixelate","reframe","remove-background","repaint","restyle","segment","skybox-3d","skybox-base-360","skybox-hdri","skybox-upscale-360","texture","translate","upload","upscale","upscale-skybox","upscale-texture","vectorize","workflow"]},"ownerId":{"type":"string","description":"The owner ID (example: \"team_U3Qmc8PCdWXwAQJ4Dvw4tV6D\")"},"billing":{"type":"object","description":"The billing of the job","properties":{"cuDiscount":{"type":"number"},"cuCost":{"type":"number"}},"required":["cuCost","cuDiscount"]},"status":{"type":"string","description":"The current status of the 
job","enum":["canceled","failure","finalizing","in-progress","pending","queued","success","warming-up"]},"updatedAt":{"type":"string","description":"The job last update date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"}},"required":["createdAt","jobId","jobType","metadata","progress","status","statusHistory","updatedAt"]}}},"PostDownloadAssetRequest":{"type":"object","properties":{"targetFormat":{"type":"string","description":"The format to download the asset in","enum":["gif","heif","jpeg","jpg","png","svg","webp","avif","tif","tiff","glb","fbx","obj"]}}},"PutImageUpscaleRequest":{"type":"object","properties":{"image":{"type":"string","description":"The image to upscale in base64 format string. It will be ignored if assetId is provided."},"seed":{"type":"string","description":"Reproduce a preceding result. Default: randomly generated number. If set, \"seed\" must be an integer between 0 and 2147483647 sent as string"},"scalingFactor":{"type":"number","description":"The scaling factor to apply to the image. One of 1, 2, 4, 8, 16."},"preset":{"type":"string","description":"Optimize the upscale for a specific use case. Precise: Upscale for high fidelity. Balanced: Upscale for a balance between fidelity and creativity. Creative: Upscale for creativity. Default: Balanced.","enum":["precise","balanced","creative"],"default":"balanced"},"negativePrompt":{"type":"string","description":"A negative full text prompt that discourages the upscale from generating certain characteristics. It is recommended to test without using a negative prompt. Default: empty string. Example: \"Low resolution, blurry, pixelated, noisy","default":""},"halfMode":{"type":"boolean","description":"Optimize tiling so centered props and characters on an image get better results. Setting it to false will enhance performances. Default: false.","default":false},"assetId":{"type":"string","description":"The asset ID (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\"). 
If provided, image and name will be ignored."},"overrideEmbeddings":{"type":"boolean","description":"Override the embeddings of the model. Default: false. Use with caution.","default":false},"hdr":{"type":"number","description":"Increase definition and details. High value can lead to a loss of realism. Default: optimized for your `preset` and `style`.","minimum":0,"maximum":100},"name":{"type":"string","description":"The original file name of the image (example: \"low-res-image.jpg\"). It will be ignored if assetId is provided."},"promptFidelity":{"type":"number","description":"Increase the fidelity to the prompt during upscale. Default: optimized for your `preset` and `style`.","minimum":0,"maximum":100},"style":{"type":"string","description":"Optimize the upscale for a specific style. `standard` works in most cases. Use one of the other `styles` to refine the outputs. Default: `standard`.","enum":["standard","cartoon","anime","3d-rendered","comic","minimalist","photography"],"default":"standard"},"returnImage":{"type":"boolean","description":"If true, a placeholder version of the image being processed will be returned in the response.","default":false},"creativity":{"type":"number","description":"Allow the generation of \"hallucinations\" during the upscale process, which adds additional details and deviates from the original image. Default: optimized for your `preset` and `style`.","minimum":0,"maximum":100},"colorCorrection":{"type":"boolean","description":"Apply a color correction to the resulting image hence increasing ressemblance. Default: true.","default":true},"prompt":{"type":"string","description":"A full text prompt to guide the upscale and forcing the generation of certain characteristics. Default: empty string. Example: \"UHD 8K hyper detailed studio photo of man face with yellow skin, anatomical++, disturbing+++, black background. Exploding brain. Bloody. 
Use the perspective of a standard lens to produce an image that is similar to what the human eye sees, creating a natural and realistic look.\"","default":""},"imageFidelity":{"type":"number","description":"Strengthen the similarity to the original image during the upscale. Default: optimized for your `preset` and `style`.","minimum":0,"maximum":100}}},"PostControlnetImg2imgInferencesResponse":{"type":"object","required":["inference","job"],"properties":{"creativeUnitsCost":{"type":"number","description":"The Creative Units cost for the request billed"},"creativeUnitsDiscount":{"type":"number","description":"The Creative Units discount for the request billed"},"inference":{"type":"object","properties":{"controlEnd":{"type":"number","description":"Specifies how long the ControlNet guidance should be applied during the inference process.\n\nOnly available for Flux.1-dev based models.\n\nThe value represents the percentage of total inference steps where the ControlNet guidance is active.\nFor example:\n- 1.0: ControlNet guidance is applied during all inference steps\n- 0.5: ControlNet guidance is only applied during the first half of inference steps\n\nDefault values:\n- 0.5 for Canny modality\n- 0.6 for all other modalities","minimum":0.1,"maximum":1},"modality":{"type":"string","description":"The modality associated with the control image used for the generation: it can either be an object with a combination of maximum\n\nFor models of SD1.5 family:\n - up to 3 modalities from `canny`, `pose`, `depth`, `lines`, `seg`, `scribble`, `lineart`, `normal-map`, `illusion`\n - or one of the following presets: `character`, `landscape`, `city`, `interior`.\n\nFor models of the SDXL family:\n - up to 3 modalities from `canny`, `pose`, `depth`, `seg`, `illusion`, `scribble`\n - or one of the following presets: `character`, `landscape`.\n\nFor models of the FLUX schnell or dev families:\n- one modality from: `canny`, `tile`, `depth`, `blur`, `pose`, `gray`, 
`low-quality`\n\nOptionally, you can associate a value to these modalities or presets. The value must be within `]0.0, 1.0]`.\n\nExamples:\n- `canny`\n- `depth:0.5,pose:1.0`\n- `canny:0.5,depth:0.5,lines:0.3`\n- `landscape`\n- `character:0.5`\n- `illusion:1`\n\nNote: if you use a value that is not supported by the model family, this will result in an error.","enum":["blur","canny","depth","gray","illusion","lineart","lines","low-quality","normal-map","pose","scribble","seg","sketch","tile"]},"seed":{"type":"string","description":"Used to reproduce previous results. Default: randomly generated number."},"strength":{"type":"number","description":"Controls the noise intensity introduced to the input image, where a value of 1.0 completely erases the original image's details. Available for img2img and inpainting. (within [0.01, 1.0], default: 0.75)","minimum":0.01,"maximum":1},"type":{"type":"string","description":"The type of inference to use. Example: txt2img, img2img, etc.\n\nSelecting the right type will condition the expected parameters.\n\nNote: if model.type is `sd-xl*` or `sd-1_5*`, when using the `\"inpaint\"` inference type, Scenario determines the best available `baseModel` for a given `modelId`: one of `[\"stable-diffusion-inpainting\", \"stable-diffusion-xl-1.0-inpainting-0.1\"] will be used.","enum":["txt2img","txt2img_ip_adapter","txt2img_texture","img2img","img2img_ip_adapter","img2img_texture","inpaint","outpaint","inpaint_ip_adapter","controlnet","controlnet_ip_adapter","reference","reference_texture","controlnet_reference","controlnet_img2img","controlnet_inpaint","controlnet_inpaint_ip_adapter","controlnet_texture"]},"scheduler":{"type":"string","description":"The scheduler to use to override the default configured for the model. 
See detailed documentation for more details.","enum":["DDIMScheduler","DDPMScheduler","DEISMultistepScheduler","DPMSolverMultistepScheduler","DPMSolverSinglestepScheduler","EulerAncestralDiscreteScheduler","EulerDiscreteScheduler","HeunDiscreteScheduler","KDPM2AncestralDiscreteScheduler","KDPM2DiscreteScheduler","LCMScheduler","LMSDiscreteScheduler","PNDMScheduler","TCDScheduler","UniPCMultistepScheduler"]},"disableModalityDetection":{"type":"boolean","description":"If false, the process uses the given image to detect the modality.\nIf true (default), the process will not try to detect the modality of the given image.\n\nFor example:\nwith `pose` modality and `false` value, the process will detect the pose of people in the given image\nwith `depth` modality and `false` value, the process will detect the depth of the given image\nwith `scribble` modality and `true`value, the process will use the given image as a scribble\n\n⚠️ For models of the FLUX schnell or dev families, this parameter is ignored. The modality detection is always disabled. ⚠️","default":true},"baseModelId":{"type":"string","description":"The base model to use for the inference. 
Only Flux LoRA models can use this parameter.\nAllowed values are available in the model's attribute: `compliantModelIds`"},"controlStart":{"type":"number","description":"Specifies the starting point of the ControlNet guidance during the inference process.\n\nOnly available for Flux.1-dev based models.\n\nThe value represents the percentage of total inference steps where the ControlNet guidance starts.\nFor example:\n- 0.0: ControlNet guidance starts at the beginning of the inference steps\n- 0.5: ControlNet guidance starts at the middle of the inference steps","minimum":0,"maximum":0.9},"height":{"type":"number","description":"The height of the generated images, must be a 8 multiple (within [64, 2048], default: 512)\nIf model.type is `sd-xl`, `sd-xl-lora`, `sd-xl-composition` the height must be within [512, 2048]\nIf model.type is `sd-1_5`, the height must be within [64, 1024]\nIf model.type is `flux.1.1-pro-ultra`, you can use the aspectRatio parameter instead","minimum":64,"maximum":2048},"controlImageId":{"type":"string","description":"Asset id of the controlnet input image"},"image":{"type":"string","description":"Signed URL to display the input image"},"imageId":{"type":"string","description":"Asset id of the input image"},"modelEpoch":{"type":"string","description":"The epoch of the model to use for the inference. 
Only available for Flux Lora Trained models."},"hideResults":{"type":"boolean","description":"If set, generated assets will be hidden and not returned in the list of images of the inference\nor when listing assets (default: false)","default":false},"negativePrompt":{"type":"string","description":"The prompt not to guide the image generation, ignored when guidance < 1 (example: \"((ugly face))\")\nFor Flux based model (not Fast-Flux): requires negativePromptStrength > 0 and active only for inference types txt2img / img2img / controlnet."},"intermediateImages":{"type":"boolean","description":"Enable or disable the intermediate images generation (default: false)","default":false},"concepts":{"type":"array","items":{"type":"object","properties":{"modelId":{"type":"string","description":"The model ID (example: \"model_eyVcnFJcR92BxBkz7N6g5w\")"},"modelEpoch":{"type":"string","description":"The epoch of the model (example: \"000001\")\nOnly available for Flux Lora Trained models"},"scale":{"type":"number","description":"The scale of the model (example: 1.0)\nFor Flux Kontext Prompt Editing, the scale is between 0 and 2.","minimum":-2,"maximum":2}},"required":["modelId","scale"]}},"controlImage":{"type":"string","description":"Signed URL to display the controlnet input image"},"guidance":{"type":"number","description":"Controls how closely the generated image follows the prompt. Higher values result in stronger adherence to the prompt. 
Default and allowed values depend on the model type:\n- For Flux dev models, the default is 3.5 and allowed values are within [0, 10]\n- For Flux pro models, the default is 3 and allowed values are within [2, 5]\n- For SDXL models, the default is 6 and allowed values are within [0, 20]\n- For SD1.5 models, the default is 7.5 and allowed values are within [0, 20]","minimum":0,"maximum":20},"numInferenceSteps":{"type":"number","description":"The number of denoising steps for each image generation (within [1, 150], default: 30)","minimum":1,"maximum":150},"numSamples":{"type":"number","description":"The number of images to generate (within [1, 128], default: 4)","minimum":1,"maximum":128},"width":{"type":"number","description":"The width of the generated images, must be a 8 multiple (within [64, 2048], default: 512)\nIf model.type is `sd-xl`, `sd-xl-lora`, `sd-xl-composition` the width must be within [512, 2048]\nIf model.type is `sd-1_5`, the width must be within [64, 1024]\nIf model.type is `flux.1.1-pro-ultra`, you can use the aspectRatio parameter instead","minimum":64,"maximum":2048},"negativePromptStrength":{"type":"number","description":"Only applicable for flux-dev based models for `txt2img`, `img2img`, and `controlnet` inference types.\n\nControls the influence of the negative prompt. Default 0 means the negative prompt has no effect. Higher values increase negative prompt influence.\nMust be > 0 if negativePrompt is provided.","minimum":0,"maximum":10},"prompt":{"type":"string","description":"Full text prompt including the model placeholder. 
(example: \"an illustration of phoenix in a fantasy world, flying over a mountain, 8k, bokeh effect\")"}},"required":["controlImage","controlImageId","image","imageId","modality","prompt","type"]},"job":{"type":"object","properties":{"createdAt":{"type":"string","description":"The job creation date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"jobId":{"type":"string","description":"The job ID (example: \"job_ocZCnG1Df35XRL1QyCZSRxAG8\")"},"metadata":{"type":"object","description":"Metadata of the job with some additional information","properties":{"output":{"type":"object","description":"The output of the job","properties":{}},"input":{"type":"object","description":"The inputs for the job","properties":{}},"assetIds":{"type":"array","description":"List of produced assets for this job","items":{"type":"string"}},"flow":{"type":"array","description":"The flow of the job. Only available for workflow jobs.","items":{"type":"object","properties":{"jobId":{"type":"string","description":"If the flow is part of a WorkflowJob, this is the jobId for the node.\njobId is only available for nodes started. A node \"Pending\" for a running workflow job is not started."},"assets":{"type":"array","description":"List of produced assets for this node.","items":{"type":"object","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]}},"logicType":{"type":"string","description":"The type of the logic for the node.\nOnly available for logic nodes.","enum":["if-else"]},"dependsOn":{"type":"array","description":"The nodes that this node depends on.\nOnly available for nodes that have dependencies. Mainly used for user approval nodes.","items":{"type":"string"}},"modelId":{"type":"string","description":"The model id for the node. 
Mainly used for custom model tasks."},"inputs":{"type":"array","description":"The inputs of the node.","items":{"type":"object","description":"This is used to run the flow.","properties":{"parent":{"type":"boolean","description":"Whether this input represents a parent asset to assign to the produced assets.\nOnly available for \\`file\\` and \\`file_array\\` input types.\n\nFor \\`file_array\\`, the parent asset is the first item in the array."},"color":{"type":"boolean","description":"Whether the input is a color or not. Only available for \\`string\\` input type."},"inputs":{"type":"array","description":"The list of inputs which form an object within a container array.\nAll inputs are the same as the current object.\nThis is only available for type inputs_array inputs.","items":{"type":"object","properties":{}}},"minLength":{"type":"number","description":"The minimum allowed length for string inputs. Also applies to each item in \\`string_array\\`."},"backgroundBehavior":{"type":"string","description":"Specifies the background behavior for the input. 
Only available for \\`file\\` and \\`file_array\\`\ninput types with kind \\`image\\`.","enum":["opaque","transparent"]},"description":{"type":"string","description":"Help text displayed in the UI to provide additional information about the input"},"type":{"type":"string","description":"The data type of the input","enum":["boolean","file","file_array","inputs_array","model","model_array","number","number_array","string","string_array"]},"required":{"type":"object","description":"Set of rules that describes when this input is required:\n- \\`always\\`: Input is always required\n- \\`ifNotDefined\\`: Input is required when another specified input is not defined\n- \\`ifDefined\\`: Input is required when another specified input is defined\n- \\`conditionalValues\\`: Input is required when another input has a specific value\n\nBy default, the input is not required.","properties":{"always":{"type":"boolean","description":"Whether the input is always required"},"ifNotDefined":{"type":"object","description":"Makes this input required when another input is not defined:\n- Key: name of the input that must be undefined\n- Value: message to display when this input is required","properties":{}},"ifDefined":{"type":"object","description":"Makes this input required when another input is defined:\n- Key: name of the input that must be defined\n- Value: message to display when this input is required","properties":{}},"conditionalValues":{"type":"object","description":"Makes this input required when another input has a specific value:\n- Key: name of the input to check\n- Value: operation and allowed values that trigger the requirement","properties":{}}}},"modelTypes":{"type":"array","description":"The allowed model types for this input. 
Example: \\`[\"flux.1-lora\"]\\`.\nOnly available for \\`model_array\\` input type.","items":{"type":"string","enum":["custom","flux.1","flux.1-composition","flux.1-kontext-dev","flux.1-kontext-lora","flux.1-krea-dev","flux.1-krea-lora","flux.1-lora","flux.1-pro","flux.1.1-pro-ultra","flux1.1-pro","gpt-image-1","sd-1_5","sd-1_5-composition","sd-1_5-lora","sd-xl","sd-xl-composition","sd-xl-lora"]}},"ref":{"type":"object","description":"The reference to another input or output of the same workflow.\nMust have at least one of node or conditional.","properties":{"equal":{"type":"string","description":"This is the desired node output value if ref is an if/else node."},"node":{"type":"string","description":"The node id or 'workflow' if the source is a workflow input."},"conditional":{"type":"array","description":"The conditional nodes to reference.\nIf the conditional nodes are successful, the node will be successful.\nIf the conditional nodes are skipped, the node will be skipped.\nContains an array of node ids used to check the status of the nodes.","items":{"type":"string"}},"name":{"type":"string","description":"The name of the input or output to reference.\nIf the type is 'workflow', the name is the name of the input of the workflow is required\nIf the type is 'node', the name is not mandatory, except if you want all outputs of the node.\nTo get all outputs of a node, you can use the name 'all'."}}},"min":{"type":"number","description":"The minimum allowed value. Only available for \\`number\\` and array input types."},"promptSpark":{"type":"boolean","description":"Whether the input is used with prompt spark. Only available for \\`string\\` input type."},"placeholder":{"type":"string","description":"Placeholder text for the input. Only available for 'string' input type."},"group":{"type":"string","description":"Used to visually group inputs together in the UI. 
Inputs with the same group value appear\nconsecutively in the UI."},"allowedValues":{"type":"array","description":"The allowed values for the input. For \\`string\\` or \\`number\\` types, creates a single-select\ndropdown.\nFor \\`string_array\\` type, creates a multi-select dropdown.","items":{}},"costImpact":{"type":"boolean","description":"Whether this input affects the model's cost calculation"},"max":{"type":"number","description":"The maximum allowed value. Only available for \\`number\\` and \\`array\\` input types."},"kind":{"type":"string","description":"The asset kind of the input. Only taken into account for \\`file\\` and \\`file_array\\` input types.","enum":["3d","audio","document","image","image-hdr","json","video"]},"maskFrom":{"type":"string","description":"The name of the file input field to use as the mask source"},"label":{"type":"string","description":"The label displayed in the UI for this input"},"hint":{"type":"string","description":"Hint text displayed in the UI as a tooltip to guide the user"},"name":{"type":"string","description":"The name that must be user to call the model through the API"},"step":{"type":"number","description":"The step increment for numeric inputs. Only available for \\`number\\` input type.","minimum":1},"prompt":{"type":"boolean","description":"Whether the input is a prompt. When true, displays as a text area with prompt spark feature.\nOnly available for \\`string\\` input type."},"maxLength":{"type":"number","description":"The maximum allowed length for \\`string\\` inputs. 
Also applies to each item in \\`string_array\\`."}},"required":["name","type"]}},"id":{"type":"string","description":"The id of the node."},"logic":{"type":"object","description":"The logic of the node.\nOnly available for logic nodes.","properties":{"default":{"type":"string","description":"The default case of the logic.\nContains the id/output of the node to execute if no case is matched.\nOnly available for if/else nodes."},"transform":{"type":"string","description":"The transform of the logic.\nOnly available for transform nodes."},"cases":{"type":"array","description":"The cases of the logic.\nOnly available for if/else nodes.","items":{"type":"object","properties":{"condition":{"type":"string"},"value":{"type":"string"}},"required":["condition","value"]}}}},"type":{"type":"string","description":"The type of the job for the node.","enum":["custom-model","generate-prompt","logic","remove-background","transform","user-approval","workflow"]},"workflowId":{"type":"string","description":"The workflow id for the node. Mainly used for workflow tasks."},"status":{"type":"string","description":"The status of the node. 
Only available for WorkflowJob nodes.","enum":["failure","pending","processing","skipped","success"]}},"required":["id","status","type"]}},"workflowId":{"type":"string","description":"The workflow ID of the job if job is part of a workflow."},"workflowJobId":{"type":"string","description":"The workflow job ID of the job if job is part of a workflow job."}}},"statusHistory":{"type":"array","description":"The history of the different statuses the job went through with the ISO string date\nof when the job reached each status.","items":{"type":"object","properties":{"date":{"type":"string"},"status":{"type":"string","enum":["canceled","failure","finalizing","in-progress","pending","queued","success","warming-up"]}},"required":["date","status"]}},"progress":{"type":"number","description":"Progress of the job (between 0 and 1)"},"authorId":{"type":"string","description":"The author user ID (example: \"dcf121faaa1a0a0bbbd9ca1b73d62aea\")"},"jobType":{"type":"string","description":"The type of job","enum":["assets-download","canvas-export","caption","caption-llava","custom","describe-style","detection","embed","flux","flux-model-training","generate-prompt","image-generation","image-prompt-editing","inference","mesh-preview-rendering","model-download","model-import","model-training","openai-image-generation","patch-image","pixelate","reframe","remove-background","repaint","restyle","segment","skybox-3d","skybox-base-360","skybox-hdri","skybox-upscale-360","texture","translate","upload","upscale","upscale-skybox","upscale-texture","vectorize","workflow"]},"ownerId":{"type":"string","description":"The owner ID (example: \"team_U3Qmc8PCdWXwAQJ4Dvw4tV6D\")"},"billing":{"type":"object","description":"The billing of the job","properties":{"cuDiscount":{"type":"number"},"cuCost":{"type":"number"}},"required":["cuCost","cuDiscount"]},"status":{"type":"string","description":"The current status of the 
job","enum":["canceled","failure","finalizing","in-progress","pending","queued","success","warming-up"]},"updatedAt":{"type":"string","description":"The job last update date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"}},"required":["createdAt","jobId","jobType","metadata","progress","status","statusHistory","updatedAt"]}}},"PostProjectResponse":{"type":"object","required":["project"],"properties":{"project":{"type":"object","properties":{"createdAt":{"type":"string"},"descriptionDate":{"type":"string","description":"ISO string date when the description was last updated\nOnly returned on single project GET, not on list"},"privacyMode":{"type":"boolean","description":"Whether the project is privacyMode\nIn a privacyMode project, users can only see their own generated assets (except admins)"},"keys":{"type":"array","description":"The keys of the project (get from the ProjectRoles table)","items":{"type":"object","properties":{"createdAt":{"type":"string","description":"The API key creation date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"apiKeyStatus":{"type":"string","description":"The status of the API key","enum":["active","inactive"]},"role":{"type":"string","enum":["admin","editor","reader"]},"name":{"type":"string","description":"The API key name (example: \"eurv-scenario\")"},"apiKeyId":{"type":"string","description":"The API key ID (example: \"eyVcnFJcR92BxBkz7N6g5w\")"},"id":{"type":"string","description":"The identifier of the API key (example: \"eyVcnFJcR92BxBkz7N6g5w\")"},"updatedAt":{"type":"string","description":"The API key last update date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"}},"required":["apiKeyId","apiKeyStatus","createdAt","id","name","role","updatedAt"]}},"teamId":{"type":"string","description":"The team ID (example: \"team_eyVcnFJcR92BxBkz7N6g5w\")"},"name":{"type":"string","description":"The project name (example: \"Default Project\")"},"blacklist":{"type":"object","description":"Blacklist of 
models for the project","properties":{"models":{"type":"array","items":{"type":"string"}}},"required":["models"]},"description":{"type":"string","description":"High-level markdown description of the project\nOnly returned on single project GET, not on list"},"avatar":{"type":"object","description":"The project's avatar","properties":{"assetId":{"type":"string","description":"ID of the asset used as thumbnail if provided, otherwise undefined"},"url":{"type":"string","description":"Signed URL of the assetId or free url if assetId is undefined"}}},"id":{"type":"string","description":"The project ID (example: \"project_eyVcnFJcR92BxBkz7N6g5w\")"},"users":{"type":"array","description":"The users of the project (get from the ProjectRoles table)","items":{"type":"object","properties":{"createdAt":{"type":"string","description":"The user creation date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"role":{"type":"string","enum":["admin","editor","reader"]},"avatar":{"type":"object","description":"The user's avatar","properties":{"assetId":{"type":"string","description":"ID of the asset used as thumbnail if provided, otherwise undefined"},"url":{"type":"string","description":"Signed URL of the assetId or free url if assetId is undefined"}}},"id":{"type":"string","description":"The user ID (example: \"eyVcnFJcR92BxBkz7N6g5w\")"},"email":{"type":"string","description":"The user email (example: \"herve@scenario.com\")"},"updatedAt":{"type":"string","description":"The user last update date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"}},"required":["createdAt","email","id","role","updatedAt"]}},"updatedAt":{"type":"string"}},"required":["createdAt","id","keys","name","privacyMode","teamId","updatedAt","users"]}}},"PostSegmentInferencesRequest":{"type":"object","required":["image"],"properties":{"checkpoint":{"type":"string","description":"The checkpoint to 
use","enum":["fastsam_x","sam_b","sam_h"],"default":"fastsam_x"},"image":{"type":"string","description":"The input image to process. Must reference an existing AssetId or be a data URL."},"resultContours":{"type":"boolean","description":"Boolean to output the contours.","default":false},"dilate":{"type":"number","description":"The number of pixels to dilate the result masks.","minimum":0,"maximum":30},"bbox":{"type":"array","description":"A bounding box around the object of interest, in the format [x1, y1, x2, y2].","items":{}},"backgroundOpacity":{"type":"number","description":"Int to set between 0 and 255 for the opacity of the background in the result images.","minimum":0,"maximum":255},"betterQuality":{"type":"boolean","description":"Remove small dark spots (i.e. “pepper”) and connect small bright cracks.","default":false},"text":{"type":"string","description":"A textual description / keywords describing the object of interest.","maxLength":100},"resultImage":{"type":"boolean","description":"Boolean to enable output of the cut out object.","default":false},"points":{"type":"array","description":"List of points (label, x, y) in the image where label = 0 for background and 1 for object.","items":{"type":"array","items":{}}},"resultMask":{"type":"boolean","description":"Boolean to enable returning the masks (binary image) in the response.","default":true}}},"PutAssetByAssetIdRequest":{"type":"object","properties":{"lockId":{"type":"string","description":"The value of the lock to use when updating a locked canvas."},"lockExpiresAt":{"type":"string","description":"The ISO timestamp when the lock on the canvas will expire."},"canvas":{"type":"string","description":"The new value for the canvas as a stringified JSON."},"thumbnail":{"type":"string","description":"The new thumbnail for the canvas in base64 format string."},"name":{"type":"string","description":"The new name for the canvas."},"description":{"type":"string","description":"The new description of the 
asset."},"disableSnapshot":{"type":"boolean","description":"If true, no snapshot will be created for this update."}}},"PostModelTrainingActionByModelIdRequest":{"type":"object","required":["action"],"properties":{"action":{"type":"string","description":"The action to perform on the model training","enum":["cancel"]}}},"GetRecommendationsModelsResponse":{"type":"object","required":["models"],"properties":{"models":{"type":"array","items":{"type":"object","properties":{"trainingImages":{"type":"array","description":"The URLs of the first 3 training images of the model. To retrieve the full set of images, get it by modelId","items":{"type":"object","properties":{"createdAt":{"type":"string","description":"The training image upload date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"automaticCaptioning":{"type":"string","description":"Automatic captioning of the image"},"downloadUrl":{"type":"string","description":"The URL of the image"},"name":{"type":"string","description":"The original file name of the image (example: \"my-training-image.jpg\")"},"description":{"type":"string","description":"Description for the image"},"id":{"type":"string","description":"The training image ID (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"}},"required":["automaticCaptioning","createdAt","description","downloadUrl","id","name"]}},"inputs":{"type":"array","description":"The inputs of the model. Only used for custom models. To retrieve this list, get it by modelId with GET /models/{modelId}","items":{"type":"object","description":"Defines the input parameters for a model. 
Use this to understand the available input parameters\nfor a given `modelId` when calling `POST /generate/custom/{modelId}`.\nSee {@link https://docs.scenario.com/docs/video-generation} for examples.","properties":{"parent":{"type":"boolean","description":"Whether this input represents a parent asset to assign to the produced assets.\nOnly available for \\`file\\` and \\`file_array\\` input types.\n\nFor \\`file_array\\`, the parent asset is the first item in the array."},"color":{"type":"boolean","description":"Whether the input is a color or not. Only available for \\`string\\` input type."},"inputs":{"type":"array","description":"The list of inputs which form an object within a container array.\nAll inputs are the same as the current object.\nThis is only available for type inputs_array inputs.","items":{"type":"object","properties":{}}},"minLength":{"type":"number","description":"The minimum allowed length for string inputs. Also applies to each item in \\`string_array\\`."},"backgroundBehavior":{"type":"string","description":"Specifies the background behavior for the input. 
Only available for \\`file\\` and \\`file_array\\`\ninput types with kind \\`image\\`.","enum":["opaque","transparent"]},"description":{"type":"string","description":"Help text displayed in the UI to provide additional information about the input"},"type":{"type":"string","description":"The data type of the input","enum":["boolean","file","file_array","inputs_array","model","model_array","number","number_array","string","string_array"]},"required":{"type":"object","description":"Set of rules that describes when this input is required:\n- \\`always\\`: Input is always required\n- \\`ifNotDefined\\`: Input is required when another specified input is not defined\n- \\`ifDefined\\`: Input is required when another specified input is defined\n- \\`conditionalValues\\`: Input is required when another input has a specific value\n\nBy default, the input is not required.","properties":{"always":{"type":"boolean","description":"Whether the input is always required"},"ifNotDefined":{"type":"object","description":"Makes this input required when another input is not defined:\n- Key: name of the input that must be undefined\n- Value: message to display when this input is required","properties":{}},"ifDefined":{"type":"object","description":"Makes this input required when another input is defined:\n- Key: name of the input that must be defined\n- Value: message to display when this input is required","properties":{}},"conditionalValues":{"type":"object","description":"Makes this input required when another input has a specific value:\n- Key: name of the input to check\n- Value: operation and allowed values that trigger the requirement","properties":{}}}},"modelTypes":{"type":"array","description":"The allowed model types for this input. 
Example: \\`[\"flux.1-lora\"]\\`.\nOnly available for \\`model_array\\` input type.","items":{"type":"string","enum":["custom","flux.1","flux.1-composition","flux.1-kontext-dev","flux.1-kontext-lora","flux.1-krea-dev","flux.1-krea-lora","flux.1-lora","flux.1-pro","flux.1.1-pro-ultra","flux1.1-pro","gpt-image-1","sd-1_5","sd-1_5-composition","sd-1_5-lora","sd-xl","sd-xl-composition","sd-xl-lora"]}},"min":{"type":"number","description":"The minimum allowed value. Only available for \\`number\\` and array input types."},"promptSpark":{"type":"boolean","description":"Whether the input is used with prompt spark. Only available for \\`string\\` input type."},"placeholder":{"type":"string","description":"Placeholder text for the input. Only available for 'string' input type."},"group":{"type":"string","description":"Used to visually group inputs together in the UI. Inputs with the same group value appear\nconsecutively in the UI."},"allowedValues":{"type":"array","description":"The allowed values for the input. For \\`string\\` or \\`number\\` types, creates a single-select\ndropdown.\nFor \\`string_array\\` type, creates a multi-select dropdown.","items":{}},"costImpact":{"type":"boolean","description":"Whether this input affects the model's cost calculation"},"max":{"type":"number","description":"The maximum allowed value. Only available for \\`number\\` and \\`array\\` input types."},"kind":{"type":"string","description":"The asset kind of the input. 
Only taken into account for \\`file\\` and \\`file_array\\` input types.","enum":["3d","audio","document","image","image-hdr","json","video"]},"maskFrom":{"type":"string","description":"The name of the file input field to use as the mask source"},"label":{"type":"string","description":"The label displayed in the UI for this input"},"hint":{"type":"string","description":"Hint text displayed in the UI as a tooltip to guide the user"},"name":{"type":"string","description":"The name that must be used to call the model through the API"},"step":{"type":"number","description":"The step increment for numeric inputs. Only available for \\`number\\` input type.","minimum":1},"prompt":{"type":"boolean","description":"Whether the input is a prompt. When true, displays as a text area with prompt spark feature.\nOnly available for \\`string\\` input type."},"maxLength":{"type":"number","description":"The maximum allowed length for \\`string\\` inputs. Also applies to each item in \\`string_array\\`."}},"required":["name","type"]}},"exampleAssetIds":{"type":"array","description":"List of all example asset IDs setup by the model owner","items":{"type":"string"}},"privacy":{"type":"string","description":"The privacy of the model (default: private)","enum":["private","public","unlisted"],"default":"private"},"softDeletionOn":{"type":"string","description":"The date when the model will be soft deleted (only for Free plan)"},"epoch":{"type":"string","description":"The epoch of the model. 
Only available for Flux Lora Trained models.\nIf not set, uses the final model epoch (latest)"},"uiConfig":{"type":"object","description":"The UI configuration for the model","properties":{"selects":{"type":"object","description":"Configuration for the selects","additionalProperties":{"type":"object","properties":{}}},"inputProperties":{"type":"object","description":"Configuration for the input properties","additionalProperties":{"type":"object","properties":{"collapsed":{"type":"boolean"}}}},"presets":{"type":"array","description":"Configuration for the presets","items":{"type":"object","properties":{"presets":{"type":"object","properties":{}},"fields":{"type":"array","items":{"type":"string"}}},"required":["fields","presets"]}},"triggerGenerate":{"type":"object","description":"Configuration for the trigger generate button","properties":{"after":{"type":"string","description":"The 'name' of the input where the trigger generate button will be displayed (after the input).\nDo not specify both position and after."},"label":{"type":"string"},"position":{"type":"string","description":"The position of the trigger generate button. 
If position specified, the button will be displayed at the specified position.\nDo not specify both position and after.","enum":["bottom","top"]}},"required":["label"]},"lorasComponent":{"type":"object","description":"Configuration for the loras component","properties":{"modelIdInput":{"type":"string","description":"The input model id (example: a composition or a single LoRA modelId)\nIf specified, the model id will be attached to the output asset as a metadata\nIf the model-decomposer parser is specified on it, modelInput and scaleInput will be automatically populated"},"scaleInput":{"type":"string","description":"The input name of the scale (number_array)"},"label":{"type":"string","description":"The label of the component"},"modelInput":{"type":"string","description":"The input name of the model (model_array)"}},"required":["label","modelInput","scaleInput"]},"resolutionComponent":{"type":"object","description":"Configuration for the resolution component","properties":{"presets":{"type":"array","description":"The resolution presets","items":{"type":"object","properties":{"width":{"type":"number"},"label":{"type":"string"},"height":{"type":"number"}},"required":["height","label","width"]}},"widthInput":{"type":"string","description":"The input name of the width"},"label":{"type":"string","description":"The label of the component"},"heightInput":{"type":"string","description":"The input name of the height"}},"required":["heightInput","label","presets","widthInput"]}}},"source":{"type":"string","description":"The source of the model","enum":["civitai","huggingface","other","scenario"]},"ownerId":{"type":"string","description":"The owner ID (example: \"team_VFhihHKMRZyDDnZAJwLb2Q\")"},"type":{"type":"string","description":"The model type (example: 
\"flux.1-lora\")","enum":["custom","flux.1","flux.1-composition","flux.1-kontext-dev","flux.1-kontext-lora","flux.1-krea-dev","flux.1-krea-lora","flux.1-lora","flux.1-pro","flux.1.1-pro-ultra","flux1.1-pro","gpt-image-1","sd-1_5","sd-1_5-composition","sd-1_5-lora","sd-xl","sd-xl-composition","sd-xl-lora"]},"createdAt":{"type":"string","description":"The model creation date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"complianceMetadata":{"type":"object","description":"Compliance and regulatory metadata for the model","properties":{"subProcessor":{"type":"string","description":"The sub-processor used for the model","enum":["AWS","Fal","Modal","Provider","Replicate"]},"modelProvider":{"type":"string","description":"The provider of the model (examples: 'Google', 'AWS', 'BFL', 'Meshy', etc.)"},"licenseTerms":{"type":"string","description":"URL to license terms"},"dataProcessingComment":{"type":"string","description":"Data processing comment (e.g., zeroRetention, temporaryRetention, etc.)","enum":["researchOnly","serviceImprovement","temporaryRetention","zeroRetention"]},"maintainer":{"type":"string","description":"The maintainer of the model","enum":["Fal","Provider","Replicate","Scenario"]}}},"trainingStats":{"type":"object","description":"Additional information about the model's training","properties":{"trainDuration":{"type":"number","description":"The training duration in seconds"},"queueDuration":{"type":"number","description":"The training queued duration in seconds"},"endedAt":{"type":"string","description":"The training end time as an ISO date string"},"startedAt":{"type":"string","description":"The training start time as an ISO date string"}}},"promptEmbedding":{"type":"string","description":"Fine-tune the model's inferences with prompt embedding"},"trainingProgress":{"type":"object","description":"Additional information about the training progress of the model","properties":{"stage":{"type":"string","description":"The stage of the 
request","enum":["pending","queued-for-train","running-train","starting-train"]},"remainingTimeMs":{"type":"number","description":"The remaining time in milliseconds"},"progress":{"type":"number","description":"The progress of the job","minimum":0,"maximum":1},"startedAt":{"type":"number","description":"The timestamp in millisecond marking the start of the process"},"position":{"type":"number","description":"Position of the job in the queue (ie. the number of job in the queue before this one)"},"updatedAt":{"type":"number","description":"Timestamp in milliseconds of the last time the training progress was updated"}},"required":["stage","updatedAt"]},"id":{"type":"string","description":"The model ID (example: \"model_eyVcnFJcR92BxBkz7N6g5w\")"},"class":{"type":"object","description":"The class of the model","properties":{"modelId":{"type":"string","description":"The model ID of the class (example: \"stable-diffusion-v1-5\")"},"name":{"type":"string","description":"The class name (example: \"Character Design\")"},"category":{"type":"string","description":"The category slug of the class (example: \"art-style\")"},"thumbnails":{"type":"array","description":"Some example images URLs to showcase the class","items":{"type":"string"}},"prompt":{"type":"string","description":"The class prompt (example: \"a character design\")"},"conceptPrompt":{"type":"string","description":"The concept prompt of the class (example: \"a sks character design\")"},"slug":{"type":"string","description":"The class slug (example: \"art-style-character-design\")"},"status":{"type":"string","description":"The class status (only published classes are listed, but unpublished classes can still appear in existing models)","enum":["published","unpublished"]}},"required":["category","conceptPrompt","modelId","name","prompt","slug","status","thumbnails"]},"updatedAt":{"type":"string","description":"The model last update date as an ISO string (example: 
\"2023-02-03T11:19:41.579Z\")"},"thumbnail":{"type":"object","description":"A thumbnail for your model","properties":{"assetId":{"type":"string","description":"The AssetId of the image used as a thumbnail for your model (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"url":{"type":"string","description":"The url of the image used as a thumbnail for your model"}},"required":["assetId","url"]},"accessRestrictions":{"type":"number","description":"The access restrictions of the model\n0: Free plan\n25: Creator plan\n50: Pro plan\n75: Team plan\n100: Enterprise plan"},"capabilities":{"type":"array","description":"List of model capabilities (example: [\"txt2img\", \"img2img\", \"txt2img_ip_adapter\", ...])","items":{"type":"string","enum":["3d23d","audio2audio","controlnet","controlnet_img2img","controlnet_inpaint","controlnet_inpaint_ip_adapter","controlnet_ip_adapter","controlnet_reference","controlnet_texture","img23d","img2img","img2img_ip_adapter","img2img_texture","img2txt","img2video","inpaint","inpaint_ip_adapter","outpaint","reference","reference_texture","txt23d","txt2audio","txt2img","txt2img_ip_adapter","txt2img_texture","txt2txt","txt2video","video2img","video2video"]}},"parentModelId":{"type":"string","description":"The id of the parent model"},"trainingImagePairs":{"type":"array","description":"Array of training image pairs","items":{"type":"object","properties":{"sourceId":{"type":"string","description":"The source asset ID (must be a training asset)"},"targetId":{"type":"string","description":"The target asset ID (must be a training asset)"},"instruction":{"type":"string","description":"The instruction for the image pair, source to target"}}}},"trainingImagesNumber":{"type":"number","description":"The total number of training images"},"custom":{"type":"boolean","description":"Whether the model is a custom model and can be used only with POST /generate/custom/{modelId} endpoint"},"modelKeyword":{"type":"string","description":"The model keyword, this is a 
legacy parameter, please use conceptPrompt in parameters"},"shortDescription":{"type":"string","description":"The model short description (example: \"This model generates highly detailed cinematic scenes.\")"},"authorId":{"type":"string","description":"The author user ID (example: \"user_VFhihHKMRZyDDnZAJwLb2Q\")"},"userId":{"type":"string","description":"(Deprecated) The user ID (example: \"user_VFhihHKMRZyDDnZAJwLb2Q\")"},"tags":{"type":"array","description":"The associated tags (example: [\"sci-fi\", \"landscape\"])","items":{"type":"string"}},"negativePromptEmbedding":{"type":"string","description":"Fine-tune the model's inferences with negative prompt embedding"},"concepts":{"type":"array","description":"The concepts is required for the type model: composition","items":{"type":"object","properties":{"modelId":{"type":"string","description":"The model ID (example: \"model_eyVcnFJcR92BxBkz7N6g5w\")"},"modelEpoch":{"type":"string","description":"The epoch of the model (example: \"000001\")\nOnly available for Flux Lora Trained models"},"scale":{"type":"number","description":"The scale of the model (example: 1.0)\nFor Flux Kontext Prompt Editing, the scale is between 0 and 2.","minimum":-2,"maximum":2}},"required":["modelId","scale"]}},"collectionIds":{"type":"array","description":"A list of CollectionId this model belongs to","items":{"type":"string"}},"name":{"type":"string","description":"The model name (example: \"Cinematic Realism\")"},"epochs":{"type":"array","description":"The epochs of the model. 
Only available for Flux Lora Trained models.","items":{"type":"object","properties":{"assets":{"type":"array","description":"The assets of the epoch if sample prompts have been supplied during training","items":{"type":"object","properties":{"assetId":{"type":"string","description":"The AssetId of the image during training (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"url":{"type":"string","description":"The url of the asset"}},"required":["assetId","url"]}},"epoch":{"type":"string","description":"The epoch hash to identify the epoch"}},"required":["epoch"]}},"parameters":{"type":"object","description":"The parameters of the model","properties":{"priorLossWeight":{"type":"number","description":"The weight of prior preservation loss\n\nOnly available for SD15 and SDXL LoRA training","minimum":0,"maximum":1.7976931348623157e+308,"exclusiveMinimum":true},"seed":{"type":"number","description":"Used to reproduce previous results. Default: randomly generated number.\n\nOnly available for SD15 and SDXL LoRA training","minimum":0,"maximum":9007199254740991},"numUNetTrainSteps":{"type":"number","description":"The number of training steps for the UNet\n\nOnly available for SDXL LoRA training","minimum":0,"maximum":40000},"sampleSourceImages":{"type":"array","description":"The sample prompt images (AssetIds) paired with samplePrompts\nOnly available for Flux LoRA training\nMust be the same length as samplePrompts","items":{"type":"string"}},"classPrompt":{"type":"string","description":"The prompt to specify images in the same class as provided instance images\n\nOnly available for SD15 training"},"wandbKey":{"type":"string","description":"The Weights And Bias key to use for logging. 
The maximum length is 40 characters"},"scaleLr":{"type":"boolean","description":"Whether to scale the learning rate\n\nNote: Legacy parameter, will be ignored\n\nOnly available for SD15 and SDXL LoRA training","default":false},"randomCropScale":{"type":"number","description":"Scale of random crops\n\nOnly available for SD15 and SDXL LoRA training","minimum":0,"maximum":1},"lrScheduler":{"type":"string","description":"The scheduler type to use (default: \"constant\")\n\nOnly available for SD15 and SDXL LoRA training","enum":["constant","constant-with-warmup","cosine","cosine-with-restarts","linear","polynomial"],"default":"constant"},"rank":{"type":"number","description":"The dimension of the LoRA update matrices\n\nOnly available for SDXL and Flux LoRA training\n\nDefault value varies depending on the model type:\n- For SDXL: 64\n- For Flux: 16","minimum":4,"maximum":64},"validationPrompt":{"type":"string","description":"Validation prompt\n\nOnly available for SD15 and SDXL LoRA training","default":""},"conceptPrompt":{"type":"string","description":"The prompt with identifier specifying the instance (or subject) of the class (example: \"a daiton dog\")\n\nDefault value varies depending on the model type:\n- For SD1.5: \"daiton\" if no class is associated with the model\n- For SDXL: \"daiton\"\n- For Flux: \"\""},"maxTrainSteps":{"type":"number","description":"Maximum number of training steps to execute (default: varies depending on the model type)\n\nFor SDXL LoRA training, please use `numTextTrainSteps` and `numUNetTrainSteps` instead\n\nDefault value varies depending on the model type:\n- For SD1.5: round((number of training images * 225) / 3)\n- For SDXL: number of training images * 175\n- For Flux: number of training images * 100\n\nMaximum value varies depending on the model type:\n- For SD1.5 and SDXL: [0, 40000]\n- For Flux: [0, 10000]","minimum":0,"maximum":40000},"nbEpochs":{"type":"number","description":"The number of epochs to train for\n\nOnly available 
for Flux LoRA training","minimum":1,"maximum":30},"textEncoderTrainingRatio":{"type":"number","description":"Whether to train the text encoder or not\n\nExample: For 100 steps and a value of 0.2, it means that the text encoder will be trained for 20 steps and then the UNet for 80 steps\n\nNote: Legacy parameter, please use `numTextTrainSteps` and `numUNetTrainSteps`\n\nOnly available for SD15 and SDXL LoRA training","minimum":0,"maximum":0.99},"validationFrequency":{"type":"number","description":"Validation frequency. Cannot be greater than maxTrainSteps value\n\nOnly available for SD15 and SDXL LoRA training","minimum":0},"nbRepeats":{"type":"number","description":"The number of times to repeat the training\n\nOnly available for Flux LoRA training","minimum":1,"maximum":30},"samplePrompts":{"type":"array","description":"The prompts to use for each epoch\nOnly available for Flux LoRA training","items":{"type":"string"}},"randomCropRatio":{"type":"number","description":"Ratio of random crops\n\nOnly available for SD15 and SDXL LoRA training","minimum":0,"maximum":1},"learningRate":{"type":"number","description":"Initial learning rate (after the potential warmup period)\n\nDefault value varies depending on the model type:\n- For SD1.5 and SDXL: 0.000005\n- For Flux: 0.0001","minimum":0,"exclusiveMinimum":true},"optimizeFor":{"type":"string","description":"Optimize the model training task for a specific type of input images. 
The available values are:\n- \"likeness\": optimize training for likeness or portrait (targets specific transformer blocks)\n- \"all\": train all transformer blocks\n- \"none\": train no specific transformer blocks\n\nThis parameter controls which double and single transformer blocks are trained\nduring the LoRA training process.\n\nOnly available for Flux LoRA training","enum":["likeness"],"default":"undefined"},"randomCrop":{"type":"boolean","description":"Whether to random crop or center crop images before resizing to the working resolution\n\nOnly available for SD15 and SDXL LoRA training","default":false},"numTextTrainSteps":{"type":"number","description":"The number of training steps for the text encoder\n\nOnly available for SDXL LoRA training","minimum":0,"maximum":40000},"learningRateTextEncoder":{"type":"number","description":"Initial learning rate (after the potential warmup period) for the text encoder\n\nMaximum [Flux LoRA: 0.001]\nDefault [SDXL: 0.00005 | Flux LoRA: 0.00001]\nMinimum [SDXL: 0 | Flux LoRA: 0.000001]","minimum":0,"maximum":0.001,"exclusiveMinimum":true},"learningRateUnet":{"type":"number","description":"Initial learning rate (after the potential warmup period) for the UNet\n\nOnly available for SDXL LoRA training","minimum":0,"exclusiveMinimum":true},"batchSize":{"type":"number","description":"The batch size\nLess steps, and will increase the learning rate\n\nOnly available for Flux LoRA training","minimum":1,"maximum":4}}},"compliantModelIds":{"type":"array","description":"List of base model IDs compliant with the model (example: [\"flux.1-dev\", \"flux.1-schnell\"])\nThis attribute is mainly used for Flux LoRA models","items":{"type":"string"}},"status":{"type":"string","description":"The model 
status","enum":["copying","failed","new","trained","training","training-canceled"]}},"required":["capabilities","collectionIds","createdAt","custom","exampleAssetIds","id","privacy","source","status","tags","trainingImagesNumber","type","updatedAt"]}},"nextToken":{"type":"string","description":"A token to query the next page of recommendations"}}},"PutModelsByCollectionIdResponse":{"type":"object","required":["collection"],"properties":{"collection":{"type":"object","properties":{"createdAt":{"type":"string","description":"The collection creation date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"thumbnail":{"type":"object","description":"The thumbnail for the collection (if any)","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"name":{"type":"string","description":"The collection name"},"assetCount":{"type":"number"},"id":{"type":"string","description":"The collection ID (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"ownerId":{"type":"string","description":"The owner ID (example: \"dcf121faaa1a0a0bbbd9ca1b73d62aea\")"},"modelCount":{"type":"number"},"itemCount":{"type":"number"},"updatedAt":{"type":"string","description":"The collection last update date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"}},"required":["assetCount","createdAt","id","itemCount","modelCount","name","ownerId","updatedAt"]}}},"GetAssetsResponse":{"type":"object","required":["assets"],"properties":{"nextPaginationToken":{"type":"string","description":"A token to query the next page of assets"},"assets":{"type":"array","items":{"type":"object","properties":{"preview":{"type":"object","description":"The asset's preview.\n\nContains the assetId and the url of the preview.","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"metadata":{"type":"object","description":"Metadata of the asset with some additional 
information","properties":{"maxIterations":{"type":"number"},"modality":{"type":"string","description":"Modality to detect","enum":["canny","depth","grayscale","lineart_anime","mlsd","normal","pose","scribble","segmentation","sketch"],"default":"canny"},"tileStyle":{"type":"boolean","description":"If set to true, during the upscaling process, the model will match tiles of the source image with tiles of the style image(s). This will result in a more coherent restyle. Works best with style images that have a similar composition.","default":false},"modelId":{"type":"string","description":"The modelId used to generate this asset"},"sourceProjectId":{"type":"string"},"bbox":{"type":"array","description":"A bounding box around the object of interest, in the format [x1, y1, x2, y2].","items":{}},"keypointThreshold":{"type":"number","description":"Confidence threshold for keypoint detection, between 0 and 1","minimum":0,"maximum":1},"type":{"type":"string","description":"The type of the asset. 
Ex: 'inference-txt2img' will represent an asset generated from a text to image model","enum":["3d-texture","3d-texture-albedo","3d-texture-metallic","3d-texture-mtl","3d-texture-normal","3d-texture-roughness","3d23d","3d23d-texture","audio2audio","background-removal","canvas","canvas-drawing","canvas-export","detection","generative-fill","image-prompt-editing","img23d","img2img","img2video","inference-controlnet","inference-controlnet-img2img","inference-controlnet-inpaint","inference-controlnet-inpaint-ip-adapter","inference-controlnet-ip-adapter","inference-controlnet-reference","inference-controlnet-texture","inference-img2img","inference-img2img-ip-adapter","inference-img2img-texture","inference-inpaint","inference-inpaint-ip-adapter","inference-reference","inference-reference-texture","inference-txt2img","inference-txt2img-ip-adapter","inference-txt2img-texture","patch","pixelization","reframe","restyle","segment","segmentation-image","segmentation-mask","skybox-3d","skybox-base-360","skybox-hdri","texture","texture-albedo","texture-ao","texture-edge","texture-height","texture-metallic","texture-normal","texture-smoothness","txt23d","txt2audio","txt2img","txt2video","unknown","uploaded","uploaded-3d","uploaded-audio","uploaded-avatar","uploaded-video","upscale","upscale-skybox","upscale-texture","upscale-video","vectorization","video2img","video2video"]},"text":{"type":"string","description":"A textual description / keywords describing the object of interest.","maxLength":100},"height":{"type":"number"},"nbMasks":{"type":"number"},"creativityDecay":{"type":"number","description":"Amount of decay in creativity over the upscale process. 
The lowest the value, the less the creativity will be preserved over the upscale process.","minimum":0,"maximum":100},"layerDifference":{"type":"number"},"scalingFactor":{"type":"number","description":"Scaling factor (when `targetWidth` not specified)","minimum":1,"maximum":16},"inputFidelity":{"type":"string","description":"When set to `high`, allows to better preserve details from the input images in the output.\nThis is especially useful when using images that contain elements like faces or logos that\nrequire accurate preservation in the generated image.\n\nYou can provide multiple input images that will all be preserved with high fidelity, but keep\nin mind that the first image will be preserved with richer textures and finer details, so if\nyou include elements such as faces, consider placing them in the first image.\n\nOnly available for the `gpt-image-1` model.","enum":["high","low"],"default":"low"},"modelType":{"type":"string","description":"The type of the generator used","enum":["custom","flux.1","flux.1-composition","flux.1-kontext-dev","flux.1-kontext-lora","flux.1-krea-dev","flux.1-krea-lora","flux.1-lora","flux.1-pro","flux.1.1-pro-ultra","flux1.1-pro","gpt-image-1","sd-1_5","sd-1_5-composition","sd-1_5-lora","sd-xl","sd-xl-composition","sd-xl-lora"]},"numOutputs":{"type":"number","description":"The number of outputs to generate.","minimum":1,"maximum":8},"angular":{"type":"number","description":"How angular is the surface? 
0 is like a sphere, 1 is like a mechanical object","minimum":0,"maximum":1},"targetHeight":{"type":"number","description":"The target height of the output image.","minimum":0,"maximum":2048},"concepts":{"type":"array","description":"Flux Kontext LoRA to style the image.\nFor Flux Kontext Prompt Editing.","items":{"type":"object","properties":{"modelId":{"type":"string","description":"The model ID (example: \"model_eyVcnFJcR92BxBkz7N6g5w\")"},"modelEpoch":{"type":"string","description":"The epoch of the model (example: \"000001\")\nOnly available for Flux Lora Trained models"},"scale":{"type":"number","description":"The scale of the model (example: 1.0)\nFor Flux Kontext Prompt Editing, the scale is between 0 and 2.","minimum":-2,"maximum":2}},"required":["modelId","scale"]}},"size":{"type":"number"},"spliceThreshold":{"type":"number"},"dilate":{"type":"number","description":"The number of pixels to dilate the result masks.","minimum":0,"maximum":30},"style":{"type":"string","enum":["3d-cartoon","3d-rendered","anime","cartoon","cinematic","claymation","cloud-skydome","comic","cyberpunk","enchanted","fantasy","ink","manga","manga-color","minimalist","neon-tron","oil-painting","pastel","photo","photography","psychedelic","retro-fantasy","scifi-concept-art","space","standard","whimsical"]},"imageFidelity":{"type":"number","description":"Strengthen the similarity to the original image during the upscale. 
Default: optimized for your preset and style.","minimum":0,"maximum":100},"inputLocation":{"type":"string","description":"Location of the input image in the output.","enum":["bottom","left","middle","right","top"],"default":"middle"},"seed":{"type":"string","description":"The seed used to generate this asset"},"preset":{"type":"string"},"trainingImage":{"type":"boolean"},"resultMask":{"type":"boolean","description":"Boolean to able return the masks (binary image) in the response.","default":true},"lockExpiresAt":{"type":"string","description":"The ISO timestamp when the lock on the canvas will expire"},"baseModelId":{"type":"string","description":"The baseModelId that maybe changed at inference time"},"pathPrecision":{"type":"number"},"rootParentId":{"type":"string"},"sharpen":{"type":"boolean","description":"Sharpen tiles.","default":false},"cornerThreshold":{"type":"number"},"styleImages":{"type":"array","description":"List of style images. Most of the time, only one image is enough. It must be existing AssetIds.","items":{"type":"string"}},"texture":{"type":"string","description":"The asset to convert in texture maps. Must reference an existing AssetId."},"filterSpeckle":{"type":"number"},"minThreshold":{"type":"number","description":"Minimum threshold for Grayscale conversion"},"maxThreshold":{"type":"number","description":"Maximum threshold for Grayscale conversion"},"styleImagesFidelity":{"type":"number","description":"Condition the influence of the style image(s). The higher the value, the more the style images will influence the upscaled image.","minimum":0,"maximum":100},"depthImage":{"type":"string","description":"The control image processed by depth estimator. 
Must reference an existing AssetId."},"parentId":{"type":"string"},"parentJobId":{"type":"string"},"width":{"type":"number","description":"The width of the rendered image.","minimum":1024,"maximum":2048},"strength":{"type":"number","description":"The strength\n\nOnly available for the `flux-kontext` LoRA model."},"inferenceId":{"type":"string","description":"The id of the Inference describing how this image was generated"},"lengthThreshold":{"type":"number"},"originalAssetId":{"type":"string"},"backgroundOpacity":{"type":"number","description":"Int to set between 0 and 255 for the opacity of the background in the result images.","minimum":0,"maximum":255},"aspectRatio":{"type":"string","description":"The optional aspect ratio given for the generation, only applicable for some models"},"detailsLevel":{"type":"number","description":"Amount of details to remove or add","minimum":-50,"maximum":50},"fractality":{"type":"number","description":"Determine the scale at which the upscale process works.\n- With a small value, the upscale works at the largest scale, resulting in fewer added details and more coherent images. Ideal for portraits, for example.\n- With a large value, the upscale works at the smallest scale, resulting in more added details and more hallucinations. Ideal for landscapes, for example.\n\n(info): A small value is slower and more expensive to run.","minimum":0,"maximum":100},"horizontalExpansionRatio":{"type":"number","description":"(deprecated) Horizontal expansion ratio.","minimum":1,"maximum":2},"points":{"type":"array","description":"List of points (label, x, y) in the image where label = 0 for background and 1 for object.","items":{"type":"array","items":{}}},"mode":{"type":"string"},"colorPrecision":{"type":"number"},"promptFidelity":{"type":"number","description":"Increase the fidelity to the prompt during upscale. 
Default: optimized for your preset and style.","minimum":0,"maximum":100},"betterQuality":{"type":"boolean","description":"Remove small dark spots (i.e. “pepper”) and connect small bright cracks.","default":false},"removeBackground":{"type":"boolean","description":"Remove background for Grayscale detector","default":true},"creativity":{"type":"number","description":"Allow the generation of \"hallucinations\" during the upscale process, which adds additional details and deviates from the original image. Default: optimized for your preset and style.","minimum":0,"maximum":100},"structureFidelity":{"type":"number","description":"Strength for the input image structure preservation","minimum":0,"maximum":100},"imageType":{"type":"string","description":"Preserve the seamless properties of skybox or texture images. Input has to be of same type (seamless).","enum":["seamfull","skybox","texture"],"default":"seamfull"},"image":{"type":"string","description":"The input image to process. Must reference an existing AssetId or be a data URL."},"thumbnail":{"type":"object","description":"The thumbnail of the canvas","properties":{"assetId":{"type":"string","description":"The AssetId of the image used as a thumbnail for the canvas (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"url":{"type":"string","description":"The url of the image used as a thumbnail for the canvas"}},"required":["assetId","url"]},"structureImage":{"type":"string","description":"The control image for structure. A canny detector will be applied to this image. 
Must reference an existing AssetId."},"invert":{"type":"boolean","description":"To invert the relief","default":false},"kind":{"type":"string","enum":["3d","audio","document","image","image-hdr","json","video"]},"colorMode":{"type":"string"},"highThreshold":{"type":"number","description":"High threshold for Canny detector"},"verticalExpansionRatio":{"type":"number","description":"(deprecated) Vertical expansion ratio.","minimum":1,"maximum":2},"halfMode":{"type":"boolean"},"name":{"type":"string"},"negativePromptStrength":{"type":"number","description":"Controls the influence of the negative prompt. Default 0 means the negative prompt has no effect. Higher values increase negative prompt influence.\nMust be > 0 if negativePrompt is provided.","minimum":0,"maximum":10},"refinementSteps":{"type":"number","description":"Additional refinement steps before scaling.\n\nIf scalingFactor == 1, the refinement process will be applied (1 + refinementSteps) times.\nIf scalingFactor > 1, the refinement process will be applied refinementSteps times.","minimum":0,"maximum":4},"referenceImages":{"type":"array","description":"The reference images used for the asset generation or editing","items":{"type":"string"}},"styleFidelity":{"type":"number","description":"The higher the value the more it will look like the style image(s)","minimum":0,"maximum":100},"resultContours":{"type":"boolean","description":"Boolean to output the contours.","default":false},"controlEnd":{"type":"number","description":"End step for control."},"polished":{"type":"number","description":"How polished is the surface? 0 is like a rough surface, 1 is like a mirror","minimum":0,"maximum":1},"scheduler":{"type":"string","description":"The scheduler used to generate this asset"},"overrideEmbeddings":{"type":"boolean","description":"Override the embeddings of the model. Only your prompt and negativePrompt will be used. 
Use with caution."},"progressPercent":{"type":"number"},"factor":{"type":"number","description":"Contrast factor for Grayscale detector"},"mask":{"type":"string","description":"The mask used for the asset generation or editing"},"lowThreshold":{"type":"number","description":"Low threshold for Canny detector"},"depthFidelity":{"type":"number","description":"The depth fidelity if a depth image provided","minimum":0,"maximum":100},"raised":{"type":"number","description":"How raised is the surface? 0 is flat like water, 1 is like a very rough rock","minimum":0,"maximum":1},"copiedAt":{"type":"string","description":"The date when the asset was copied to a project"},"contours":{"type":"array","items":{"type":"array","items":{"type":"array","items":{"type":"array","items":{"type":"number"}}}}},"negativePrompt":{"type":"string","description":"The negative prompt used to generate this asset"},"defaultParameters":{"type":"boolean","description":"If true, use the default parameters","default":false},"resizeOption":{"type":"number","description":"Size proportion of the input image in the output.","minimum":0.1,"maximum":1},"outputIndex":{"type":"number"},"saveFlipbook":{"type":"boolean","description":"Save a flipbook of the texture. Deactivated when the input texture is larger than 2048x2048px","default":true},"guidance":{"type":"number","description":"The guidance used to generate this asset"},"numInferenceSteps":{"type":"number","description":"The number of denoising steps for each image generation.","minimum":5,"maximum":50},"hdr":{"type":"number"},"clustering":{"type":"boolean","description":"Activate clustering.","default":false},"shiny":{"type":"number","description":"How shiny is the surface? 
0 is like a matte surface, 1 is like a diamond","minimum":0,"maximum":1},"colorCorrection":{"type":"boolean","description":"Ensure upscaled tiles have the same color histogram as the original tile.","default":true},"sketch":{"type":"boolean","description":"Activate sketch detection instead of canny.","default":false},"overlapPercentage":{"type":"number","description":"Overlap percentage for the output image.","minimum":0,"maximum":0.5},"prompt":{"type":"string","description":"The prompt that guided the asset generation or editing"},"resultImage":{"type":"boolean","description":"Boolean to enable outputting the cut-out object.","default":false},"cannyStructureImage":{"type":"string","description":"The control image already processed by canny detector. Must reference an existing AssetId."},"geometryEnforcement":{"type":"number","description":"Apply extra control to the Skybox 360 geometry.\nThe higher the value, the more the 360 geometry will influence the generated skybox image.\n\nUse with caution. 
Default is adapted to the other parameters.","minimum":0,"maximum":100},"targetWidth":{"type":"number","description":"Target width for the upscaled image, take priority over scaling factor","minimum":1024,"maximum":16000}},"required":["kind","type"]},"automaticCaptioning":{"type":"string","description":"Automatic captioning of the asset"},"description":{"type":"string","description":"The description, it will contain in priority:\n- the manual description\n- the advanced captioning when the asset is used in training flow\n- the automatic captioning"},"privacy":{"type":"string","description":"The privacy of the asset","enum":["private","public","unlisted"]},"mimeType":{"type":"string","description":"The mime type of the asset (example: \"image/png\")"},"source":{"type":"string","description":"source of the asset","enum":["3d23d","3d23d:texture","3d:texture","3d:texture:albedo","3d:texture:metallic","3d:texture:mtl","3d:texture:normal","3d:texture:roughness","audio2audio","background-removal","canvas","canvas-drawing","canvas-export","detection","generative-fill","image-prompt-editing","img23d","img2img","img2video","inference-control-net","inference-control-net-img","inference-control-net-inpainting","inference-control-net-inpainting-ip-adapter","inference-control-net-ip-adapter","inference-control-net-reference","inference-control-net-texture","inference-img","inference-img-ip-adapter","inference-img-texture","inference-in-paint","inference-in-paint-ip-adapter","inference-reference","inference-reference-texture","inference-txt","inference-txt-ip-adapter","inference-txt-texture","patch","pixelization","reframe","restyle","segment","segmentation-image","segmentation-mask","skybox-3d","skybox-base-360","skybox-hdri","texture","texture:albedo","texture:ao","texture:edge","texture:height","texture:metallic","texture:normal","texture:smoothness","txt23d","txt2audio","txt2img","txt2video","unknown","uploaded","uploaded-3d","uploaded-audio","uploaded-avatar","uploaded-video"
,"upscale","upscale-skybox","upscale-texture","upscale-video","vectorization","video2img","video2video"]},"ownerId":{"type":"string","description":"The owner (project) ID (example: \"proj_23tlk332lkht3kl2\" or \"team_dlkhgs23tlk3hlkth32lkht3kl2\" for old teams)"},"createdAt":{"type":"string","description":"The asset creation date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"firstFrame":{"type":"object","description":"The video asset's first frame.\n\nContains the assetId and the url of the first frame.","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"editCapabilities":{"type":"array","description":"List of edit capabilities","items":{"type":"string","enum":["DETECTION","GENERATIVE_FILL","PIXELATE","PROMPT_EDITING","REFINE","REFRAME","REMOVE_BACKGROUND","SEGMENTATION","UPSCALE","UPSCALE_360","VECTORIZATION"]}},"embedding":{"type":"array","description":"The embedding of the asset when requested.\n\nOnly available when an asset can be embedded (ie: not Detection maps)","items":{"type":"number"}},"id":{"type":"string","description":"The asset ID (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"updatedAt":{"type":"string","description":"The asset last update date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"thumbnail":{"type":"object","description":"The asset's thumbnail.\n\nContains the assetId and the url of the thumbnail.","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"nsfw":{"type":"array","description":"The NSFW labels","items":{"type":"string"}},"kind":{"type":"string","description":"The kind of asset","enum":["3d","audio","document","image","image-hdr","json","video"]},"lastFrame":{"type":"object","description":"The video asset's last frame.\n\nContains the assetId and the url of the last 
frame.","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"originalFileUrl":{"type":"string","description":"The original file url.\n\nContains the url of the original file, without any conversion. Only available for some specific video, audio and threeD assets.\nIs only specified if the given asset data has been replaced with a new file during the creation of the asset."},"authorId":{"type":"string","description":"The author user ID (example: \"dcf121faaa1a0a0bbbd9ca1b73d62aea\")"},"url":{"type":"string","description":"Signed URL to get the asset content"},"isHidden":{"type":"boolean","description":"Whether the asset is hidden."},"tags":{"type":"array","description":"The associated tags (example: [\"sci-fi\", \"landscape\"])","items":{"type":"string"}},"outputIndex":{"type":"number","description":"The output index of the asset within a job\nThis index is a non-negative integer that starts at 0\nIt is used to differentiate between multiple outputs of the same job\nIf the job has only one output, this index is 0"},"collectionIds":{"type":"array","description":"A list of CollectionId this asset belongs to","items":{"type":"string"}},"properties":{"type":"object","description":"The properties of the asset, content may depend on the kind of asset returned","properties":{"transcription":{"type":"object","description":"Transcription of the audio","properties":{"text":{"type":"string"}},"required":["text"]},"hasNormals":{"type":"boolean","description":"Whether the mesh has normal vectors"},"format":{"type":"string","description":"Format of the mesh file (e.g. 
'glb', etc.)"},"description":{"type":"string","description":"Description of the audio"},"bitrate":{"type":"number","description":"Bitrate of the media in bits per second"},"boneCount":{"type":"number","description":"Number of bones if skeleton exists"},"classification":{"type":"string","description":"Classification of the audio","enum":["effect","interview","music","other","sound","speech","text","unknown"]},"sampleRate":{"type":"number","description":"Sample rate of the media in Hz"},"faceCount":{"type":"number","description":"Number of faces/triangles in the mesh"},"vertexCount":{"type":"number","description":"Number of vertices in the mesh"},"animationFrameCount":{"type":"number","description":"Number of animation frames if animations exist"},"duration":{"type":"number","description":"Duration of the media in seconds"},"frameRate":{"type":"number","description":"Frame rate of the video in frames per second"},"nbFrames":{"type":"number","description":"Number of frames in the video"},"channels":{"type":"number","description":"Number of channels of the audio"},"size":{"type":"number"},"hasSkeleton":{"type":"boolean","description":"Whether the mesh has bones/skeleton"},"width":{"type":"number"},"hasAnimations":{"type":"boolean","description":"Whether the mesh has animations"},"codecName":{"type":"string","description":"Codec name of the media"},"hasUVs":{"type":"boolean","description":"Whether the mesh has UV coordinates"},"dimensions":{"type":"array","description":"Bounding box dimensions [width, height, depth]","items":{}},"height":{"type":"number"}},"required":["size"]},"status":{"type":"string","description":"The actual 
status","enum":["error","pending","success"]}},"required":["authorId","collectionIds","createdAt","editCapabilities","id","kind","metadata","mimeType","ownerId","privacy","properties","source","status","tags","updatedAt","url"]}}}},"GetModelsScoresPromptByModelIdResponse":{"type":"object","required":["details","modelId","score","status"],"properties":{"score":{"type":"number","description":"The prompt's score for the input modelId\n\nThe score is a number between 0 and 1. The higher the score, the better the prompt is for the model","minimum":0,"maximum":1},"syntaxError":{"type":"string","description":"Present only if the prompt has one or more syntax errors\n\nThe API checks several syntax errors in the prompt","enum":["Prompt weight should be between 0 and 2","Prompt weight symbol should be - or + with a maximum length of 5. Ex: --, ++, ---, +++++","Unsupported prompt syntax: '[to:when], [from::when]'","Unsupported prompt syntax: 'word:<weight>' or '(word):<weight>'. Please use 'word<weight>' or '(word)<weight>' instead","Unsupported prompt syntax: --(s|c)ref","Unsupported prompt syntax: <name:weight> or <lora:name:weight>","Unsupported url in prompt"]},"modelId":{"type":"string","description":"The model ID (example: \"model_eyVcnFJcR92BxBkz7N6g5w\")"},"details":{"type":"object","description":"Different components used to compute the final score","properties":{"captionsSimilarity":{"type":"object","description":"The Dice-Sørensen similarity between the prompt and each captions from the model's training images\n\nThe values are between 0 and 1. The higher the value, the more similar the prompt is to the training images' captions","properties":{"min":{"type":"number"},"median":{"type":"number"},"mean":{"type":"number"}},"required":["mean","median","min"]},"embeddingSimilarity":{"type":"number","description":"The cosine similarity between the prompt and the model's caption embedding\n\nThe value is a number between -1 and 1. 
The higher the value, the more similar the prompt is to the model's caption embedding"},"promptsSimilarity":{"type":"object","description":"The Dice-Sørensen similarity between the prompt and each prompts from the model's examples\n\nThe values are between 0 and 1. The higher the value, the more similar the prompt is to the prompts' examples","properties":{"min":{"type":"number"},"median":{"type":"number"},"mean":{"type":"number"}},"required":["mean","median","min"]}},"required":["captionsSimilarity","embeddingSimilarity"]},"status":{"type":"string","description":"The status of the prompt's score\n\nDetails if all the necessary data is present to compute the score","enum":["complete","incomplete","unknown"]}}},"PostModelsCopyRequest":{"type":"object","properties":{"copyExamples":{"type":"boolean","description":"true by default, the example images will be copied"},"copyAsTrained":{"type":"boolean","description":"If set to true, the training data will be copied"}}},"DeleteModelsByModelIdResponse":{},"PostPromptInferencesRequest":{"type":"object","required":["mode"],"properties":{"mode":{"type":"string","description":"The mode used to generate new prompt(s).","enum":["completion","contextual","image-editing","inventive","structured"],"default":"structured"},"ensureIPCleared":{"type":"boolean","description":"Whether we try to ensure IP removal for new prompt generation."},"image":{"type":"string","description":"The input image as a data URL (example: \"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVQYV2NgYAAAAAMAAWgmWQ0AAAAASUVORK5CYII=\") or the asset ID (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")\n\nRequired when `mode` is `image-editing`."},"images":{"type":"array","description":"List of images used to condition the generation.\n\nImages are set as data URLs (example: \\\"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVQYV2NgYAAAAAMAAWgmWQ0AAAAASUVORK5CYII=\\\") or the asset IDs (example: 
\\\"asset_GTrL3mq4SXWyMxkOHRxlpw\\\").\n\nNotes:\n- in `contextual` mode, images condition prompt generation by using their actual descriptions as context\n- in all other modes, it supersedes the `modelId` parameter when provided.","items":{"type":"string"}},"seed":{"type":"number","description":"If specified, the API will make a best effort to produce the same results, such that repeated requests with the same `seed` and parameters should return the same outputs. Must be used along with the same parameters including prompt, model's state, etc."},"modelId":{"type":"string","description":"The modelId used to condition the generation.\n\nWhen provided, the generation will take into account model's training images, examples.\n\nOnly supports 'gemini-2.0-flash', 'gemini-2.5-flash', 'gpt-image-1', 'flux-kontext' and 'runway-gen4-image' for now when `mode` is `image-editing`."},"temperature":{"type":"number","description":"The sampling temperature to use. Higher values like `0.8` will make the output more random, while lower values like `0.2` will make it more focused and deterministic.\n\nWe generally recommend altering this or `topP` but not both.","minimum":0,"maximum":2},"assetIds":{"type":"array","description":"The asset ids used to condition the generation.\n\nNotes:\n- supersedes the `modelId` parameter when provided.\n- ignored when `mode` is `image-editing`.","items":{"type":"string"}},"numResults":{"type":"number","description":"The number of results to return.","minimum":1,"maximum":5},"prompt":{"type":"string","description":"The initial prompt spark fed to `completion`, `inventive` or `structured` modes."},"topP":{"type":"number","description":"An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. 
So `0.1` means only the tokens comprising the top `10%` probability mass are considered.\n\nWe generally recommend altering this or `temperature` but not both.","minimum":0,"maximum":1}}},"PutProjectWebhookEndpointByIdResponse":{"type":"object","required":["webhookEndpoint"],"properties":{"webhookEndpoint":{"type":"object","properties":{"createdAt":{"type":"string","description":"The date and time the webhook endpoint was created"},"nbTotalCalls":{"type":"number","description":"The number of calls to the webhook endpoint"},"enabledEvents":{"type":"array","description":"The events that trigger the webhook. ['*'] indicates that all events are enabled","items":{"type":"string","enum":["*","asset.download.completed","asset.download.created","asset.download.failed","generation.cancelled","generation.completed","generation.created","generation.failed","inference.cancelled","inference.completed","inference.created","inference.failed","model.download.completed","model.download.created","model.download.failed","model.training.cancelled","model.training.completed","model.training.failed","model.training.started"]}},"nbFailedCalls":{"type":"number","description":"The number of calls to the webhook endpoint that have failed"},"description":{"type":"string","description":"A description of the webhook endpoint"},"id":{"type":"string","description":"The ID of the webhook endpoint"},"secret":{"type":"string","description":"The endpoint's secret, used to generate webhook signatures. 
Only returned at creation"},"ownerId":{"type":"string","description":"The ID of the owner of the webhook endpoint"},"enabled":{"type":"boolean","description":"Whether the webhook is enabled"},"url":{"type":"string","description":"The URL of the webhook endpoint"},"updatedAt":{"type":"string","description":"The date and time the webhook endpoint was updated"}},"required":["createdAt","enabled","enabledEvents","id","nbFailedCalls","nbTotalCalls","ownerId","updatedAt","url"]}}},"PostModelsGetBulkResponse":{"type":"object","required":["models"],"properties":{"models":{"type":"array","items":{"type":"object","properties":{"trainingImages":{"type":"array","description":"The URLs of the first 3 training images of the model. To retrieve the full set of images, get it by modelId","items":{"type":"object","properties":{"createdAt":{"type":"string","description":"The training image upload date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"automaticCaptioning":{"type":"string","description":"Automatic captioning of the image"},"downloadUrl":{"type":"string","description":"The URL of the image"},"name":{"type":"string","description":"The original file name of the image (example: \"my-training-image.jpg\")"},"description":{"type":"string","description":"Description for the image"},"id":{"type":"string","description":"The training image ID (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"}},"required":["automaticCaptioning","createdAt","description","downloadUrl","id","name"]}},"inputs":{"type":"array","description":"The inputs of the model. Only used for custom models. To retrieve this list, get it by modelId with GET /models/{modelId}","items":{"type":"object","description":"Defines the input parameters for a model. 
Use this to understand the available input parameters\nfor a given `modelId` when calling `POST /generate/custom/{modelId}`.\nSee {@link https://docs.scenario.com/docs/video-generation} for examples.","properties":{"parent":{"type":"boolean","description":"Whether this input represents a parent asset to assign to the produced assets.\nOnly available for \\`file\\` and \\`file_array\\` input types.\n\nFor \\`file_array\\`, the parent asset is the first item in the array."},"color":{"type":"boolean","description":"Whether the input is a color or not. Only available for \\`string\\` input type."},"inputs":{"type":"array","description":"The list of inputs which form an object within a container array.\nAll inputs are the same as the current object.\nThis is only available for type inputs_array inputs.","items":{"type":"object","properties":{}}},"minLength":{"type":"number","description":"The minimum allowed length for string inputs. Also applies to each item in \\`string_array\\`."},"backgroundBehavior":{"type":"string","description":"Specifies the background behavior for the input. 
Only available for \\`file\\` and \\`file_array\\`\ninput types with kind \\`image\\`.","enum":["opaque","transparent"]},"description":{"type":"string","description":"Help text displayed in the UI to provide additional information about the input"},"type":{"type":"string","description":"The data type of the input","enum":["boolean","file","file_array","inputs_array","model","model_array","number","number_array","string","string_array"]},"required":{"type":"object","description":"Set of rules that describes when this input is required:\n- \\`always\\`: Input is always required\n- \\`ifNotDefined\\`: Input is required when another specified input is not defined\n- \\`ifDefined\\`: Input is required when another specified input is defined\n- \\`conditionalValues\\`: Input is required when another input has a specific value\n\nBy default, the input is not required.","properties":{"always":{"type":"boolean","description":"Whether the input is always required"},"ifNotDefined":{"type":"object","description":"Makes this input required when another input is not defined:\n- Key: name of the input that must be undefined\n- Value: message to display when this input is required","properties":{}},"ifDefined":{"type":"object","description":"Makes this input required when another input is defined:\n- Key: name of the input that must be defined\n- Value: message to display when this input is required","properties":{}},"conditionalValues":{"type":"object","description":"Makes this input required when another input has a specific value:\n- Key: name of the input to check\n- Value: operation and allowed values that trigger the requirement","properties":{}}}},"modelTypes":{"type":"array","description":"The allowed model types for this input. 
Example: \\`[\"flux.1-lora\"]\\`.\nOnly available for \\`model_array\\` input type.","items":{"type":"string","enum":["custom","flux.1","flux.1-composition","flux.1-kontext-dev","flux.1-kontext-lora","flux.1-krea-dev","flux.1-krea-lora","flux.1-lora","flux.1-pro","flux.1.1-pro-ultra","flux1.1-pro","gpt-image-1","sd-1_5","sd-1_5-composition","sd-1_5-lora","sd-xl","sd-xl-composition","sd-xl-lora"]}},"min":{"type":"number","description":"The minimum allowed value. Only available for \\`number\\` and array input types."},"promptSpark":{"type":"boolean","description":"Whether the input is used with prompt spark. Only available for \\`string\\` input type."},"placeholder":{"type":"string","description":"Placeholder text for the input. Only available for 'string' input type."},"group":{"type":"string","description":"Used to visually group inputs together in the UI. Inputs with the same group value appear\nconsecutively in the UI."},"allowedValues":{"type":"array","description":"The allowed values for the input. For \\`string\\` or \\`number\\` types, creates a single-select\ndropdown.\nFor \\`string_array\\` type, creates a multi-select dropdown.","items":{}},"costImpact":{"type":"boolean","description":"Whether this input affects the model's cost calculation"},"max":{"type":"number","description":"The maximum allowed value. Only available for \\`number\\` and \\`array\\` input types."},"kind":{"type":"string","description":"The asset kind of the input. 
Only taken into account for \\`file\\` and \\`file_array\\` input types.","enum":["3d","audio","document","image","image-hdr","json","video"]},"maskFrom":{"type":"string","description":"The name of the file input field to use as the mask source"},"label":{"type":"string","description":"The label displayed in the UI for this input"},"hint":{"type":"string","description":"Hint text displayed in the UI as a tooltip to guide the user"},"name":{"type":"string","description":"The name that must be used to call the model through the API"},"step":{"type":"number","description":"The step increment for numeric inputs. Only available for \\`number\\` input type.","minimum":1},"prompt":{"type":"boolean","description":"Whether the input is a prompt. When true, displays as a text area with prompt spark feature.\nOnly available for \\`string\\` input type."},"maxLength":{"type":"number","description":"The maximum allowed length for \\`string\\` inputs. Also applies to each item in \\`string_array\\`."}},"required":["name","type"]}},"exampleAssetIds":{"type":"array","description":"List of all example asset IDs set up by the model owner","items":{"type":"string"}},"privacy":{"type":"string","description":"The privacy of the model (default: private)","enum":["private","public","unlisted"],"default":"private"},"softDeletionOn":{"type":"string","description":"The date when the model will be soft deleted (only for Free plan)"},"epoch":{"type":"string","description":"The epoch of the model. 
Only available for Flux Lora Trained models.\nIf not set, uses the final model epoch (latest)"},"uiConfig":{"type":"object","description":"The UI configuration for the model","properties":{"selects":{"type":"object","description":"Configuration for the selects","additionalProperties":{"type":"object","properties":{}}},"inputProperties":{"type":"object","description":"Configuration for the input properties","additionalProperties":{"type":"object","properties":{"collapsed":{"type":"boolean"}}}},"presets":{"type":"array","description":"Configuration for the presets","items":{"type":"object","properties":{"presets":{"type":"object","properties":{}},"fields":{"type":"array","items":{"type":"string"}}},"required":["fields","presets"]}},"triggerGenerate":{"type":"object","description":"Configuration for the trigger generate button","properties":{"after":{"type":"string","description":"The 'name' of the input where the trigger generate button will be displayed (after the input).\nDo not specify both position and after."},"label":{"type":"string"},"position":{"type":"string","description":"The position of the trigger generate button. 
If position specified, the button will be displayed at the specified position.\nDo not specify both position and after.","enum":["bottom","top"]}},"required":["label"]},"lorasComponent":{"type":"object","description":"Configuration for the loras component","properties":{"modelIdInput":{"type":"string","description":"The input model id (example: a composition or a single LoRA modelId)\nIf specified, the model id will be attached to the output asset as a metadata\nIf the model-decomposer parser is specified on it, modelInput and scaleInput will be automatically populated"},"scaleInput":{"type":"string","description":"The input name of the scale (number_array)"},"label":{"type":"string","description":"The label of the component"},"modelInput":{"type":"string","description":"The input name of the model (model_array)"}},"required":["label","modelInput","scaleInput"]},"resolutionComponent":{"type":"object","description":"Configuration for the resolution component","properties":{"presets":{"type":"array","description":"The resolution presets","items":{"type":"object","properties":{"width":{"type":"number"},"label":{"type":"string"},"height":{"type":"number"}},"required":["height","label","width"]}},"widthInput":{"type":"string","description":"The input name of the width"},"label":{"type":"string","description":"The label of the component"},"heightInput":{"type":"string","description":"The input name of the height"}},"required":["heightInput","label","presets","widthInput"]}}},"source":{"type":"string","description":"The source of the model","enum":["civitai","huggingface","other","scenario"]},"ownerId":{"type":"string","description":"The owner ID (example: \"team_VFhihHKMRZyDDnZAJwLb2Q\")"},"type":{"type":"string","description":"The model type (example: 
\"flux.1-lora\")","enum":["custom","flux.1","flux.1-composition","flux.1-kontext-dev","flux.1-kontext-lora","flux.1-krea-dev","flux.1-krea-lora","flux.1-lora","flux.1-pro","flux.1.1-pro-ultra","flux1.1-pro","gpt-image-1","sd-1_5","sd-1_5-composition","sd-1_5-lora","sd-xl","sd-xl-composition","sd-xl-lora"]},"createdAt":{"type":"string","description":"The model creation date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"complianceMetadata":{"type":"object","description":"Compliance and regulatory metadata for the model","properties":{"subProcessor":{"type":"string","description":"The sub-processor used for the model","enum":["AWS","Fal","Modal","Provider","Replicate"]},"modelProvider":{"type":"string","description":"The provider of the model (examples: 'Google', 'AWS', 'BFL', 'Meshy', etc.)"},"licenseTerms":{"type":"string","description":"URL to license terms"},"dataProcessingComment":{"type":"string","description":"Data processing comment (e.g., zeroRetention, temporaryRetention, etc.)","enum":["researchOnly","serviceImprovement","temporaryRetention","zeroRetention"]},"maintainer":{"type":"string","description":"The maintainer of the model","enum":["Fal","Provider","Replicate","Scenario"]}}},"trainingStats":{"type":"object","description":"Additional information about the model's training","properties":{"trainDuration":{"type":"number","description":"The training duration in seconds"},"queueDuration":{"type":"number","description":"The training queued duration in seconds"},"endedAt":{"type":"string","description":"The training end time as an ISO date string"},"startedAt":{"type":"string","description":"The training start time as an ISO date string"}}},"promptEmbedding":{"type":"string","description":"Fine-tune the model's inferences with prompt embedding"},"trainingProgress":{"type":"object","description":"Additional information about the training progress of the model","properties":{"stage":{"type":"string","description":"The stage of the 
request","enum":["pending","queued-for-train","running-train","starting-train"]},"remainingTimeMs":{"type":"number","description":"The remaining time in milliseconds"},"progress":{"type":"number","description":"The progress of the job","minimum":0,"maximum":1},"startedAt":{"type":"number","description":"The timestamp in milliseconds marking the start of the process"},"position":{"type":"number","description":"Position of the job in the queue (i.e. the number of jobs in the queue before this one)"},"updatedAt":{"type":"number","description":"Timestamp in milliseconds of the last time the training progress was updated"}},"required":["stage","updatedAt"]},"id":{"type":"string","description":"The model ID (example: \"model_eyVcnFJcR92BxBkz7N6g5w\")"},"class":{"type":"object","description":"The class of the model","properties":{"modelId":{"type":"string","description":"The model ID of the class (example: \"stable-diffusion-v1-5\")"},"name":{"type":"string","description":"The class name (example: \"Character Design\")"},"category":{"type":"string","description":"The category slug of the class (example: \"art-style\")"},"thumbnails":{"type":"array","description":"Some example image URLs to showcase the class","items":{"type":"string"}},"prompt":{"type":"string","description":"The class prompt (example: \"a character design\")"},"conceptPrompt":{"type":"string","description":"The concept prompt of the class (example: \"a sks character design\")"},"slug":{"type":"string","description":"The class slug (example: \"art-style-character-design\")"},"status":{"type":"string","description":"The class status (only published classes are listed, but unpublished classes can still appear in existing models)","enum":["published","unpublished"]}},"required":["category","conceptPrompt","modelId","name","prompt","slug","status","thumbnails"]},"updatedAt":{"type":"string","description":"The model last update date as an ISO string (example: 
\"2023-02-03T11:19:41.579Z\")"},"thumbnail":{"type":"object","description":"A thumbnail for your model","properties":{"assetId":{"type":"string","description":"The AssetId of the image used as a thumbnail for your model (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"url":{"type":"string","description":"The url of the image used as a thumbnail for your model"}},"required":["assetId","url"]},"accessRestrictions":{"type":"number","description":"The access restrictions of the model\n0: Free plan\n25: Creator plan\n50: Pro plan\n75: Team plan\n100: Enterprise plan"},"capabilities":{"type":"array","description":"List of model capabilities (example: [\"txt2img\", \"img2img\", \"txt2img_ip_adapter\", ...])","items":{"type":"string","enum":["3d23d","audio2audio","controlnet","controlnet_img2img","controlnet_inpaint","controlnet_inpaint_ip_adapter","controlnet_ip_adapter","controlnet_reference","controlnet_texture","img23d","img2img","img2img_ip_adapter","img2img_texture","img2txt","img2video","inpaint","inpaint_ip_adapter","outpaint","reference","reference_texture","txt23d","txt2audio","txt2img","txt2img_ip_adapter","txt2img_texture","txt2txt","txt2video","video2img","video2video"]}},"parentModelId":{"type":"string","description":"The id of the parent model"},"trainingImagePairs":{"type":"array","description":"Array of training image pairs","items":{"type":"object","properties":{"sourceId":{"type":"string","description":"The source asset ID (must be a training asset)"},"targetId":{"type":"string","description":"The target asset ID (must be a training asset)"},"instruction":{"type":"string","description":"The instruction for the image pair, source to target"}}}},"trainingImagesNumber":{"type":"number","description":"The total number of training images"},"custom":{"type":"boolean","description":"Whether the model is a custom model and can be used only with POST /generate/custom/{modelId} endpoint"},"modelKeyword":{"type":"string","description":"The model keyword, this is a 
legacy parameter, please use conceptPrompt in parameters"},"shortDescription":{"type":"string","description":"The model short description (example: \"This model generates highly detailed cinematic scenes.\")"},"authorId":{"type":"string","description":"The author user ID (example: \"user_VFhihHKMRZyDDnZAJwLb2Q\")"},"userId":{"type":"string","description":"(Deprecated) The user ID (example: \"user_VFhihHKMRZyDDnZAJwLb2Q\")"},"tags":{"type":"array","description":"The associated tags (example: [\"sci-fi\", \"landscape\"])","items":{"type":"string"}},"negativePromptEmbedding":{"type":"string","description":"Fine-tune the model's inferences with negative prompt embedding"},"concepts":{"type":"array","description":"The concepts is required for the type model: composition","items":{"type":"object","properties":{"modelId":{"type":"string","description":"The model ID (example: \"model_eyVcnFJcR92BxBkz7N6g5w\")"},"modelEpoch":{"type":"string","description":"The epoch of the model (example: \"000001\")\nOnly available for Flux Lora Trained models"},"scale":{"type":"number","description":"The scale of the model (example: 1.0)\nFor Flux Kontext Prompt Editing, the scale is between 0 and 2.","minimum":-2,"maximum":2}},"required":["modelId","scale"]}},"collectionIds":{"type":"array","description":"A list of CollectionId this model belongs to","items":{"type":"string"}},"name":{"type":"string","description":"The model name (example: \"Cinematic Realism\")"},"epochs":{"type":"array","description":"The epochs of the model. 
Only available for Flux Lora Trained models.","items":{"type":"object","properties":{"assets":{"type":"array","description":"The assets of the epoch if sample prompts have been supplied during training","items":{"type":"object","properties":{"assetId":{"type":"string","description":"The AssetId of the image during training (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"url":{"type":"string","description":"The url of the asset"}},"required":["assetId","url"]}},"epoch":{"type":"string","description":"The epoch hash to identify the epoch"}},"required":["epoch"]}},"parameters":{"type":"object","description":"The parameters of the model","properties":{"priorLossWeight":{"type":"number","description":"The weight of prior preservation loss\n\nOnly available for SD15 and SDXL LoRA training","minimum":0,"maximum":1.7976931348623157e+308,"exclusiveMinimum":true},"seed":{"type":"number","description":"Used to reproduce previous results. Default: randomly generated number.\n\nOnly available for SD15 and SDXL LoRA training","minimum":0,"maximum":9007199254740991},"numUNetTrainSteps":{"type":"number","description":"The number of training steps for the UNet\n\nOnly available for SDXL LoRA training","minimum":0,"maximum":40000},"sampleSourceImages":{"type":"array","description":"The sample prompt images (AssetIds) paired with samplePrompts\nOnly available for Flux LoRA training\nMust be the same length as samplePrompts","items":{"type":"string"}},"classPrompt":{"type":"string","description":"The prompt to specify images in the same class as provided instance images\n\nOnly available for SD15 training"},"wandbKey":{"type":"string","description":"The Weights and Biases key to use for logging. 
The maximum length is 40 characters"},"scaleLr":{"type":"boolean","description":"Whether to scale the learning rate\n\nNote: Legacy parameter, will be ignored\n\nOnly available for SD15 and SDXL LoRA training","default":false},"randomCropScale":{"type":"number","description":"Scale of random crops\n\nOnly available for SD15 and SDXL LoRA training","minimum":0,"maximum":1},"lrScheduler":{"type":"string","description":"The scheduler type to use (default: \"constant\")\n\nOnly available for SD15 and SDXL LoRA training","enum":["constant","constant-with-warmup","cosine","cosine-with-restarts","linear","polynomial"],"default":"constant"},"rank":{"type":"number","description":"The dimension of the LoRA update matrices\n\nOnly available for SDXL and Flux LoRA training\n\nDefault value varies depending on the model type:\n- For SDXL: 64\n- For Flux: 16","minimum":4,"maximum":64},"validationPrompt":{"type":"string","description":"Validation prompt\n\nOnly available for SD15 and SDXL LoRA training","default":""},"conceptPrompt":{"type":"string","description":"The prompt with identifier specifying the instance (or subject) of the class (example: \"a daiton dog\")\n\nDefault value varies depending on the model type:\n- For SD1.5: \"daiton\" if no class is associated with the model\n- For SDXL: \"daiton\"\n- For Flux: \"\""},"maxTrainSteps":{"type":"number","description":"Maximum number of training steps to execute (default: varies depending on the model type)\n\nFor SDXL LoRA training, please use `numTextTrainSteps` and `numUNetTrainSteps` instead\n\nDefault value varies depending on the model type:\n- For SD1.5: round((number of training images * 225) / 3)\n- For SDXL: number of training images * 175\n- For Flux: number of training images * 100\n\nMaximum value varies depending on the model type:\n- For SD1.5 and SDXL: [0, 40000]\n- For Flux: [0, 10000]","minimum":0,"maximum":40000},"nbEpochs":{"type":"number","description":"The number of epochs to train for\n\nOnly available 
for Flux LoRA training","minimum":1,"maximum":30},"textEncoderTrainingRatio":{"type":"number","description":"Whether to train the text encoder or not\n\nExample: For 100 steps and a value of 0.2, it means that the text encoder will be trained for 20 steps and then the UNet for 80 steps\n\nNote: Legacy parameter, please use `numTextTrainSteps` and `numUNetTrainSteps`\n\nOnly available for SD15 and SDXL LoRA training","minimum":0,"maximum":0.99},"validationFrequency":{"type":"number","description":"Validation frequency. Cannot be greater than maxTrainSteps value\n\nOnly available for SD15 and SDXL LoRA training","minimum":0},"nbRepeats":{"type":"number","description":"The number of times to repeat the training\n\nOnly available for Flux LoRA training","minimum":1,"maximum":30},"samplePrompts":{"type":"array","description":"The prompts to use for each epoch\nOnly available for Flux LoRA training","items":{"type":"string"}},"randomCropRatio":{"type":"number","description":"Ratio of random crops\n\nOnly available for SD15 and SDXL LoRA training","minimum":0,"maximum":1},"learningRate":{"type":"number","description":"Initial learning rate (after the potential warmup period)\n\nDefault value varies depending on the model type:\n- For SD1.5 and SDXL: 0.000005\n- For Flux: 0.0001","minimum":0,"exclusiveMinimum":true},"optimizeFor":{"type":"string","description":"Optimize the model training task for a specific type of input images. 
The available values are:\n- \"likeness\": optimize training for likeness or portrait (targets specific transformer blocks)\n- \"all\": train all transformer blocks\n- \"none\": train no specific transformer blocks\n\nThis parameter controls which double and single transformer blocks are trained\nduring the LoRA training process.\n\nOnly available for Flux LoRA training","enum":["all","likeness","none"]},"randomCrop":{"type":"boolean","description":"Whether to random crop or center crop images before resizing to the working resolution\n\nOnly available for SD15 and SDXL LoRA training","default":false},"numTextTrainSteps":{"type":"number","description":"The number of training steps for the text encoder\n\nOnly available for SDXL LoRA training","minimum":0,"maximum":40000},"learningRateTextEncoder":{"type":"number","description":"Initial learning rate (after the potential warmup period) for the text encoder\n\nMaximum [Flux LoRA: 0.001]\nDefault [SDXL: 0.00005 | Flux LoRA: 0.00001]\nMinimum [SDXL: 0 | Flux LoRA: 0.000001]","minimum":0,"maximum":0.001,"exclusiveMinimum":true},"learningRateUnet":{"type":"number","description":"Initial learning rate (after the potential warmup period) for the UNet\n\nOnly available for SDXL LoRA training","minimum":0,"exclusiveMinimum":true},"batchSize":{"type":"number","description":"The batch size\nA larger batch size results in fewer steps and will increase the learning rate\n\nOnly available for Flux LoRA training","minimum":1,"maximum":4}}},"compliantModelIds":{"type":"array","description":"List of base model IDs compliant with the model (example: [\"flux.1-dev\", \"flux.1-schnell\"])\nThis attribute is mainly used for Flux LoRA models","items":{"type":"string"}},"status":{"type":"string","description":"The model 
status","enum":["copying","failed","new","trained","training","training-canceled"]}},"required":["id","privacy","type"]}}}},"DeleteAssetsByCollectionIdResponse":{"type":"object","required":["collection"],"properties":{"collection":{"type":"object","properties":{"createdAt":{"type":"string","description":"The collection creation date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"thumbnail":{"type":"object","description":"The thumbnail for the collection (if any)","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"name":{"type":"string","description":"The collection name"},"assetCount":{"type":"number"},"id":{"type":"string","description":"The collection ID (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"ownerId":{"type":"string","description":"The owner ID (example: \"dcf121faaa1a0a0bbbd9ca1b73d62aea\")"},"modelCount":{"type":"number"},"itemCount":{"type":"number"},"updatedAt":{"type":"string","description":"The collection last update date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"}},"required":["assetCount","createdAt","id","itemCount","modelCount","name","ownerId","updatedAt"]}}},"PutModelsTrainingImagesPairsByModelIdResponse":{"type":"object","required":["count","pairs"],"properties":{"count":{"type":"number","description":"Number of training image pairs"},"pairs":{"type":"array","description":"Array of training image pairs","items":{"type":"object","properties":{"sourceId":{"type":"string","description":"The source asset ID (must be a training asset)"},"targetId":{"type":"string","description":"The target asset ID (must be a training asset)"},"instruction":{"type":"string","description":"The instruction for the image pair, source to target"}}}}}},"PutModelsByModelIdResponse":{"type":"object","required":["model"],"properties":{"model":{"type":"object","properties":{"trainingImages":{"type":"array","description":"The URLs of the first 3 training images of the model. 
To retrieve the full set of images, get it by modelId","items":{"type":"object","properties":{"createdAt":{"type":"string","description":"The training image upload date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"automaticCaptioning":{"type":"string","description":"Automatic captioning of the image"},"downloadUrl":{"type":"string","description":"The URL of the image"},"name":{"type":"string","description":"The original file name of the image (example: \"my-training-image.jpg\")"},"description":{"type":"string","description":"Description for the image"},"id":{"type":"string","description":"The training image ID (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"}},"required":["automaticCaptioning","createdAt","description","downloadUrl","id","name"]}},"inputs":{"type":"array","description":"The inputs of the model. Only used for custom models. To retrieve this list, get it by modelId with GET /models/{modelId}","items":{"type":"object","description":"Defines the input parameters for a model. Use this to understand the available input parameters\nfor a given `modelId` when calling `POST /generate/custom/{modelId}`.\nSee {@link https://docs.scenario.com/docs/video-generation} for examples.","properties":{"parent":{"type":"boolean","description":"Whether this input represents a parent asset to assign to the produced assets.\nOnly available for \\`file\\` and \\`file_array\\` input types.\n\nFor \\`file_array\\`, the parent asset is the first item in the array."},"color":{"type":"boolean","description":"Whether the input is a color or not. Only available for \\`string\\` input type."},"inputs":{"type":"array","description":"The list of inputs which form an object within a container array.\nAll inputs are the same as the current object.\nThis is only available for type inputs_array inputs.","items":{"type":"object","properties":{}}},"minLength":{"type":"number","description":"The minimum allowed length for string inputs. 
Also applies to each item in \\`string_array\\`."},"backgroundBehavior":{"type":"string","description":"Specifies the background behavior for the input. Only available for \\`file\\` and \\`file_array\\`\ninput types with kind \\`image\\`.","enum":["opaque","transparent"]},"description":{"type":"string","description":"Help text displayed in the UI to provide additional information about the input"},"type":{"type":"string","description":"The data type of the input","enum":["boolean","file","file_array","inputs_array","model","model_array","number","number_array","string","string_array"]},"required":{"type":"object","description":"Set of rules that describes when this input is required:\n- \\`always\\`: Input is always required\n- \\`ifNotDefined\\`: Input is required when another specified input is not defined\n- \\`ifDefined\\`: Input is required when another specified input is defined\n- \\`conditionalValues\\`: Input is required when another input has a specific value\n\nBy default, the input is not required.","properties":{"always":{"type":"boolean","description":"Whether the input is always required"},"ifNotDefined":{"type":"object","description":"Makes this input required when another input is not defined:\n- Key: name of the input that must be undefined\n- Value: message to display when this input is required","properties":{}},"ifDefined":{"type":"object","description":"Makes this input required when another input is defined:\n- Key: name of the input that must be defined\n- Value: message to display when this input is required","properties":{}},"conditionalValues":{"type":"object","description":"Makes this input required when another input has a specific value:\n- Key: name of the input to check\n- Value: operation and allowed values that trigger the requirement","properties":{}}}},"modelTypes":{"type":"array","description":"The allowed model types for this input. 
Example: \\`[\"flux.1-lora\"]\\`.\nOnly available for \\`model_array\\` input type.","items":{"type":"string","enum":["custom","flux.1","flux.1-composition","flux.1-kontext-dev","flux.1-kontext-lora","flux.1-krea-dev","flux.1-krea-lora","flux.1-lora","flux.1-pro","flux.1.1-pro-ultra","flux1.1-pro","gpt-image-1","sd-1_5","sd-1_5-composition","sd-1_5-lora","sd-xl","sd-xl-composition","sd-xl-lora"]}},"min":{"type":"number","description":"The minimum allowed value. Only available for \\`number\\` and array input types."},"promptSpark":{"type":"boolean","description":"Whether the input is used with prompt spark. Only available for \\`string\\` input type."},"placeholder":{"type":"string","description":"Placeholder text for the input. Only available for 'string' input type."},"group":{"type":"string","description":"Used to visually group inputs together in the UI. Inputs with the same group value appear\nconsecutively in the UI."},"allowedValues":{"type":"array","description":"The allowed values for the input. For \\`string\\` or \\`number\\` types, creates a single-select\ndropdown.\nFor \\`string_array\\` type, creates a multi-select dropdown.","items":{}},"costImpact":{"type":"boolean","description":"Whether this input affects the model's cost calculation"},"max":{"type":"number","description":"The maximum allowed value. Only available for \\`number\\` and \\`array\\` input types."},"kind":{"type":"string","description":"The asset kind of the input. 
Only taken into account for \\`file\\` and \\`file_array\\` input types.","enum":["3d","audio","document","image","image-hdr","json","video"]},"maskFrom":{"type":"string","description":"The name of the file input field to use as the mask source"},"label":{"type":"string","description":"The label displayed in the UI for this input"},"hint":{"type":"string","description":"Hint text displayed in the UI as a tooltip to guide the user"},"name":{"type":"string","description":"The name that must be used to call the model through the API"},"step":{"type":"number","description":"The step increment for numeric inputs. Only available for \\`number\\` input type.","minimum":1},"prompt":{"type":"boolean","description":"Whether the input is a prompt. When true, displays as a text area with prompt spark feature.\nOnly available for \\`string\\` input type."},"maxLength":{"type":"number","description":"The maximum allowed length for \\`string\\` inputs. Also applies to each item in \\`string_array\\`."}},"required":["name","type"]}},"exampleAssetIds":{"type":"array","description":"List of all example asset IDs set up by the model owner","items":{"type":"string"}},"privacy":{"type":"string","description":"The privacy of the model (default: private)","enum":["private","public","unlisted"],"default":"private"},"softDeletionOn":{"type":"string","description":"The date when the model will be soft deleted (only for Free plan)"},"epoch":{"type":"string","description":"The epoch of the model. 
Only available for Flux Lora Trained models.\nIf not set, uses the final model epoch (latest)"},"uiConfig":{"type":"object","description":"The UI configuration for the model","properties":{"selects":{"type":"object","description":"Configuration for the selects","additionalProperties":{"type":"object","properties":{}}},"inputProperties":{"type":"object","description":"Configuration for the input properties","additionalProperties":{"type":"object","properties":{"collapsed":{"type":"boolean"}}}},"presets":{"type":"array","description":"Configuration for the presets","items":{"type":"object","properties":{"presets":{"type":"object","properties":{}},"fields":{"type":"array","items":{"type":"string"}}},"required":["fields","presets"]}},"triggerGenerate":{"type":"object","description":"Configuration for the trigger generate button","properties":{"after":{"type":"string","description":"The 'name' of the input where the trigger generate button will be displayed (after the input).\nDo not specify both position and after."},"label":{"type":"string"},"position":{"type":"string","description":"The position of the trigger generate button. 
If position specified, the button will be displayed at the specified position.\nDo not specify both position and after.","enum":["bottom","top"]}},"required":["label"]},"lorasComponent":{"type":"object","description":"Configuration for the loras component","properties":{"modelIdInput":{"type":"string","description":"The input model id (example: a composition or a single LoRA modelId)\nIf specified, the model id will be attached to the output asset as a metadata\nIf the model-decomposer parser is specified on it, modelInput and scaleInput will be automatically populated"},"scaleInput":{"type":"string","description":"The input name of the scale (number_array)"},"label":{"type":"string","description":"The label of the component"},"modelInput":{"type":"string","description":"The input name of the model (model_array)"}},"required":["label","modelInput","scaleInput"]},"resolutionComponent":{"type":"object","description":"Configuration for the resolution component","properties":{"presets":{"type":"array","description":"The resolution presets","items":{"type":"object","properties":{"width":{"type":"number"},"label":{"type":"string"},"height":{"type":"number"}},"required":["height","label","width"]}},"widthInput":{"type":"string","description":"The input name of the width"},"label":{"type":"string","description":"The label of the component"},"heightInput":{"type":"string","description":"The input name of the height"}},"required":["heightInput","label","presets","widthInput"]}}},"source":{"type":"string","description":"The source of the model","enum":["civitai","huggingface","other","scenario"]},"ownerId":{"type":"string","description":"The owner ID (example: \"team_VFhihHKMRZyDDnZAJwLb2Q\")"},"type":{"type":"string","description":"The model type (example: 
\"flux.1-lora\")","enum":["custom","flux.1","flux.1-composition","flux.1-kontext-dev","flux.1-kontext-lora","flux.1-krea-dev","flux.1-krea-lora","flux.1-lora","flux.1-pro","flux.1.1-pro-ultra","flux1.1-pro","gpt-image-1","sd-1_5","sd-1_5-composition","sd-1_5-lora","sd-xl","sd-xl-composition","sd-xl-lora"]},"createdAt":{"type":"string","description":"The model creation date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"complianceMetadata":{"type":"object","description":"Compliance and regulatory metadata for the model","properties":{"subProcessor":{"type":"string","description":"The sub-processor used for the model","enum":["AWS","Fal","Modal","Provider","Replicate"]},"modelProvider":{"type":"string","description":"The provider of the model (examples: 'Google', 'AWS', 'BFL', 'Meshy', etc.)"},"licenseTerms":{"type":"string","description":"URL to license terms"},"dataProcessingComment":{"type":"string","description":"Data processing comment (e.g., zeroRetention, temporaryRetention, etc.)","enum":["researchOnly","serviceImprovement","temporaryRetention","zeroRetention"]},"maintainer":{"type":"string","description":"The maintainer of the model","enum":["Fal","Provider","Replicate","Scenario"]}}},"trainingStats":{"type":"object","description":"Additional information about the model's training","properties":{"trainDuration":{"type":"number","description":"The training duration in seconds"},"queueDuration":{"type":"number","description":"The training queued duration in seconds"},"endedAt":{"type":"string","description":"The training end time as an ISO date string"},"startedAt":{"type":"string","description":"The training start time as an ISO date string"}}},"promptEmbedding":{"type":"string","description":"Fine-tune the model's inferences with prompt embedding"},"trainingProgress":{"type":"object","description":"Additional information about the training progress of the model","properties":{"stage":{"type":"string","description":"The stage of the 
request","enum":["pending","queued-for-train","running-train","starting-train"]},"remainingTimeMs":{"type":"number","description":"The remaining time in milliseconds"},"progress":{"type":"number","description":"The progress of the job","minimum":0,"maximum":1},"startedAt":{"type":"number","description":"The timestamp in millisecond marking the start of the process"},"position":{"type":"number","description":"Position of the job in the queue (ie. the number of job in the queue before this one)"},"updatedAt":{"type":"number","description":"Timestamp in milliseconds of the last time the training progress was updated"}},"required":["stage","updatedAt"]},"id":{"type":"string","description":"The model ID (example: \"model_eyVcnFJcR92BxBkz7N6g5w\")"},"class":{"type":"object","description":"The class of the model","properties":{"modelId":{"type":"string","description":"The model ID of the class (example: \"stable-diffusion-v1-5\")"},"name":{"type":"string","description":"The class name (example: \"Character Design\")"},"category":{"type":"string","description":"The category slug of the class (example: \"art-style\")"},"thumbnails":{"type":"array","description":"Some example images URLs to showcase the class","items":{"type":"string"}},"prompt":{"type":"string","description":"The class prompt (example: \"a character design\")"},"conceptPrompt":{"type":"string","description":"The concept prompt of the class (example: \"a sks character design\")"},"slug":{"type":"string","description":"The class slug (example: \"art-style-character-design\")"},"status":{"type":"string","description":"The class status (only published classes are listed, but unpublished classes can still appear in existing models)","enum":["published","unpublished"]}},"required":["category","conceptPrompt","modelId","name","prompt","slug","status","thumbnails"]},"updatedAt":{"type":"string","description":"The model last update date as an ISO string (example: 
\"2023-02-03T11:19:41.579Z\")"},"thumbnail":{"type":"object","description":"A thumbnail for your model","properties":{"assetId":{"type":"string","description":"The AssetId of the image used as a thumbnail for your model (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"url":{"type":"string","description":"The url of the image used as a thumbnail for your model"}},"required":["assetId","url"]},"accessRestrictions":{"type":"number","description":"The access restrictions of the model\n0: Free plan\n25: Creator plan\n50: Pro plan\n75: Team plan\n100: Enterprise plan"},"capabilities":{"type":"array","description":"List of model capabilities (example: [\"txt2img\", \"img2img\", \"txt2img_ip_adapter\", ...])","items":{"type":"string","enum":["3d23d","audio2audio","controlnet","controlnet_img2img","controlnet_inpaint","controlnet_inpaint_ip_adapter","controlnet_ip_adapter","controlnet_reference","controlnet_texture","img23d","img2img","img2img_ip_adapter","img2img_texture","img2txt","img2video","inpaint","inpaint_ip_adapter","outpaint","reference","reference_texture","txt23d","txt2audio","txt2img","txt2img_ip_adapter","txt2img_texture","txt2txt","txt2video","video2img","video2video"]}},"parentModelId":{"type":"string","description":"The id of the parent model"},"trainingImagePairs":{"type":"array","description":"Array of training image pairs","items":{"type":"object","properties":{"sourceId":{"type":"string","description":"The source asset ID (must be a training asset)"},"targetId":{"type":"string","description":"The target asset ID (must be a training asset)"},"instruction":{"type":"string","description":"The instruction for the image pair, source to target"}}}},"trainingImagesNumber":{"type":"number","description":"The total number of training images"},"custom":{"type":"boolean","description":"Whether the model is a custom model and can be used only with POST /generate/custom/{modelId} endpoint"},"modelKeyword":{"type":"string","description":"The model keyword, this is a 
legacy parameter, please use conceptPrompt in parameters"},"shortDescription":{"type":"string","description":"The model short description (example: \"This model generates highly detailed cinematic scenes.\")"},"authorId":{"type":"string","description":"The author user ID (example: \"user_VFhihHKMRZyDDnZAJwLb2Q\")"},"userId":{"type":"string","description":"(Deprecated) The user ID (example: \"user_VFhihHKMRZyDDnZAJwLb2Q\")"},"tags":{"type":"array","description":"The associated tags (example: [\"sci-fi\", \"landscape\"])","items":{"type":"string"}},"negativePromptEmbedding":{"type":"string","description":"Fine-tune the model's inferences with negative prompt embedding"},"concepts":{"type":"array","description":"The concepts field is required for the model type: composition","items":{"type":"object","properties":{"modelId":{"type":"string","description":"The model ID (example: \"model_eyVcnFJcR92BxBkz7N6g5w\")"},"modelEpoch":{"type":"string","description":"The epoch of the model (example: \"000001\")\nOnly available for Flux Lora Trained models"},"scale":{"type":"number","description":"The scale of the model (example: 1.0)\nFor Flux Kontext Prompt Editing, the scale is between 0 and 2.","minimum":-2,"maximum":2}},"required":["modelId","scale"]}},"collectionIds":{"type":"array","description":"A list of CollectionId this model belongs to","items":{"type":"string"}},"name":{"type":"string","description":"The model name (example: \"Cinematic Realism\")"},"epochs":{"type":"array","description":"The epochs of the model. 
Only available for Flux Lora Trained models.","items":{"type":"object","properties":{"assets":{"type":"array","description":"The assets of the epoch if sample prompts have been supplied during training","items":{"type":"object","properties":{"assetId":{"type":"string","description":"The AssetId of the image during training (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"url":{"type":"string","description":"The url of the asset"}},"required":["assetId","url"]}},"epoch":{"type":"string","description":"The epoch hash to identify the epoch"}},"required":["epoch"]}},"parameters":{"type":"object","description":"The parameters of the model","properties":{"priorLossWeight":{"type":"number","description":"The weight of prior preservation loss\n\nOnly available for SD15 and SDXL LoRA training","minimum":0,"maximum":1.7976931348623157e+308,"exclusiveMinimum":true},"seed":{"type":"number","description":"Used to reproduce previous results. Default: randomly generated number.\n\nOnly available for SD15 and SDXL LoRA training","minimum":0,"maximum":9007199254740991},"numUNetTrainSteps":{"type":"number","description":"The number of training steps for the UNet\n\nOnly available for SDXL LoRA training","minimum":0,"maximum":40000},"sampleSourceImages":{"type":"array","description":"The sample prompt images (AssetIds) paired with samplePrompts\nOnly available for Flux LoRA training\nMust be the same length as samplePrompts","items":{"type":"string"}},"classPrompt":{"type":"string","description":"The prompt to specify images in the same class as provided instance images\n\nOnly available for SD15 training"},"wandbKey":{"type":"string","description":"The Weights and Biases key to use for logging. 
The maximum length is 40 characters"},"scaleLr":{"type":"boolean","description":"Whether to scale the learning rate\n\nNote: Legacy parameter, will be ignored\n\nOnly available for SD15 and SDXL LoRA training","default":false},"randomCropScale":{"type":"number","description":"Scale of random crops\n\nOnly available for SD15 and SDXL LoRA training","minimum":0,"maximum":1},"lrScheduler":{"type":"string","description":"The scheduler type to use (default: \"constant\")\n\nOnly available for SD15 and SDXL LoRA training","enum":["constant","constant-with-warmup","cosine","cosine-with-restarts","linear","polynomial"],"default":"constant"},"rank":{"type":"number","description":"The dimension of the LoRA update matrices\n\nOnly available for SDXL and Flux LoRA training\n\nDefault value varies depending on the model type:\n- For SDXL: 64\n- For Flux: 16","minimum":4,"maximum":64},"validationPrompt":{"type":"string","description":"Validation prompt\n\nOnly available for SD15 and SDXL LoRA training","default":""},"conceptPrompt":{"type":"string","description":"The prompt with identifier specifying the instance (or subject) of the class (example: \"a daiton dog\")\n\nDefault value varies depending on the model type:\n- For SD1.5: \"daiton\" if no class is associated with the model\n- For SDXL: \"daiton\"\n- For Flux: \"\""},"maxTrainSteps":{"type":"number","description":"Maximum number of training steps to execute (default: varies depending on the model type)\n\nFor SDXL LoRA training, please use `numTextTrainSteps` and `numUNetTrainSteps` instead\n\nDefault value varies depending on the model type:\n- For SD1.5: round((number of training images * 225) / 3)\n- For SDXL: number of training images * 175\n- For Flux: number of training images * 100\n\nMaximum value varies depending on the model type:\n- For SD1.5 and SDXL: [0, 40000]\n- For Flux: [0, 10000]","minimum":0,"maximum":40000},"nbEpochs":{"type":"number","description":"The number of epochs to train for\n\nOnly available 
for Flux LoRA training","minimum":1,"maximum":30},"textEncoderTrainingRatio":{"type":"number","description":"Whether to train the text encoder or not\n\nExample: For 100 steps and a value of 0.2, it means that the text encoder will be trained for 20 steps and then the UNet for 80 steps\n\nNote: Legacy parameter, please use `numTextTrainSteps` and `numUNetTrainSteps`\n\nOnly available for SD15 and SDXL LoRA training","minimum":0,"maximum":0.99},"validationFrequency":{"type":"number","description":"Validation frequency. Cannot be greater than maxTrainSteps value\n\nOnly available for SD15 and SDXL LoRA training","minimum":0},"nbRepeats":{"type":"number","description":"The number of times to repeat the training\n\nOnly available for Flux LoRA training","minimum":1,"maximum":30},"samplePrompts":{"type":"array","description":"The prompts to use for each epoch\nOnly available for Flux LoRA training","items":{"type":"string"}},"randomCropRatio":{"type":"number","description":"Ratio of random crops\n\nOnly available for SD15 and SDXL LoRA training","minimum":0,"maximum":1},"learningRate":{"type":"number","description":"Initial learning rate (after the potential warmup period)\n\nDefault value varies depending on the model type:\n- For SD1.5 and SDXL: 0.000005\n- For Flux: 0.0001","minimum":0,"exclusiveMinimum":true},"optimizeFor":{"type":"string","description":"Optimize the model training task for a specific type of input images. 
The available values are:\n- \"likeness\": optimize training for likeness or portrait (targets specific transformer blocks)\n- \"all\": train all transformer blocks\n- \"none\": train no specific transformer blocks\n\nThis parameter controls which double and single transformer blocks are trained\nduring the LoRA training process.\n\nOnly available for Flux LoRA training","enum":["likeness"],"default":"undefined"},"randomCrop":{"type":"boolean","description":"Whether to random crop or center crop images before resizing to the working resolution\n\nOnly available for SD15 and SDXL LoRA training","default":false},"numTextTrainSteps":{"type":"number","description":"The number of training steps for the text encoder\n\nOnly available for SDXL LoRA training","minimum":0,"maximum":40000},"learningRateTextEncoder":{"type":"number","description":"Initial learning rate (after the potential warmup period) for the text encoder\n\nMaximum [Flux LoRA: 0.001]\nDefault [SDXL: 0.00005 | Flux LoRA: 0.00001]\nMinimum [SDXL: 0 | Flux LoRA: 0.000001]","minimum":0,"maximum":0.001,"exclusiveMinimum":true},"learningRateUnet":{"type":"number","description":"Initial learning rate (after the potential warmup period) for the UNet\n\nOnly available for SDXL LoRA training","minimum":0,"exclusiveMinimum":true},"batchSize":{"type":"number","description":"The batch size\nLess steps, and will increase the learning rate\n\nOnly available for Flux LoRA training","minimum":1,"maximum":4}}},"compliantModelIds":{"type":"array","description":"List of base model IDs compliant with the model (example: [\"flux.1-dev\", \"flux.1-schnell\"])\nThis attribute is mainly used for Flux LoRA models","items":{"type":"string"}},"status":{"type":"string","description":"The model 
status","enum":["copying","failed","new","trained","training","training-canceled"]}},"required":["capabilities","collectionIds","createdAt","custom","exampleAssetIds","id","privacy","source","status","tags","trainingImagesNumber","type","updatedAt"]}}},"PostDownloadAssetsRequest":{"type":"object","required":["options","query"],"properties":{"query":{"type":"object","properties":{"modelIds":{"type":"array","description":"All assets issued from the provided model ids will be included in the archive","items":{"type":"string"}},"inferenceIds":{"type":"array","description":"All assets issued from the provided inference ids will be included in the archive","items":{"type":"string"}},"assetIds":{"type":"array","description":"Every individual asset specified will be included in the archive","items":{"type":"string"}}},"required":["assetIds","inferenceIds","modelIds"]},"options":{"type":"object","properties":{"fileNameTemplate":{"type":"string","description":"A file naming convention as a string with the following available parameters:\n<seed> (seed used to generate the asset)\n<num> (index of the asset in the inference)\n<prompt> (prompt of the inference)\n<generator> (prompt of the generator)\nExample: \"<generator>-<prompt>-<num>-<seed>\""},"flat":{"type":"boolean","description":"Flag to prevent grouping assets in directories and store them flat"}},"required":["fileNameTemplate"]}}},"PostTranslateInferencesResponse":{"type":"object","required":["detectedLanguage","job","translation"],"properties":{"creativeUnitsCost":{"type":"number","description":"The Creative Units cost for the request billed"},"creativeUnitsDiscount":{"type":"number","description":"The Creative Units discount for the request billed"},"translation":{"type":"string","description":"The translated prompt."},"detectedLanguage":{"type":"string","description":"Detected language of the input."},"job":{"type":"object","properties":{"createdAt":{"type":"string","description":"The job creation date as an ISO 
string (example: \"2023-02-03T11:19:41.579Z\")"},"jobId":{"type":"string","description":"The job ID (example: \"job_ocZCnG1Df35XRL1QyCZSRxAG8\")"},"metadata":{"type":"object","description":"Metadata of the job with some additional information","properties":{"output":{"type":"object","description":"The output of the job","properties":{}},"input":{"type":"object","description":"The inputs for the job","properties":{}},"assetIds":{"type":"array","description":"List of produced assets for this job","items":{"type":"string"}},"flow":{"type":"array","description":"The flow of the job. Only available for workflow jobs.","items":{"type":"object","properties":{"jobId":{"type":"string","description":"If the flow is part of a WorkflowJob, this is the jobId for the node.\njobId is only available for nodes started. A node \"Pending\" for a running workflow job is not started."},"assets":{"type":"array","description":"List of produced assets for this node.","items":{"type":"object","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]}},"logicType":{"type":"string","description":"The type of the logic for the node.\nOnly available for logic nodes.","enum":["if-else"]},"dependsOn":{"type":"array","description":"The nodes that this node depends on.\nOnly available for nodes that have dependencies. Mainly used for user approval nodes.","items":{"type":"string"}},"modelId":{"type":"string","description":"The model id for the node. 
Mainly used for custom model tasks."},"inputs":{"type":"array","description":"The inputs of the node.","items":{"type":"object","description":"This is used to run the flow.","properties":{"parent":{"type":"boolean","description":"Whether this input represents a parent asset to assign to the produced assets.\nOnly available for \\`file\\` and \\`file_array\\` input types.\n\nFor \\`file_array\\`, the parent asset is the first item in the array."},"color":{"type":"boolean","description":"Whether the input is a color or not. Only available for \\`string\\` input type."},"inputs":{"type":"array","description":"The list of inputs which form an object within a container array.\nAll inputs are the same as the current object.\nThis is only available for type inputs_array inputs.","items":{"type":"object","properties":{}}},"minLength":{"type":"number","description":"The minimum allowed length for string inputs. Also applies to each item in \\`string_array\\`."},"backgroundBehavior":{"type":"string","description":"Specifies the background behavior for the input. 
Only available for \\`file\\` and \\`file_array\\`\ninput types with kind \\`image\\`.","enum":["opaque","transparent"]},"description":{"type":"string","description":"Help text displayed in the UI to provide additional information about the input"},"type":{"type":"string","description":"The data type of the input","enum":["boolean","file","file_array","inputs_array","model","model_array","number","number_array","string","string_array"]},"required":{"type":"object","description":"Set of rules that describes when this input is required:\n- \\`always\\`: Input is always required\n- \\`ifNotDefined\\`: Input is required when another specified input is not defined\n- \\`ifDefined\\`: Input is required when another specified input is defined\n- \\`conditionalValues\\`: Input is required when another input has a specific value\n\nBy default, the input is not required.","properties":{"always":{"type":"boolean","description":"Whether the input is always required"},"ifNotDefined":{"type":"object","description":"Makes this input required when another input is not defined:\n- Key: name of the input that must be undefined\n- Value: message to display when this input is required","properties":{}},"ifDefined":{"type":"object","description":"Makes this input required when another input is defined:\n- Key: name of the input that must be defined\n- Value: message to display when this input is required","properties":{}},"conditionalValues":{"type":"object","description":"Makes this input required when another input has a specific value:\n- Key: name of the input to check\n- Value: operation and allowed values that trigger the requirement","properties":{}}}},"modelTypes":{"type":"array","description":"The allowed model types for this input. 
Example: \\`[\"flux.1-lora\"]\\`.\nOnly available for \\`model_array\\` input type.","items":{"type":"string","enum":["custom","flux.1","flux.1-composition","flux.1-kontext-dev","flux.1-kontext-lora","flux.1-krea-dev","flux.1-krea-lora","flux.1-lora","flux.1-pro","flux.1.1-pro-ultra","flux1.1-pro","gpt-image-1","sd-1_5","sd-1_5-composition","sd-1_5-lora","sd-xl","sd-xl-composition","sd-xl-lora"]}},"ref":{"type":"object","description":"The reference to another input or output of the same workflow.\nMust have at least one of node or conditional.","properties":{"equal":{"type":"string","description":"This is the desired node output value if ref is an if/else node."},"node":{"type":"string","description":"The node id or 'workflow' if the source is a workflow input."},"conditional":{"type":"array","description":"The conditional nodes to reference.\nIf the conditional nodes are successful, the node will be successful.\nIf the conditional nodes are skipped, the node will be skipped.\nContains an array of node ids used to check the status of the nodes.","items":{"type":"string"}},"name":{"type":"string","description":"The name of the input or output to reference.\nIf the type is 'workflow', the name is the name of the input of the workflow is required\nIf the type is 'node', the name is not mandatory, except if you want all outputs of the node.\nTo get all outputs of a node, you can use the name 'all'."}}},"min":{"type":"number","description":"The minimum allowed value. Only available for \\`number\\` and array input types."},"promptSpark":{"type":"boolean","description":"Whether the input is used with prompt spark. Only available for \\`string\\` input type."},"placeholder":{"type":"string","description":"Placeholder text for the input. Only available for 'string' input type."},"group":{"type":"string","description":"Used to visually group inputs together in the UI. 
Inputs with the same group value appear\nconsecutively in the UI."},"allowedValues":{"type":"array","description":"The allowed values for the input. For \\`string\\` or \\`number\\` types, creates a single-select\ndropdown.\nFor \\`string_array\\` type, creates a multi-select dropdown.","items":{}},"costImpact":{"type":"boolean","description":"Whether this input affects the model's cost calculation"},"max":{"type":"number","description":"The maximum allowed value. Only available for \\`number\\` and \\`array\\` input types."},"kind":{"type":"string","description":"The asset kind of the input. Only taken into account for \\`file\\` and \\`file_array\\` input types.","enum":["3d","audio","document","image","image-hdr","json","video"]},"maskFrom":{"type":"string","description":"The name of the file input field to use as the mask source"},"label":{"type":"string","description":"The label displayed in the UI for this input"},"hint":{"type":"string","description":"Hint text displayed in the UI as a tooltip to guide the user"},"name":{"type":"string","description":"The name that must be used to call the model through the API"},"step":{"type":"number","description":"The step increment for numeric inputs. Only available for \\`number\\` input type.","minimum":1},"prompt":{"type":"boolean","description":"Whether the input is a prompt. When true, displays as a text area with prompt spark feature.\nOnly available for \\`string\\` input type."},"maxLength":{"type":"number","description":"The maximum allowed length for \\`string\\` inputs. 
Also applies to each item in \\`string_array\\`."}},"required":["name","type"]}},"id":{"type":"string","description":"The id of the node."},"logic":{"type":"object","description":"The logic of the node.\nOnly available for logic nodes.","properties":{"default":{"type":"string","description":"The default case of the logic.\nContains the id/output of the node to execute if no case is matched.\nOnly available for if/else nodes."},"transform":{"type":"string","description":"The transform of the logic.\nOnly available for transform nodes."},"cases":{"type":"array","description":"The cases of the logic.\nOnly available for if/else nodes.","items":{"type":"object","properties":{"condition":{"type":"string"},"value":{"type":"string"}},"required":["condition","value"]}}}},"type":{"type":"string","description":"The type of the job for the node.","enum":["custom-model","generate-prompt","logic","remove-background","transform","user-approval","workflow"]},"workflowId":{"type":"string","description":"The workflow id for the node. Mainly used for workflow tasks."},"status":{"type":"string","description":"The status of the node. 
Only available for WorkflowJob nodes.","enum":["failure","pending","processing","skipped","success"]}},"required":["id","status","type"]}},"workflowId":{"type":"string","description":"The workflow ID of the job if job is part of a workflow."},"workflowJobId":{"type":"string","description":"The workflow job ID of the job if job is part of a workflow job."}}},"statusHistory":{"type":"array","description":"The history of the different statuses the job went through with the ISO string date\nof when the job reached each status.","items":{"type":"object","properties":{"date":{"type":"string"},"status":{"type":"string","enum":["canceled","failure","finalizing","in-progress","pending","queued","success","warming-up"]}},"required":["date","status"]}},"progress":{"type":"number","description":"Progress of the job (between 0 and 1)"},"authorId":{"type":"string","description":"The author user ID (example: \"dcf121faaa1a0a0bbbd9ca1b73d62aea\")"},"jobType":{"type":"string","description":"The type of job","enum":["assets-download","canvas-export","caption","caption-llava","custom","describe-style","detection","embed","flux","flux-model-training","generate-prompt","image-generation","image-prompt-editing","inference","mesh-preview-rendering","model-download","model-import","model-training","openai-image-generation","patch-image","pixelate","reframe","remove-background","repaint","restyle","segment","skybox-3d","skybox-base-360","skybox-hdri","skybox-upscale-360","texture","translate","upload","upscale","upscale-skybox","upscale-texture","vectorize","workflow"]},"ownerId":{"type":"string","description":"The owner ID (example: \"team_U3Qmc8PCdWXwAQJ4Dvw4tV6D\")"},"billing":{"type":"object","description":"The billing of the job","properties":{"cuDiscount":{"type":"number"},"cuCost":{"type":"number"}},"required":["cuCost","cuDiscount"]},"status":{"type":"string","description":"The current status of the 
job","enum":["canceled","failure","finalizing","in-progress","pending","queued","success","warming-up"]},"updatedAt":{"type":"string","description":"The job last update date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"}},"required":["createdAt","jobId","jobType","metadata","progress","status","statusHistory","updatedAt"]}}},"GetCanvasAssetSnapshotsResponse":{"type":"object","required":["snapshots"],"properties":{"snapshots":{"type":"array","items":{"type":"object","properties":{"takenAt":{"type":"number"},"rawData":{"type":"string"},"authorId":{"type":"string"},"hash":{"type":"string"}},"required":["authorId","hash","rawData","takenAt"]}},"nextPaginationToken":{"type":"string","description":"A token to query the next page of snapshots"}}},"PostSkyboxUpscale360InferencesRequest":{"type":"object","required":["image"],"properties":{"image":{"type":"string","description":"The 360 image to upscale. Must reference an existing AssetId or a data URL."},"styleFidelity":{"type":"number","description":"Condition the influence of the style image. The higher the value, the more the style image will influence the upscaled skybox image. Default: 80","minimum":0,"maximum":100},"sharpen":{"type":"boolean","description":"Sharpen tiles.","default":false},"seed":{"type":"number","description":"Used to reproduce previous results. Default: randomly generated number.","minimum":0,"maximum":2147483647},"styleImages":{"type":"array","description":"List of style images. Most of the time, only one image is enough. It must be existing AssetIds.","items":{"type":"string"}},"scalingFactor":{"type":"number","description":"Scaling factor (when `targetWidth` not specified)","minimum":1,"maximum":8},"detailsLevel":{"type":"number","description":"Amount of details to remove or add","minimum":-50,"maximum":50},"negativePrompt":{"type":"string","description":"A negative full text prompt that discourages the skybox upscale from generating certain characteristics. 
It is recommended to test without using a negative prompt. Default: empty string. Example: \"Low resolution, blurry, pixelated, noisy.\""},"overrideEmbeddings":{"type":"boolean","description":"Override the embeddings of the model. Only your prompt and negativePrompt will be used. Use with caution."},"promptFidelity":{"type":"number","description":"Increase the fidelity to the prompt during upscale.","minimum":0,"maximum":100},"colorCorrection":{"type":"boolean","description":"Ensure upscaled tile have the same color histogram as original tile.","default":true},"prompt":{"type":"string","description":"A full text prompt to guide the skybox upscale. Default: empty string. Example: \"a mountain landscape\""},"targetWidth":{"type":"number","description":"Target width for the upscaled image, take priority over scaling factor","minimum":1024,"maximum":16000}}},"PutPromptGeneratorResponse":{"type":"object","required":["mode","newPredictions"],"properties":{"mode":{"type":"string","description":"The prompt generator mode used to generate the new predictions.\n\nThe mode might be different from the one provided in the request. 
Depending on the given parameter inputs.\nEx: if the structured mode is provided, but there is either a `modelId` provided or one or more `assetId` provided, inventive mode will be used to match the determined prompt examples.","enum":["completion","contextual","image-captioning","image-editing","inventive","structured","style-description","translation"]},"synthesis":{"type":"string","description":"A short synthesis that describes the given asset(s) or asset(s) model.\n\nOnly available for `style-description`."},"newPredictions":{"type":"array","description":"The generated prompts new predictions.","items":{"type":"string"}},"detectedLanguage":{"type":"string","description":"Detected language of the input.\n\nOnly available for `translation` mode."}},"description":"The prompt generator API response"},"PutImagesDetectionRequest":{"type":"object","required":["modality"],"properties":{"image":{"type":"string","description":"The input image as a data URL (example: \"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVQYV2NgYAAAAAMAAWgmWQ0AAAAASUVORK5CYII=\") or the asset ID (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")."},"modality":{"type":"string","enum":["canny","depth","grayscale","lineart","lines","normal-map","pose","scribble","seg"]},"assetId":{"type":"string","description":"Deprecated: The asset ID (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\"). Prefer to use image with the asset ID instead."},"name":{"type":"string","description":"The original file name of the image (example: \"low-res-image.jpg\"). 
It will be ignored if assetId is provided."},"parameters":{"type":"object","properties":{"thresholdMin":{"type":"number","description":"For Grayscale modality\nThe minimum threshold to apply to the grayscale (example: 10)."},"lowThreshold":{"type":"number","description":"For Canny modality\nThe threshold for the hysteresis procedure (example: 100)."},"removeBackground":{"type":"boolean","description":"For Grayscale modality\nWhether to remove the background (example: true).","default":true},"factor":{"type":"number","description":"For Grayscale modality\nThe factor to apply to the grayscale (example: 5)."},"highThreshold":{"type":"number","description":"For Canny modality\nThe threshold for the hysteresis procedure (example: 200).\nIt must be larger than lowThreshold.\nIt is recommended to be 2 or 3 times larger than lowThreshold.\nIf not provided, it will be set to 2 * lowThreshold."},"thresholdMax":{"type":"number","description":"For Grayscale modality\nThe maximum threshold to apply to the grayscale (example: 90)."}}}}},"ForbiddenResponse":{"type":"object","required":["apiError","createdAt","error","id","infos","updatedAt"],"properties":{"createdAt":{"type":"string"},"apiError":{"type":"string"},"id":{"type":"string"},"error":{"type":"string"},"infos":{"type":"object","properties":{}},"updatedAt":{"type":"string"}}},"PostInpaintInferencesRequest":{"type":"object","required":["modelId","prompt"],"properties":{"image":{"type":"string","description":"The input image as a data URL (example: \"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVQYV2NgYAAAAAMAAWgmWQ0AAAAASUVORK5CYII=\") or the asset ID (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"imageId":{"type":"string","description":"Deprecated: The input image as an AssetId. Prefer to use image with the asset ID instead."},"seed":{"type":"string","description":"Used to reproduce previous results. 
Default: randomly generated number."},"strength":{"type":"number","description":"Controls the noise intensity introduced to the input image, where a value of 1.0 completely erases the original image's details. Available for img2img and inpainting. (within [0.01, 1.0], default: 0.75)","minimum":0.01,"maximum":1},"modelId":{"type":"string","description":"The model id to use for the inference"},"modelEpoch":{"type":"string","description":"The epoch of the model to use for the inference. Only available for Flux Lora Trained models."},"hideResults":{"type":"boolean","description":"If set, generated assets will be hidden and not returned in the list of images of the inference\nor when listing assets (default: false)","default":false},"maskId":{"type":"string","description":"The mask as an AssetId. Will be ignored if the `image` parameter is provided"},"negativePrompt":{"type":"string","description":"The prompt not to guide the image generation, ignored when guidance < 1 (example: \"((ugly face))\")\nFor Flux based model (not Fast-Flux): requires negativePromptStrength > 0 and active only for inference types txt2img / img2img / controlnet."},"scheduler":{"type":"string","description":"The scheduler to use to override the default configured for the model. 
See detailed documentation for more details.","enum":["DDIMScheduler","DDPMScheduler","DEISMultistepScheduler","DPMSolverMultistepScheduler","DPMSolverSinglestepScheduler","EulerAncestralDiscreteScheduler","EulerDiscreteScheduler","HeunDiscreteScheduler","KDPM2AncestralDiscreteScheduler","KDPM2DiscreteScheduler","LCMScheduler","LMSDiscreteScheduler","PNDMScheduler","TCDScheduler","UniPCMultistepScheduler"]},"intermediateImages":{"type":"boolean","description":"Enable or disable the intermediate images generation (default: false)","default":false},"concepts":{"type":"array","items":{"type":"object","properties":{"modelId":{"type":"string","description":"The model ID (example: \"model_eyVcnFJcR92BxBkz7N6g5w\")"},"modelEpoch":{"type":"string","description":"The epoch of the model (example: \"000001\")\nOnly available for Flux Lora Trained models"},"scale":{"type":"number","description":"The scale of the model (example: 1.0)\nFor Flux Kontext Prompt Editing, the scale is between 0 and 2.","minimum":-2,"maximum":2}},"required":["modelId","scale"]}},"disableMerging":{"type":"boolean","description":"If set to true, the entire input image will likely change during inpainting. This results in faster inferences, but the output image will be harder to integrate if the input is just a small part of a larger image.","default":false},"guidance":{"type":"number","description":"Controls how closely the generated image follows the prompt. Higher values result in stronger adherence to the prompt. 
Default and allowed values depend on the model type:\n- For Flux dev models, the default is 3.5 and allowed values are within [0, 10]\n- For Flux pro models, the default is 3 and allowed values are within [2, 5]\n- For SDXL models, the default is 6 and allowed values are within [0, 20]\n- For SD1.5 models, the default is 7.5 and allowed values are within [0, 20]","minimum":0,"maximum":20},"numInferenceSteps":{"type":"number","description":"The number of denoising steps for each image generation (within [1, 150], default: 30)","minimum":1,"maximum":150},"numSamples":{"type":"number","description":"The number of images to generate (within [1, 128], default: 4)","minimum":1,"maximum":128},"width":{"type":"number","description":"The width of the generated images, must be a 8 multiple (within [64, 2048], default: 512)\nIf model.type is `sd-xl`, `sd-xl-lora`, `sd-xl-composition` the width must be within [512, 2048]\nIf model.type is `sd-1_5`, the width must be within [64, 1024]\nIf model.type is `flux.1.1-pro-ultra`, you can use the aspectRatio parameter instead","minimum":64,"maximum":2048},"imageParentId":{"type":"string","description":"Specifies the parent asset Id for the image when provided as a dataurl."},"negativePromptStrength":{"type":"number","description":"Only applicable for flux-dev based models for `txt2img`, `img2img`, and `controlnet` inference types.\n\nControls the influence of the negative prompt. Default 0 means the negative prompt has no effect. Higher values increase negative prompt influence.\nMust be > 0 if negativePrompt is provided.","minimum":0,"maximum":10},"baseModelId":{"type":"string","description":"The base model to use for the inference. Only Flux LoRA models can use this parameter.\nAllowed values are available in the model's attribute: `compliantModelIds`"},"prompt":{"type":"string","description":"Full text prompt including the model placeholder. 
(example: \"an illustration of phoenix in a fantasy world, flying over a mountain, 8k, bokeh effect\")"},"height":{"type":"number","description":"The height of the generated images, must be a 8 multiple (within [64, 2048], default: 512)\nIf model.type is `sd-xl`, `sd-xl-lora`, `sd-xl-composition` the height must be within [512, 2048]\nIf model.type is `sd-1_5`, the height must be within [64, 1024]\nIf model.type is `flux.1.1-pro-ultra`, you can use the aspectRatio parameter instead","minimum":64,"maximum":2048},"imageHide":{"type":"boolean","description":"Toggles the hidden status of the image when provided as a dataurl.","default":false},"mask":{"type":"string","description":"The mask as a data URL, used to determine the area of change. The mask is a binary mask made out of white and black pixels. The white area is the one that will be replaced. (example: \"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAgAAAAIACAMAAADDpiTIAAABiVBMVEUAAADw8PDCwsLExMT5+fn19fX8/Pz////+/v79/f2hoaH6+vrc3NxnZ2dDQ0P4+PhkZGTs7OzOzs6Ojo709PRiYmLd3d1paWkoKCji4uI9PT3n5+fe3t7z8/NISEiysrLg4ODk5OSYmJh/f3/u7u5lZWVRUVHS0tKIiIg+Pj7p6emXl5dUVFQYGBjKysqtra1TU1PT09M8PDwcHBzR0dHq6uoEBAQmJiZ8fHzm5ub7+/swMDCrq6uKioqpqalHR0c3NzdOTk6BgYF7e3uwsLCAgIB3d3empqaNjY06OjrW1tZhYWG0tLQgICBxcXEICAhPT0/o6OgkJCRzc3N5eXnV1dXj4+NKSkobGxtaWlpfX1/a2trBwcF2dnYlJSV9fX3Hx8eSkpJNTU1sbGyWlpYRERGCgoIMDAzPz8+MjIy4uLiTk5PNzc3X19cxMTGDg4MpKSm8vLxGRkavr69QUFAKCgoqKiq2trbt7e329vaGhobl5eVra2tZWVk4ODgzMzNcXFyurq63t7dzhmTOAAAFeElEQVR4nO3dZXMUaRQF4EBIOgkQdFncFhZfZPF1d3d3d3f95TthC6iQTE9kuk+Y93m+MpW6t8+p7mGkZ2gIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACK8MDyR784eTw9BRkj1RU70rPQuvFqmjXpeWjVPdUMn6VnokUz86+qjemhaM3obAWoqvRYtGX2/DWgFA93K8DK9GS0Yku3AlTr06PRht+6FqB6Mj0bLVjfvQCeBpRgb00BRtLD0YKaAjgFlKCuAF4ULsAKp4CyHakrgLcGC1BXAKeAAihA4cbqCnAyPR2N211XgNH0dDRup2tA4RSgcLUFOJaejsbVFuC59HQ0bltdAW5KT0fjVtcV4GB6OppXV4C30sPRvLoCHE0PR/PqCvBEejiaV1eA9Gy0QAEKV5P/WHo2WlBTgHXp2WhBTQF
uSM9GCzwFKNv3ngKU7WL3ApxOz0YLXAHKNqwAZavJf0V6NlpQU4Bd6dloXpd7BLkClKImfwUogO+Glu3VuvwVYPDV5q8AA2+lAhTtVH3+CjDoeuRf3ZgekEYt61WAi+kJaVLt98JdAwbe/b3z14ABdn4u+VfVY+k5acbNc8u/qjalJ6UJd881/47t6WHpu2PzyL/yCzID56755T/llvTM9M8H88+/Y5+XhQbE2QXlP2XVg5/cfnrPmvHLtxYcG3nhu+dXp/dhfmpvCLIw4+mdmLtn+59/xzPptZirRvL3AeLrRkP5uwxcJxrL3y8MLSVP/XHr7P/QYP5VtbbdJelm/7RYduza+ebXmzdfakSj+XvrcIn4tOGYu9uQXp2O92P5u5vAUjDnt3mbkF6exi/z9X5Mb89r0QLcl16faP7uLR9X+1XP5qXXJ5u/AqT1/KZHs35J71+8bP5OAGl7svn/nd6/eNn8t6TXJ5r/4fT27Evm/1N6e6InALeVXwKC+b+c3p2h8FOA29Lbc2+0ANXe9P7FO5MtQPVN+gCULpx/VQ2nj0Dh0vl7KTgsHb/vhoSl46+cArLS6XcsTx+Dkv2QTr/jxfRBKNnT6fQ7VqUPQsneS6dfuZ9Y1IZ0+h3b0gehZHU//9eWC+mDULLwB0Iv8b+AoLHe+TQufQyKlg6/8kpgVjr9ygkgK51+Vd2ZPgRlS8dffZ4+AoVL5+9u0mHh/Hem9y9eMv0xNxLPG0k2IL08Q0PLkwXwCkBe9s2gX9PrcyJaABeBvGwBjqfXJ1sAp4A4BShc+BZxbhaf9ki2AI+n9ydbgNfT65MtwD/p9fk5WoAP0+uTPQVsTm9PtgDp5cm+H3QmvTxD0VNAenWmrIvlfzC9OpeE4h87ml6c/2XuF74svTZXBOIfOZVemqv29w6sv/79KL0y00y0m/+59L5ca0u7DUivywyrekQ2+vGhPj5VWJdelxkOdAtrbNvE5ceM960ByU2Z3UMzY1q56cj0x1xQgEG26WpA4wfWfjnrY/p0SxG/FbI0vTP1EcHDq7fWPOTtvhTgfGsr0Xf9uLHUV+klWIQ/F38deCO9A4uz2E+TH0ovwGIt7ovl6enpg4nRBefvi+EDYniBLw29lB6cvhlewNMBN4gfMOcmT9yxfe4XhInef5Hr0dmtk5NbJ799Ze36uvg3/pWek+btXdkl/jW/p0ejLbuufXYwtvvd9EwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAPfwHLuRXafg0SKQAAAAASUVORK5CYII=\")"}}},"DeleteModelsInferencesByModelIdResponse":{},"GetCollectionsResponse":{"type":"object","required":["collections"],"properties":{"nextPaginationToken":{"type":"string","description":"A token to query the next page of collections"},"collections":{"type":"array","items":{"type":"object","properties":{"createdAt":{"type":"string","description":"The collection creation date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"thumbnail":{"type":"object","description":"The thumbnail for the collection (if 
any)","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"name":{"type":"string","description":"The collection name"},"assetCount":{"type":"number"},"id":{"type":"string","description":"The collection ID (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"ownerId":{"type":"string","description":"The owner ID (example: \"dcf121faaa1a0a0bbbd9ca1b73d62aea\")"},"modelCount":{"type":"number"},"itemCount":{"type":"number"},"updatedAt":{"type":"string","description":"The collection last update date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"}},"required":["assetCount","createdAt","id","itemCount","modelCount","name","ownerId","updatedAt"]}}}},"PostSearchModelsResponse":{"type":"object","required":["hits","limit","offset"],"properties":{"hits":{"type":"array","items":{"type":"object","properties":{"thumbnail":{"type":"object","description":"Thumbnail image information for the model","properties":{"assetId":{"type":"string","description":"ID of the asset used as thumbnail"},"url":{"type":"string","description":"Signed URL of the thumbnail"}},"required":["assetId","url"]},"capabilities":{"type":"array","description":"Array of model capabilities","items":{"type":"string"}},"parentModelId":{"type":"string","description":"ID of the parent model this model was derived from"},"trainingImagesNumber":{"type":"number","description":"Number of images used to train this model"},"exampleAssetIds":{"type":"array","description":"Array of example asset IDs associated with this model","items":{"type":"string"}},"privacy":{"type":"string","description":"Privacy setting of the model (\"public\", \"private\", or \"unlisted\")"},"shortDescription":{"type":"string","description":"Short description of the model"},"source":{"type":"string","description":"Source/origin of the model (e.g., \"training\", \"import\")"},"authorId":{"type":"string","description":"User ID of the model 
creator/author"},"ownerId":{"type":"string","description":"Project ID of the model owner"},"type":{"type":"string","description":"Type of the model (e.g., \"sd-1_5\", \"flux.1\")"},"tags":{"type":"array","description":"Array of user-assigned tags for categorization","items":{"type":"string"}},"createdAt":{"type":"string","description":"Creation timestamp in ISO 8601 format (e.g., \"2025-01-16T11:19:41.579Z\")"},"score":{"type":"number","description":"Score assigned to the model based on usage and popularity"},"concepts":{"type":"array","description":"Array of concept models associated with this model\nEach concept contains a reference to its source model","items":{"type":"object","properties":{"modelId":{"type":"string","description":"ID of the concept's source model"}},"required":["modelId"]}},"collectionIds":{"type":"array","description":"Array of collection IDs this model belongs to","items":{"type":"string"}},"complianceMetadata":{"type":"object","description":"Compliance metadata for the model","properties":{"subProcessor":{"type":"string","description":"Sub-processor used for the model"},"modelProvider":{"type":"string","description":"Provider of the model"},"licenseTerms":{"type":"string","description":"License terms of the model"},"dataProcessingComment":{"type":"string","description":"Data processing comment of the model"},"maintainer":{"type":"string","description":"Maintainer of the model"}}},"teamId":{"type":"string","description":"Team ID of the model owner"},"name":{"type":"string","description":"Display name of the model"},"id":{"type":"string","description":"Unique identifier for the model (e.g., \"model_GTrL3mq4SXWyMxkOHRxlpw\")"},"updatedAt":{"type":"string","description":"Last modification timestamp in ISO 8601 format (e.g., 
\"2025-01-16T11:19:41.579Z\")"}},"required":["authorId","capabilities","collectionIds","concepts","createdAt","exampleAssetIds","id","name","ownerId","privacy","shortDescription","source","tags","teamId","trainingImagesNumber","type","updatedAt"]}},"offset":{"type":"number","description":"Number of documents skipped"},"hitsPerPage":{"type":"number","description":"Number of results on each page"},"totalHits":{"type":"number","description":"Exhaustive total number of matches"},"limit":{"type":"number","description":"Maximum number of documents returned"},"totalPages":{"type":"number","description":"Exhaustive total number of search result pages"},"page":{"type":"number","description":"Current search results page"},"estimatedTotalHits":{"type":"number","description":"Estimated total number of hits"}},"description":"Search result for model search"},"PutImagesPixelateRequest":{"type":"object","required":["pixelGridSize","removeNoise","returnImage"],"properties":{"image":{"type":"string","description":"The input image as a data URL (example: \"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVQYV2NgYAAAAAMAAWgmWQ0AAAAASUVORK5CYII=\") or the asset ID (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"assetId":{"type":"string","description":"Deprecated: The asset ID (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\"). Prefer to use image with the asset ID instead."},"pixelGridSize":{"type":"number","description":"The size of the pixel grid in the output image. Should be 16, 32, 64, 128, or 256."},"name":{"type":"string","description":"The original file name of the image (example: \"low-res-image.jpg\"). 
It will be ignored if assetId is provided."},"removeNoise":{"type":"boolean","description":"Reduce pixel art artifacts."},"removeBackground":{"type":"boolean","description":"Remove the background from the image.","default":false},"returnImage":{"type":"boolean","description":"If true, the image will be returned in the response."},"colorPalette":{"type":"array","description":"The color palette to use for the pixel art.","items":{"type":"array","description":"A color palette item is an array of integers with a length of 3 (example: [140, 143, 174]).\nThe integers must be within [0, 255]","items":{"type":"number"}}},"colorPaletteSize":{"type":"number","description":"If no colorPalette is provided, you can provide a palette size. Value should be between 2 and 256."}}},"GetWebhookEventResponse":{"type":"object","required":["webhookEvent"],"properties":{"webhookEvent":{"type":"object","properties":{"createdAt":{"type":"string"},"payload":{"type":"object","properties":{}},"endpointId":{"type":"string"},"id":{"type":"string"},"sentAt":{"type":"string"},"type":{"type":"string","enum":["*","asset.download.completed","asset.download.created","asset.download.failed","generation.cancelled","generation.completed","generation.created","generation.failed","inference.cancelled","inference.completed","inference.created","inference.failed","model.download.completed","model.download.created","model.download.failed","model.training.cancelled","model.training.completed","model.training.failed","model.training.started"]},"attempts":{"type":"array","items":{"type":"object","properties":{"responseStatusCode":{"type":"number"},"sentAt":{"type":"string"}},"required":["responseStatusCode","sentAt"]}},"status":{"type":"string","enum":["created","failed","retrying","success"]},"updatedAt":{"type":"string"}},"required":["attempts","createdAt","endpointId","id","payload","sentAt","status","type","updatedAt"]}}},"PostDetectInferencesRequest":{"type":"object","required":["image","modality"],"propert
ies":{"image":{"type":"string","description":"The image to be used to detect. Must reference an existing AssetId or be a data URL."},"modality":{"type":"string","description":"Modality to detect","enum":["canny","depth","grayscale","lineart_anime","mlsd","normal","pose","scribble","segmentation","sketch"],"default":"canny"},"lowThreshold":{"type":"number","description":"Low threshold for Canny detector"},"removeBackground":{"type":"boolean","description":"Remove background for Grayscale detector","default":true},"minThreshold":{"type":"number","description":"Minimum threshold for Grayscale conversion"},"maxThreshold":{"type":"number","description":"Maximum threshold for Grayscale conversion"},"factor":{"type":"number","description":"Contrast factor for Grayscale detector"},"highThreshold":{"type":"number","description":"High threshold for Canny detector"},"keypointThreshold":{"type":"number","description":"How polished is the surface? 0 is like a rough surface, 1 is like a mirror","minimum":0,"maximum":1}}},"PostDownloadAssetResponse":{"type":"object","required":["url"],"properties":{"url":{"type":"string","description":"The signed URL to download the asset in the given 
format"}}},"GetUserNotificationsByTokenResponse":{"type":"object","required":["notifications"],"properties":{"notifications":{"type":"object","properties":{"model-validation-failed":{"type":"object","properties":{"mobile":{"type":"boolean"},"email":{"type":"boolean"}},"required":["email","mobile"]},"creative-units-threshold-50":{"type":"object","properties":{"mobile":{"type":"boolean"},"email":{"type":"boolean"}},"required":["email","mobile"]},"model-transfer":{"type":"object","properties":{"mobile":{"type":"boolean"},"email":{"type":"boolean"}},"required":["email","mobile"]},"auto-refill-disabled":{"type":"object","properties":{"mobile":{"type":"boolean"},"email":{"type":"boolean"}},"required":["email","mobile"]},"soft-deletion-first-call":{"type":"object","properties":{"mobile":{"type":"boolean"},"email":{"type":"boolean"}},"required":["email","mobile"]},"creative-units-threshold-90":{"type":"object","properties":{"mobile":{"type":"boolean"},"email":{"type":"boolean"}},"required":["email","mobile"]},"soft-deletion-last-call":{"type":"object","properties":{"mobile":{"type":"boolean"},"email":{"type":"boolean"}},"required":["email","mobile"]},"creative-units-threshold-100":{"type":"object","properties":{"mobile":{"type":"boolean"},"email":{"type":"boolean"}},"required":["email","mobile"]},"model-train-succeeded":{"type":"object","properties":{"mobile":{"type":"boolean"},"email":{"type":"boolean"}},"required":["email","mobile"]}},"required":["auto-refill-disabled","creative-units-threshold-100","creative-units-threshold-50","creative-units-threshold-90","model-train-succeeded","model-transfer","model-validation-failed","soft-deletion-first-call","soft-deletion-last-call"]}}},"PostWebhooksClerkEmailsResponse":{"type":"object","properties":{"message":{"type":"string"}}},"DeleteAssetRequest":{"type":"object","required":["assetIds"],"properties":{"assetIds":{"type":"array","description":"The ids of the assets to delete. 
(Max 100 at once)","items":{"type":"string"}}}},"PutModelsTrainingImagesByModelIdAndTrainingImageIdResponse":{"type":"object","required":["trainingImage"],"properties":{"trainingImage":{"type":"object","properties":{"createdAt":{"type":"string","description":"The training image upload date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"automaticCaptioning":{"type":"string","description":"Automatic captioning of the image"},"downloadUrl":{"type":"string","description":"The URL of the image"},"name":{"type":"string","description":"The original file name of the image (example: \"my-training-image.jpg\")"},"description":{"type":"string","description":"Description for the image"},"id":{"type":"string","description":"The training image ID (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"}},"required":["automaticCaptioning","createdAt","description","downloadUrl","id","name"]}}},"PutImagesDetectionResponse":{"type":"object","required":["asset"],"properties":{"asset":{"type":"object","properties":{"preview":{"type":"object","description":"The asset's preview.\n\nContains the assetId and the url of the preview.","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"metadata":{"type":"object","description":"Metadata of the asset with some additional information","properties":{"maxIterations":{"type":"number"},"modality":{"type":"string","description":"Modality to detect","enum":["canny","depth","grayscale","lineart_anime","mlsd","normal","pose","scribble","segmentation","sketch"],"default":"canny"},"tileStyle":{"type":"boolean","description":"If set to true, during the upscaling process, the model will match tiles of the source image with tiles of the style image(s). This will result in a more coherent restyle. 
Works best with style images that have a similar composition.","default":false},"modelId":{"type":"string","description":"The modelId used to generate this asset"},"sourceProjectId":{"type":"string"},"bbox":{"type":"array","description":"A bounding box around the object of interest, in the format [x1, y1, x2, y2].","items":{}},"keypointThreshold":{"type":"number","description":"How polished is the surface? 0 is like a rough surface, 1 is like a mirror","minimum":0,"maximum":1},"type":{"type":"string","description":"The type of the asset. Ex: 'inference-txt2img' will represent an asset generated from a text to image model","enum":["3d-texture","3d-texture-albedo","3d-texture-metallic","3d-texture-mtl","3d-texture-normal","3d-texture-roughness","3d23d","3d23d-texture","audio2audio","background-removal","canvas","canvas-drawing","canvas-export","detection","generative-fill","image-prompt-editing","img23d","img2img","img2video","inference-controlnet","inference-controlnet-img2img","inference-controlnet-inpaint","inference-controlnet-inpaint-ip-adapter","inference-controlnet-ip-adapter","inference-controlnet-reference","inference-controlnet-texture","inference-img2img","inference-img2img-ip-adapter","inference-img2img-texture","inference-inpaint","inference-inpaint-ip-adapter","inference-reference","inference-reference-texture","inference-txt2img","inference-txt2img-ip-adapter","inference-txt2img-texture","patch","pixelization","reframe","restyle","segment","segmentation-image","segmentation-mask","skybox-3d","skybox-base-360","skybox-hdri","texture","texture-albedo","texture-ao","texture-edge","texture-height","texture-metallic","texture-normal","texture-smoothness","txt23d","txt2audio","txt2img","txt2video","unknown","uploaded","uploaded-3d","uploaded-audio","uploaded-avatar","uploaded-video","upscale","upscale-skybox","upscale-texture","upscale-video","vectorization","video2img","video2video"]},"text":{"type":"string","description":"A textual description / keywords 
describing the object of interest.","maxLength":100},"height":{"type":"number"},"nbMasks":{"type":"number"},"creativityDecay":{"type":"number","description":"Amount of decay in creativity over the upscale process. The lower the value, the less creativity will be preserved over the upscale process.","minimum":0,"maximum":100},"layerDifference":{"type":"number"},"scalingFactor":{"type":"number","description":"Scaling factor (when `targetWidth` not specified)","minimum":1,"maximum":16},"inputFidelity":{"type":"string","description":"When set to `high`, allows to better preserve details from the input images in the output.\nThis is especially useful when using images that contain elements like faces or logos that\nrequire accurate preservation in the generated image.\n\nYou can provide multiple input images that will all be preserved with high fidelity, but keep\nin mind that the first image will be preserved with richer textures and finer details, so if\nyou include elements such as faces, consider placing them in the first image.\n\nOnly available for the `gpt-image-1` model.","enum":["high","low"],"default":"low"},"modelType":{"type":"string","description":"The type of the generator used","enum":["custom","flux.1","flux.1-composition","flux.1-kontext-dev","flux.1-kontext-lora","flux.1-krea-dev","flux.1-krea-lora","flux.1-lora","flux.1-pro","flux.1.1-pro-ultra","flux1.1-pro","gpt-image-1","sd-1_5","sd-1_5-composition","sd-1_5-lora","sd-xl","sd-xl-composition","sd-xl-lora"]},"numOutputs":{"type":"number","description":"The number of outputs to generate.","minimum":1,"maximum":8},"angular":{"type":"number","description":"How angular is the surface? 
0 is like a sphere, 1 is like a mechanical object","minimum":0,"maximum":1},"targetHeight":{"type":"number","description":"The target height of the output image.","minimum":0,"maximum":2048},"concepts":{"type":"array","description":"Flux Kontext LoRA to style the image.\nFor Flux Kontext Prompt Editing.","items":{"type":"object","properties":{"modelId":{"type":"string","description":"The model ID (example: \"model_eyVcnFJcR92BxBkz7N6g5w\")"},"modelEpoch":{"type":"string","description":"The epoch of the model (example: \"000001\")\nOnly available for Flux Lora Trained models"},"scale":{"type":"number","description":"The scale of the model (example: 1.0)\nFor Flux Kontext Prompt Editing, the scale is between 0 and 2.","minimum":-2,"maximum":2}},"required":["modelId","scale"]}},"size":{"type":"number"},"spliceThreshold":{"type":"number"},"dilate":{"type":"number","description":"The number of pixels to dilate the result masks.","minimum":0,"maximum":30},"style":{"type":"string","enum":["3d-cartoon","3d-rendered","anime","cartoon","cinematic","claymation","cloud-skydome","comic","cyberpunk","enchanted","fantasy","ink","manga","manga-color","minimalist","neon-tron","oil-painting","pastel","photo","photography","psychedelic","retro-fantasy","scifi-concept-art","space","standard","whimsical"]},"imageFidelity":{"type":"number","description":"Strengthen the similarity to the original image during the upscale. 
Default: optimized for your preset and style.","minimum":0,"maximum":100},"inputLocation":{"type":"string","description":"Location of the input image in the output.","enum":["bottom","left","middle","right","top"],"default":"middle"},"seed":{"type":"string","description":"The seed used to generate this asset"},"preset":{"type":"string"},"trainingImage":{"type":"boolean"},"resultMask":{"type":"boolean","description":"Boolean to enable returning the masks (binary image) in the response.","default":true},"lockExpiresAt":{"type":"string","description":"The ISO timestamp when the lock on the canvas will expire"},"baseModelId":{"type":"string","description":"The baseModelId that may be changed at inference time"},"pathPrecision":{"type":"number"},"rootParentId":{"type":"string"},"sharpen":{"type":"boolean","description":"Sharpen tiles.","default":false},"cornerThreshold":{"type":"number"},"styleImages":{"type":"array","description":"List of style images. Most of the time, only one image is enough. It must be existing AssetIds.","items":{"type":"string"}},"texture":{"type":"string","description":"The asset to convert into texture maps. Must reference an existing AssetId."},"filterSpeckle":{"type":"number"},"minThreshold":{"type":"number","description":"Minimum threshold for Grayscale conversion"},"maxThreshold":{"type":"number","description":"Maximum threshold for Grayscale conversion"},"styleImagesFidelity":{"type":"number","description":"Condition the influence of the style image(s). The higher the value, the more the style images will influence the upscaled image.","minimum":0,"maximum":100},"depthImage":{"type":"string","description":"The control image processed by depth estimator. 
Must reference an existing AssetId."},"parentId":{"type":"string"},"parentJobId":{"type":"string"},"width":{"type":"number","description":"The width of the rendered image.","minimum":1024,"maximum":2048},"strength":{"type":"number","description":"The strength\n\nOnly available for the `flux-kontext` LoRA model."},"inferenceId":{"type":"string","description":"The id of the Inference describing how this image was generated"},"lengthThreshold":{"type":"number"},"originalAssetId":{"type":"string"},"backgroundOpacity":{"type":"number","description":"Int to set between 0 and 255 for the opacity of the background in the result images.","minimum":0,"maximum":255},"aspectRatio":{"type":"string","description":"The optional aspect ratio given for the generation, only applicable for some models"},"detailsLevel":{"type":"number","description":"Amount of details to remove or add","minimum":-50,"maximum":50},"fractality":{"type":"number","description":"Determine the scale at which the upscale process works.\n- With a small value, the upscale works at the largest scale, resulting in fewer added details and more coherent images. Ideal for portraits, for example.\n- With a large value, the upscale works at the smallest scale, resulting in more added details and more hallucinations. Ideal for landscapes, for example.\n\n(info): A small value is slower and more expensive to run.","minimum":0,"maximum":100},"horizontalExpansionRatio":{"type":"number","description":"(deprecated) Horizontal expansion ratio.","minimum":1,"maximum":2},"points":{"type":"array","description":"List of points (label, x, y) in the image where label = 0 for background and 1 for object.","items":{"type":"array","items":{}}},"mode":{"type":"string"},"colorPrecision":{"type":"number"},"promptFidelity":{"type":"number","description":"Increase the fidelity to the prompt during upscale. 
Default: optimized for your preset and style.","minimum":0,"maximum":100},"betterQuality":{"type":"boolean","description":"Remove small dark spots (i.e. “pepper”) and connect small bright cracks.","default":false},"removeBackground":{"type":"boolean","description":"Remove background for Grayscale detector","default":true},"creativity":{"type":"number","description":"Allow the generation of \"hallucinations\" during the upscale process, which adds additional details and deviates from the original image. Default: optimized for your preset and style.","minimum":0,"maximum":100},"structureFidelity":{"type":"number","description":"Strength for the input image structure preservation","minimum":0,"maximum":100},"imageType":{"type":"string","description":"Preserve the seamless properties of skybox or texture images. Input has to be of same type (seamless).","enum":["seamfull","skybox","texture"],"default":"seamfull"},"image":{"type":"string","description":"The input image to process. Must reference an existing AssetId or be a data URL."},"thumbnail":{"type":"object","description":"The thumbnail of the canvas","properties":{"assetId":{"type":"string","description":"The AssetId of the image used as a thumbnail for the canvas (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"url":{"type":"string","description":"The url of the image used as a thumbnail for the canvas"}},"required":["assetId","url"]},"structureImage":{"type":"string","description":"The control image for structure. A canny detector will be applied to this image. 
Must reference an existing AssetId."},"invert":{"type":"boolean","description":"To invert the relief","default":false},"kind":{"type":"string","enum":["3d","audio","document","image","image-hdr","json","video"]},"colorMode":{"type":"string"},"highThreshold":{"type":"number","description":"High threshold for Canny detector"},"verticalExpansionRatio":{"type":"number","description":"(deprecated) Vertical expansion ratio.","minimum":1,"maximum":2},"halfMode":{"type":"boolean"},"name":{"type":"string"},"negativePromptStrength":{"type":"number","description":"Controls the influence of the negative prompt. Default 0 means the negative prompt has no effect. Higher values increase negative prompt influence.\nMust be > 0 if negativePrompt is provided.","minimum":0,"maximum":10},"refinementSteps":{"type":"number","description":"Additional refinement steps before scaling.\n\nIf scalingFactor == 1, the refinement process will be applied (1 + refinementSteps) times.\nIf scalingFactor > 1, the refinement process will be applied refinementSteps times.","minimum":0,"maximum":4},"referenceImages":{"type":"array","description":"The reference images used for the asset generation or editing","items":{"type":"string"}},"styleFidelity":{"type":"number","description":"The higher the value the more it will look like the style image(s)","minimum":0,"maximum":100},"resultContours":{"type":"boolean","description":"Boolean to output the contours.","default":false},"controlEnd":{"type":"number","description":"End step for control."},"polished":{"type":"number","description":"How polished is the surface? 0 is like a rough surface, 1 is like a mirror","minimum":0,"maximum":1},"scheduler":{"type":"string","description":"The scheduler used to generate this asset"},"overrideEmbeddings":{"type":"boolean","description":"Override the embeddings of the model. Only your prompt and negativePrompt will be used. 
Use with caution."},"progressPercent":{"type":"number"},"factor":{"type":"number","description":"Contrast factor for Grayscale detector"},"mask":{"type":"string","description":"The mask used for the asset generation or editing"},"lowThreshold":{"type":"number","description":"Low threshold for Canny detector"},"depthFidelity":{"type":"number","description":"The depth fidelity if a depth image provided","minimum":0,"maximum":100},"raised":{"type":"number","description":"How raised is the surface? 0 is flat like water, 1 is like a very rough rock","minimum":0,"maximum":1},"copiedAt":{"type":"string","description":"The date when the asset was copied to a project"},"contours":{"type":"array","items":{"type":"array","items":{"type":"array","items":{"type":"array","items":{"type":"number"}}}}},"negativePrompt":{"type":"string","description":"The negative prompt used to generate this asset"},"defaultParameters":{"type":"boolean","description":"If true, use the default parameters","default":false},"resizeOption":{"type":"number","description":"Size proportion of the input image in the output.","minimum":0.1,"maximum":1},"outputIndex":{"type":"number"},"saveFlipbook":{"type":"boolean","description":"Save a flipbook of the texture. Deactivated when the input texture is larger than 2048x2048px","default":true},"guidance":{"type":"number","description":"The guidance used to generate this asset"},"numInferenceSteps":{"type":"number","description":"The number of denoising steps for each image generation.","minimum":5,"maximum":50},"hdr":{"type":"number"},"clustering":{"type":"boolean","description":"Activate clustering.","default":false},"shiny":{"type":"number","description":"How shiny is the surface? 
0 is like a matte surface, 1 is like a diamond","minimum":0,"maximum":1},"colorCorrection":{"type":"boolean","description":"Ensure upscaled tile have the same color histogram as original tile.","default":true},"sketch":{"type":"boolean","description":"Activate sketch detection instead of canny.","default":false},"overlapPercentage":{"type":"number","description":"Overlap percentage for the output image.","minimum":0,"maximum":0.5},"prompt":{"type":"string","description":"The prompt that guided the asset generation or editing"},"resultImage":{"type":"boolean","description":"Boolean to able output the cut out object.","default":false},"cannyStructureImage":{"type":"string","description":"The control image already processed by canny detector. Must reference an existing AssetId."},"geometryEnforcement":{"type":"number","description":"Apply extra control to the Skybox 360 geometry.\nThe higher the value, the more the 360 geometry will influence the generated skybox image.\n\nUse with caution. 
Default is adapted to the other parameters.","minimum":0,"maximum":100},"targetWidth":{"type":"number","description":"Target width for the upscaled image, take priority over scaling factor","minimum":1024,"maximum":16000}},"required":["kind","type"]},"automaticCaptioning":{"type":"string","description":"Automatic captioning of the asset"},"description":{"type":"string","description":"The description, it will contain in priority:\n- the manual description\n- the advanced captioning when the asset is used in training flow\n- the automatic captioning"},"privacy":{"type":"string","description":"The privacy of the asset","enum":["private","public","unlisted"]},"mimeType":{"type":"string","description":"The mime type of the asset (example: \"image/png\")"},"source":{"type":"string","description":"source of the asset","enum":["3d23d","3d23d:texture","3d:texture","3d:texture:albedo","3d:texture:metallic","3d:texture:mtl","3d:texture:normal","3d:texture:roughness","audio2audio","background-removal","canvas","canvas-drawing","canvas-export","detection","generative-fill","image-prompt-editing","img23d","img2img","img2video","inference-control-net","inference-control-net-img","inference-control-net-inpainting","inference-control-net-inpainting-ip-adapter","inference-control-net-ip-adapter","inference-control-net-reference","inference-control-net-texture","inference-img","inference-img-ip-adapter","inference-img-texture","inference-in-paint","inference-in-paint-ip-adapter","inference-reference","inference-reference-texture","inference-txt","inference-txt-ip-adapter","inference-txt-texture","patch","pixelization","reframe","restyle","segment","segmentation-image","segmentation-mask","skybox-3d","skybox-base-360","skybox-hdri","texture","texture:albedo","texture:ao","texture:edge","texture:height","texture:metallic","texture:normal","texture:smoothness","txt23d","txt2audio","txt2img","txt2video","unknown","uploaded","uploaded-3d","uploaded-audio","uploaded-avatar","uploaded-video"
,"upscale","upscale-skybox","upscale-texture","upscale-video","vectorization","video2img","video2video"]},"ownerId":{"type":"string","description":"The owner (project) ID (example: \"proj_23tlk332lkht3kl2\" or \"team_dlkhgs23tlk3hlkth32lkht3kl2\" for old teams)"},"createdAt":{"type":"string","description":"The asset creation date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"firstFrame":{"type":"object","description":"The video asset's first frame.\n\nContains the assetId and the url of the first frame.","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"editCapabilities":{"type":"array","description":"List of edit capabilities","items":{"type":"string","enum":["DETECTION","GENERATIVE_FILL","PIXELATE","PROMPT_EDITING","REFINE","REFRAME","REMOVE_BACKGROUND","SEGMENTATION","UPSCALE","UPSCALE_360","VECTORIZATION"]}},"embedding":{"type":"array","description":"The embedding of the asset when requested.\n\nOnly available when an asset can be embedded (ie: not Detection maps)","items":{"type":"number"}},"id":{"type":"string","description":"The asset ID (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"updatedAt":{"type":"string","description":"The asset last update date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"thumbnail":{"type":"object","description":"The asset's thumbnail.\n\nContains the assetId and the url of the thumbnail.","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"nsfw":{"type":"array","description":"The NSFW labels","items":{"type":"string"}},"kind":{"type":"string","description":"The kind of asset","enum":["3d","audio","document","image","image-hdr","json","video"]},"lastFrame":{"type":"object","description":"The video asset's last frame.\n\nContains the assetId and the url of the last 
frame.","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"originalFileUrl":{"type":"string","description":"The original file url.\n\nContains the url of the original file. without any conversion. Only available for some specific video, audio and threeD assets.\nIs only specified if the given asset data has been replaced with a new file during the creation of the asset."},"authorId":{"type":"string","description":"The author user ID (example: \"dcf121faaa1a0a0bbbd9ca1b73d62aea\")"},"url":{"type":"string","description":"Signed URL to get the asset content"},"isHidden":{"type":"boolean","description":"Whether the asset is hidden."},"tags":{"type":"array","description":"The associated tags (example: [\"sci-fi\", \"landscape\"])","items":{"type":"string"}},"outputIndex":{"type":"number","description":"The output index of the asset within a job\nThis index is an positive integer that starts at 0\nIt is used to differentiate between multiple outputs of the same job\nIf the job has only one output, this index is 0"},"collectionIds":{"type":"array","description":"A list of CollectionId this asset belongs to","items":{"type":"string"}},"properties":{"type":"object","description":"The properties of the asset, content may depend on the kind of asset returned","properties":{"transcription":{"type":"object","description":"Transcription of the audio","properties":{"text":{"type":"string"}},"required":["text"]},"hasNormals":{"type":"boolean","description":"Whether the mesh has normal vectors"},"format":{"type":"string","description":"Format of the mesh file (e.g. 
'glb', etc.)"},"description":{"type":"string","description":"Description of the audio"},"bitrate":{"type":"number","description":"Bitrate of the media in bits per second"},"boneCount":{"type":"number","description":"Number of bones if skeleton exists"},"classification":{"type":"string","description":"Classification of the audio","enum":["effect","interview","music","other","sound","speech","text","unknown"]},"sampleRate":{"type":"number","description":"Sample rate of the media in Hz"},"faceCount":{"type":"number","description":"Number of faces/triangles in the mesh"},"vertexCount":{"type":"number","description":"Number of vertices in the mesh"},"animationFrameCount":{"type":"number","description":"Number of animation frames if animations exist"},"duration":{"type":"number","description":"Duration of the media in seconds"},"frameRate":{"type":"number","description":"Frame rate of the video in frames per second"},"nbFrames":{"type":"number","description":"Number of frames in the video"},"channels":{"type":"number","description":"Number of channels of the audio"},"size":{"type":"number"},"hasSkeleton":{"type":"boolean","description":"Whether the mesh has bones/skeleton"},"width":{"type":"number"},"hasAnimations":{"type":"boolean","description":"Whether the mesh has animations"},"codecName":{"type":"string","description":"Codec name of the media"},"hasUVs":{"type":"boolean","description":"Whether the mesh has UV coordinates"},"dimensions":{"type":"array","description":"Bounding box dimensions [width, height, depth]","items":{}},"height":{"type":"number"}},"required":["size"]},"status":{"type":"string","description":"The actual 
status","enum":["error","pending","success"]}},"required":["authorId","collectionIds","createdAt","editCapabilities","id","kind","metadata","mimeType","ownerId","privacy","properties","source","status","tags","updatedAt","url"]}}},"GetWorkflowsByWorkflowIdResponse":{"type":"object","required":["workflow"],"properties":{"workflow":{"type":"object","properties":{"thumbnail":{"type":"object","description":"Currently the thumbnail is identical to the after asset.","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"before":{"type":"object","description":"A representation of an asset before being processed by the workflow","properties":{"assetId":{"type":"string","description":"The AssetId of the image used as a thumbnail for your model (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"url":{"type":"string","description":"The url of the image used as a thumbnail for your model"}},"required":["assetId","url"]},"inputs":{"type":"array","description":"The inputs of the workflow.","items":{"type":"object","description":"Defines the input parameters for a model. Use this to understand the available input parameters\nfor a given `modelId` when calling `POST /generate/custom/{modelId}`.\nSee {@link https://docs.scenario.com/docs/video-generation} for examples.","properties":{"parent":{"type":"boolean","description":"Whether this input represents a parent asset to assign to the produced assets.\nOnly available for \\`file\\` and \\`file_array\\` input types.\n\nFor \\`file_array\\`, the parent asset is the first item in the array."},"color":{"type":"boolean","description":"Whether the input is a color or not. 
Only available for \\`string\\` input type."},"inputs":{"type":"array","description":"The list of inputs which form an object within a container array.\nAll inputs are the same as the current object.\nThis is only available for type inputs_array inputs.","items":{"type":"object","properties":{}}},"minLength":{"type":"number","description":"The minimum allowed length for string inputs. Also applies to each item in \\`string_array\\`."},"backgroundBehavior":{"type":"string","description":"Specifies the background behavior for the input. Only available for \\`file\\` and \\`file_array\\`\ninput types with kind \\`image\\`.","enum":["opaque","transparent"]},"description":{"type":"string","description":"Help text displayed in the UI to provide additional information about the input"},"type":{"type":"string","description":"The data type of the input","enum":["boolean","file","file_array","inputs_array","model","model_array","number","number_array","string","string_array"]},"required":{"type":"object","description":"Set of rules that describes when this input is required:\n- \\`always\\`: Input is always required\n- \\`ifNotDefined\\`: Input is required when another specified input is not defined\n- \\`ifDefined\\`: Input is required when another specified input is defined\n- \\`conditionalValues\\`: Input is required when another input has a specific value\n\nBy default, the input is not required.","properties":{"always":{"type":"boolean","description":"Whether the input is always required"},"ifNotDefined":{"type":"object","description":"Makes this input required when another input is not defined:\n- Key: name of the input that must be undefined\n- Value: message to display when this input is required","properties":{}},"ifDefined":{"type":"object","description":"Makes this input required when another input is defined:\n- Key: name of the input that must be defined\n- Value: message to display when this input is 
required","properties":{}},"conditionalValues":{"type":"object","description":"Makes this input required when another input has a specific value:\n- Key: name of the input to check\n- Value: operation and allowed values that trigger the requirement","properties":{}}}},"modelTypes":{"type":"array","description":"The allowed model types for this input. Example: \\`[\"flux.1-lora\"]\\`.\nOnly available for \\`model_array\\` input type.","items":{"type":"string","enum":["custom","flux.1","flux.1-composition","flux.1-kontext-dev","flux.1-kontext-lora","flux.1-krea-dev","flux.1-krea-lora","flux.1-lora","flux.1-pro","flux.1.1-pro-ultra","flux1.1-pro","gpt-image-1","sd-1_5","sd-1_5-composition","sd-1_5-lora","sd-xl","sd-xl-composition","sd-xl-lora"]}},"min":{"type":"number","description":"The minimum allowed value. Only available for \\`number\\` and array input types."},"promptSpark":{"type":"boolean","description":"Whether the input is used with prompt spark. Only available for \\`string\\` input type."},"placeholder":{"type":"string","description":"Placeholder text for the input. Only available for 'string' input type."},"group":{"type":"string","description":"Used to visually group inputs together in the UI. Inputs with the same group value appear\nconsecutively in the UI."},"allowedValues":{"type":"array","description":"The allowed values for the input. For \\`string\\` or \\`number\\` types, creates a single-select\ndropdown.\nFor \\`string_array\\` type, creates a multi-select dropdown.","items":{}},"costImpact":{"type":"boolean","description":"Whether this input affects the model's cost calculation"},"max":{"type":"number","description":"The maximum allowed value. Only available for \\`number\\` and \\`array\\` input types."},"kind":{"type":"string","description":"The asset kind of the input. 
Only taken into account for \\`file\\` and \\`file_array\\` input types.","enum":["3d","audio","document","image","image-hdr","json","video"]},"maskFrom":{"type":"string","description":"The name of the file input field to use as the mask source"},"label":{"type":"string","description":"The label displayed in the UI for this input"},"hint":{"type":"string","description":"Hint text displayed in the UI as a tooltip to guide the user"},"name":{"type":"string","description":"The name that must be user to call the model through the API"},"step":{"type":"number","description":"The step increment for numeric inputs. Only available for \\`number\\` input type.","minimum":1},"prompt":{"type":"boolean","description":"Whether the input is a prompt. When true, displays as a text area with prompt spark feature.\nOnly available for \\`string\\` input type."},"maxLength":{"type":"number","description":"The maximum allowed length for \\`string\\` inputs. Also applies to each item in \\`string_array\\`."}},"required":["name","type"]}},"description":{"type":"string","description":"The description of the workflow."},"privacy":{"type":"string","enum":["private","public","unlisted"]},"uiConfig":{"type":"object","description":"The UI configuration for the workflow. 
This is managed by scenario webapp.","properties":{"selects":{"type":"object","description":"Configuration for the selects","additionalProperties":{"type":"object","properties":{}}},"inputProperties":{"type":"object","description":"Configuration for the input properties","additionalProperties":{"type":"object","properties":{"collapsed":{"type":"boolean"}}}},"presets":{"type":"array","description":"Configuration for the presets","items":{"type":"object","properties":{"presets":{"type":"object","properties":{}},"fields":{"type":"array","items":{"type":"string"}}},"required":["fields","presets"]}},"triggerGenerate":{"type":"object","description":"Configuration for the trigger generate button","properties":{"after":{"type":"string","description":"The 'name' of the input where the trigger generate button will be displayed (after the input).\nDo not specify both position and after."},"label":{"type":"string"},"position":{"type":"string","description":"The position of the trigger generate button. 
If position specified, the button will be displayed at the specified position.\nDo not specify both position and after.","enum":["bottom","top"]}},"required":["label"]},"lorasComponent":{"type":"object","description":"Configuration for the loras component","properties":{"modelIdInput":{"type":"string","description":"The input model id (example: a composition or a single LoRA modelId)\nIf specified, the model id will be attached to the output asset as a metadata\nIf the model-decomposer parser is specified on it, modelInput and scaleInput will be automatically populated"},"scaleInput":{"type":"string","description":"The input name of the scale (number_array)"},"label":{"type":"string","description":"The label of the component"},"modelInput":{"type":"string","description":"The input name of the model (model_array)"}},"required":["label","modelInput","scaleInput"]},"resolutionComponent":{"type":"object","description":"Configuration for the resolution component","properties":{"presets":{"type":"array","description":"The resolution presets","items":{"type":"object","properties":{"width":{"type":"number"},"label":{"type":"string"},"height":{"type":"number"}},"required":["height","label","width"]}},"widthInput":{"type":"string","description":"The input name of the width"},"label":{"type":"string","description":"The label of the component"},"heightInput":{"type":"string","description":"The input name of the height"}},"required":["heightInput","label","presets","widthInput"]}}},"shortDescription":{"type":"string"},"authorId":{"type":"string"},"ownerId":{"type":"string"},"editorInfo":{"type":"object","description":"The UI data about the workflow. 
This is managed by scenario webapp.","properties":{}},"createdAt":{"type":"string","description":"ISO string"},"tagSet":{"type":"array","description":"The tag set of the workflow.","items":{"type":"string"}},"name":{"type":"string"},"after":{"type":"object","description":"A representation of an asset after being processed by the workflow","properties":{"assetId":{"type":"string","description":"The AssetId of the image used as a thumbnail for your model (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"url":{"type":"string","description":"The url of the image used as a thumbnail for your model"}},"required":["assetId","url"]},"id":{"type":"string"},"flow":{"type":"array","description":"The flow of the workflow.","items":{"type":"object","properties":{"logicType":{"type":"string","description":"The type of the logic for the node.\nOnly available for logic nodes.","enum":["if-else"]},"dependsOn":{"type":"array","description":"The nodes that this node depends on.\nOnly available for nodes that have dependencies. Mainly used for user approval nodes.","items":{"type":"string"}},"modelId":{"type":"string","description":"The model id for the node. Mainly used for custom model tasks."},"inputs":{"type":"array","description":"The inputs of the node.","items":{"type":"object","description":"This is used to run the flow.","properties":{"parent":{"type":"boolean","description":"Whether this input represents a parent asset to assign to the produced assets.\nOnly available for \\`file\\` and \\`file_array\\` input types.\n\nFor \\`file_array\\`, the parent asset is the first item in the array."},"color":{"type":"boolean","description":"Whether the input is a color or not. 
Only available for \\`string\\` input type."},"inputs":{"type":"array","description":"The list of inputs which form an object within a container array.\nAll inputs are the same as the current object.\nThis is only available for type inputs_array inputs.","items":{"type":"object","properties":{}}},"minLength":{"type":"number","description":"The minimum allowed length for string inputs. Also applies to each item in \\`string_array\\`."},"backgroundBehavior":{"type":"string","description":"Specifies the background behavior for the input. Only available for \\`file\\` and \\`file_array\\`\ninput types with kind \\`image\\`.","enum":["opaque","transparent"]},"description":{"type":"string","description":"Help text displayed in the UI to provide additional information about the input"},"type":{"type":"string","description":"The data type of the input","enum":["boolean","file","file_array","inputs_array","model","model_array","number","number_array","string","string_array"]},"required":{"type":"object","description":"Set of rules that describes when this input is required:\n- \\`always\\`: Input is always required\n- \\`ifNotDefined\\`: Input is required when another specified input is not defined\n- \\`ifDefined\\`: Input is required when another specified input is defined\n- \\`conditionalValues\\`: Input is required when another input has a specific value\n\nBy default, the input is not required.","properties":{"always":{"type":"boolean","description":"Whether the input is always required"},"ifNotDefined":{"type":"object","description":"Makes this input required when another input is not defined:\n- Key: name of the input that must be undefined\n- Value: message to display when this input is required","properties":{}},"ifDefined":{"type":"object","description":"Makes this input required when another input is defined:\n- Key: name of the input that must be defined\n- Value: message to display when this input is 
required","properties":{}},"conditionalValues":{"type":"object","description":"Makes this input required when another input has a specific value:\n- Key: name of the input to check\n- Value: operation and allowed values that trigger the requirement","properties":{}}}},"modelTypes":{"type":"array","description":"The allowed model types for this input. Example: \\`[\"flux.1-lora\"]\\`.\nOnly available for \\`model_array\\` input type.","items":{"type":"string","enum":["custom","flux.1","flux.1-composition","flux.1-kontext-dev","flux.1-kontext-lora","flux.1-krea-dev","flux.1-krea-lora","flux.1-lora","flux.1-pro","flux.1.1-pro-ultra","flux1.1-pro","gpt-image-1","sd-1_5","sd-1_5-composition","sd-1_5-lora","sd-xl","sd-xl-composition","sd-xl-lora"]}},"ref":{"type":"object","description":"The reference to another input or output of the same workflow.\nMust have at least one of node or conditional.","properties":{"equal":{"type":"string","description":"This is the desired node output value if ref is an if/else node."},"node":{"type":"string","description":"The node id or 'workflow' if the source is a workflow input."},"conditional":{"type":"array","description":"The conditional nodes to reference.\nIf the conditional nodes are successful, the node will be successful.\nIf the conditional nodes are skipped, the node will be skipped.\nContains an array of node ids used to check the status of the nodes.","items":{"type":"string"}},"name":{"type":"string","description":"The name of the input or output to reference.\nIf the type is 'workflow', the name is the name of the input of the workflow is required\nIf the type is 'node', the name is not mandatory, except if you want all outputs of the node.\nTo get all outputs of a node, you can use the name 'all'."}}},"min":{"type":"number","description":"The minimum allowed value. Only available for \\`number\\` and array input types."},"promptSpark":{"type":"boolean","description":"Whether the input is used with prompt spark. 
Only available for \\`string\\` input type."},"placeholder":{"type":"string","description":"Placeholder text for the input. Only available for 'string' input type."},"group":{"type":"string","description":"Used to visually group inputs together in the UI. Inputs with the same group value appear\nconsecutively in the UI."},"allowedValues":{"type":"array","description":"The allowed values for the input. For \\`string\\` or \\`number\\` types, creates a single-select\ndropdown.\nFor \\`string_array\\` type, creates a multi-select dropdown.","items":{}},"costImpact":{"type":"boolean","description":"Whether this input affects the model's cost calculation"},"max":{"type":"number","description":"The maximum allowed value. Only available for \\`number\\` and \\`array\\` input types."},"kind":{"type":"string","description":"The asset kind of the input. Only taken into account for \\`file\\` and \\`file_array\\` input types.","enum":["3d","audio","document","image","image-hdr","json","video"]},"maskFrom":{"type":"string","description":"The name of the file input field to use as the mask source"},"label":{"type":"string","description":"The label displayed in the UI for this input"},"hint":{"type":"string","description":"Hint text displayed in the UI as a tooltip to guide the user"},"name":{"type":"string","description":"The name that must be user to call the model through the API"},"step":{"type":"number","description":"The step increment for numeric inputs. Only available for \\`number\\` input type.","minimum":1},"prompt":{"type":"boolean","description":"Whether the input is a prompt. When true, displays as a text area with prompt spark feature.\nOnly available for \\`string\\` input type."},"maxLength":{"type":"number","description":"The maximum allowed length for \\`string\\` inputs. 
Also applies to each item in \\`string_array\\`."}},"required":["name","type"]}},"id":{"type":"string","description":"The id of the node."},"logic":{"type":"object","description":"The logic of the node.\nOnly available for logic nodes.","properties":{"default":{"type":"string","description":"The default case of the logic.\nContains the id/output of the node to execute if no case is matched.\nOnly available for if/else nodes."},"transform":{"type":"string","description":"The transform of the logic.\nOnly available for transform nodes."},"cases":{"type":"array","description":"The cases of the logic.\nOnly available for if/else nodes.","items":{"type":"object","properties":{"condition":{"type":"string"},"value":{"type":"string"}},"required":["condition","value"]}}}},"type":{"type":"string","description":"The type of the job for the node.","enum":["custom-model","generate-prompt","logic","remove-background","transform","user-approval","workflow"]},"workflowId":{"type":"string","description":"The workflow id for the node. 
Mainly used for workflow tasks."}},"required":["id","type"]}},"outputAssetKinds":{"type":"array","items":{"type":"string","enum":["3d","audio","document","image","image-hdr","json","video"]}},"status":{"type":"string","enum":["deleted","draft","ready"]},"updatedAt":{"type":"string","description":"ISO string"}},"required":["authorId","createdAt","description","editorInfo","flow","id","inputs","name","ownerId","privacy","status","tagSet","updatedAt"]}}},"PostGenerateCustomResponse":{"type":"object","required":["job"],"properties":{"creativeUnitsCost":{"type":"number","description":"The Creative Units cost for the request billed"},"creativeUnitsDiscount":{"type":"number","description":"The Creative Units discount for the request billed"},"job":{"type":"object","properties":{"createdAt":{"type":"string","description":"The job creation date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"jobId":{"type":"string","description":"The job ID (example: \"job_ocZCnG1Df35XRL1QyCZSRxAG8\")"},"metadata":{"type":"object","description":"Metadata of the job with some additional information","properties":{"output":{"type":"object","description":"The output of the job","properties":{}},"input":{"type":"object","description":"The inputs for the job","properties":{}},"assetIds":{"type":"array","description":"List of produced assets for this job","items":{"type":"string"}},"flow":{"type":"array","description":"The flow of the job. Only available for workflow jobs.","items":{"type":"object","properties":{"jobId":{"type":"string","description":"If the flow is part of a WorkflowJob, this is the jobId for the node.\njobId is only available for nodes started. 
A node \"Pending\" for a running workflow job is not started."},"assets":{"type":"array","description":"List of produced assets for this node.","items":{"type":"object","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]}},"logicType":{"type":"string","description":"The type of the logic for the node.\nOnly available for logic nodes.","enum":["if-else"]},"dependsOn":{"type":"array","description":"The nodes that this node depends on.\nOnly available for nodes that have dependencies. Mainly used for user approval nodes.","items":{"type":"string"}},"modelId":{"type":"string","description":"The model id for the node. Mainly used for custom model tasks."},"inputs":{"type":"array","description":"The inputs of the node.","items":{"type":"object","description":"This is used to run the flow.","properties":{"parent":{"type":"boolean","description":"Whether this input represents a parent asset to assign to the produced assets.\nOnly available for \\`file\\` and \\`file_array\\` input types.\n\nFor \\`file_array\\`, the parent asset is the first item in the array."},"color":{"type":"boolean","description":"Whether the input is a color or not. Only available for \\`string\\` input type."},"inputs":{"type":"array","description":"The list of inputs which form an object within a container array.\nAll inputs are the same as the current object.\nThis is only available for type inputs_array inputs.","items":{"type":"object","properties":{}}},"minLength":{"type":"number","description":"The minimum allowed length for string inputs. Also applies to each item in \\`string_array\\`."},"backgroundBehavior":{"type":"string","description":"Specifies the background behavior for the input. 
Only available for \\`file\\` and \\`file_array\\`\ninput types with kind \\`image\\`.","enum":["opaque","transparent"]},"description":{"type":"string","description":"Help text displayed in the UI to provide additional information about the input"},"type":{"type":"string","description":"The data type of the input","enum":["boolean","file","file_array","inputs_array","model","model_array","number","number_array","string","string_array"]},"required":{"type":"object","description":"Set of rules that describes when this input is required:\n- \\`always\\`: Input is always required\n- \\`ifNotDefined\\`: Input is required when another specified input is not defined\n- \\`ifDefined\\`: Input is required when another specified input is defined\n- \\`conditionalValues\\`: Input is required when another input has a specific value\n\nBy default, the input is not required.","properties":{"always":{"type":"boolean","description":"Whether the input is always required"},"ifNotDefined":{"type":"object","description":"Makes this input required when another input is not defined:\n- Key: name of the input that must be undefined\n- Value: message to display when this input is required","properties":{}},"ifDefined":{"type":"object","description":"Makes this input required when another input is defined:\n- Key: name of the input that must be defined\n- Value: message to display when this input is required","properties":{}},"conditionalValues":{"type":"object","description":"Makes this input required when another input has a specific value:\n- Key: name of the input to check\n- Value: operation and allowed values that trigger the requirement","properties":{}}}},"modelTypes":{"type":"array","description":"The allowed model types for this input. 
Example: \\`[\"flux.1-lora\"]\\`.\nOnly available for \\`model_array\\` input type.","items":{"type":"string","enum":["custom","flux.1","flux.1-composition","flux.1-kontext-dev","flux.1-kontext-lora","flux.1-krea-dev","flux.1-krea-lora","flux.1-lora","flux.1-pro","flux.1.1-pro-ultra","flux1.1-pro","gpt-image-1","sd-1_5","sd-1_5-composition","sd-1_5-lora","sd-xl","sd-xl-composition","sd-xl-lora"]}},"ref":{"type":"object","description":"The reference to another input or output of the same workflow.\nMust have at least one of node or conditional.","properties":{"equal":{"type":"string","description":"This is the desired node output value if ref is an if/else node."},"node":{"type":"string","description":"The node id or 'workflow' if the source is a workflow input."},"conditional":{"type":"array","description":"The conditional nodes to reference.\nIf the conditional nodes are successful, the node will be successful.\nIf the conditional nodes are skipped, the node will be skipped.\nContains an array of node ids used to check the status of the nodes.","items":{"type":"string"}},"name":{"type":"string","description":"The name of the input or output to reference.\nIf the type is 'workflow', the name is the name of the input of the workflow is required\nIf the type is 'node', the name is not mandatory, except if you want all outputs of the node.\nTo get all outputs of a node, you can use the name 'all'."}}},"min":{"type":"number","description":"The minimum allowed value. Only available for \\`number\\` and array input types."},"promptSpark":{"type":"boolean","description":"Whether the input is used with prompt spark. Only available for \\`string\\` input type."},"placeholder":{"type":"string","description":"Placeholder text for the input. Only available for 'string' input type."},"group":{"type":"string","description":"Used to visually group inputs together in the UI. 
Inputs with the same group value appear\nconsecutively in the UI."},"allowedValues":{"type":"array","description":"The allowed values for the input. For \\`string\\` or \\`number\\` types, creates a single-select\ndropdown.\nFor \\`string_array\\` type, creates a multi-select dropdown.","items":{}},"costImpact":{"type":"boolean","description":"Whether this input affects the model's cost calculation"},"max":{"type":"number","description":"The maximum allowed value. Only available for \\`number\\` and \\`array\\` input types."},"kind":{"type":"string","description":"The asset kind of the input. Only taken into account for \\`file\\` and \\`file_array\\` input types.","enum":["3d","audio","document","image","image-hdr","json","video"]},"maskFrom":{"type":"string","description":"The name of the file input field to use as the mask source"},"label":{"type":"string","description":"The label displayed in the UI for this input"},"hint":{"type":"string","description":"Hint text displayed in the UI as a tooltip to guide the user"},"name":{"type":"string","description":"The name that must be used to call the model through the API"},"step":{"type":"number","description":"The step increment for numeric inputs. Only available for \\`number\\` input type.","minimum":1},"prompt":{"type":"boolean","description":"Whether the input is a prompt. When true, displays as a text area with prompt spark feature.\nOnly available for \\`string\\` input type."},"maxLength":{"type":"number","description":"The maximum allowed length for \\`string\\` inputs. 
Also applies to each item in \\`string_array\\`."}},"required":["name","type"]}},"id":{"type":"string","description":"The id of the node."},"logic":{"type":"object","description":"The logic of the node.\nOnly available for logic nodes.","properties":{"default":{"type":"string","description":"The default case of the logic.\nContains the id/output of the node to execute if no case is matched.\nOnly available for if/else nodes."},"transform":{"type":"string","description":"The transform of the logic.\nOnly available for transform nodes."},"cases":{"type":"array","description":"The cases of the logic.\nOnly available for if/else nodes.","items":{"type":"object","properties":{"condition":{"type":"string"},"value":{"type":"string"}},"required":["condition","value"]}}}},"type":{"type":"string","description":"The type of the job for the node.","enum":["custom-model","generate-prompt","logic","remove-background","transform","user-approval","workflow"]},"workflowId":{"type":"string","description":"The workflow id for the node. Mainly used for workflow tasks."},"status":{"type":"string","description":"The status of the node. 
Only available for WorkflowJob nodes.","enum":["failure","pending","processing","skipped","success"]}},"required":["id","status","type"]}},"workflowId":{"type":"string","description":"The workflow ID of the job if job is part of a workflow."},"workflowJobId":{"type":"string","description":"The workflow job ID of the job if job is part of a workflow job."}}},"statusHistory":{"type":"array","description":"The history of the different statuses the job went through with the ISO string date\nof when the job reached each status.","items":{"type":"object","properties":{"date":{"type":"string"},"status":{"type":"string","enum":["canceled","failure","finalizing","in-progress","pending","queued","success","warming-up"]}},"required":["date","status"]}},"progress":{"type":"number","description":"Progress of the job (between 0 and 1)"},"authorId":{"type":"string","description":"The author user ID (example: \"dcf121faaa1a0a0bbbd9ca1b73d62aea\")"},"jobType":{"type":"string","description":"The type of job","enum":["assets-download","canvas-export","caption","caption-llava","custom","describe-style","detection","embed","flux","flux-model-training","generate-prompt","image-generation","image-prompt-editing","inference","mesh-preview-rendering","model-download","model-import","model-training","openai-image-generation","patch-image","pixelate","reframe","remove-background","repaint","restyle","segment","skybox-3d","skybox-base-360","skybox-hdri","skybox-upscale-360","texture","translate","upload","upscale","upscale-skybox","upscale-texture","vectorize","workflow"]},"ownerId":{"type":"string","description":"The owner ID (example: \"team_U3Qmc8PCdWXwAQJ4Dvw4tV6D\")"},"billing":{"type":"object","description":"The billing of the job","properties":{"cuDiscount":{"type":"number"},"cuCost":{"type":"number"}},"required":["cuCost","cuDiscount"]},"status":{"type":"string","description":"The current status of the 
job","enum":["canceled","failure","finalizing","in-progress","pending","queued","success","warming-up"]},"updatedAt":{"type":"string","description":"The job last update date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"}},"required":["createdAt","jobId","jobType","metadata","progress","status","statusHistory","updatedAt"]}}},"DeleteCollectionResponse":{},"PostModelsTrainingImagesByModelIdRequest":{"type":"object","properties":{"data":{"type":"string","description":"The training image as a data URL (example: \"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVQYV2NgYAAAAAMAAWgmWQ0AAAAASUVORK5CYII=\")"},"assetId":{"type":"string","description":"The asset ID to use as a training image (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\"). If provided, \"data\" and \"name\" parameters will be ignored."},"name":{"type":"string","description":"The original file name of the image (example: \"my-training-image.jpg\")"},"assetIds":{"type":"array","description":"The asset IDs to use as training images (example: [\"asset_GTrL3mq4SXWyMxkOHRxlpw\", \"asset_GTrL3mq4SXWyMxkOHRxlpw\"])\nUsed in batch mode, up to 10 asset IDs are allowed. 
Cannot be used with \"assetId\" or \"data\" and \"name\" parameters.","items":{"type":"string"}},"preset":{"type":"string","description":"The preset to use for training images","enum":["default","style","subject"]}}},"PostUploadsResponse":{"type":"object","required":["upload"],"properties":{"upload":{"type":"object","properties":{"originalFileName":{"type":"string"},"fileName":{"type":"string"},"partsCount":{"type":"number"},"kind":{"type":"string","description":"The kind of the file once validated (example: \"model\")","enum":["3d","asset","audio","avatar","image","model","video"]},"errorMessage":{"type":"string"},"entityId":{"type":"string"},"source":{"type":"string","enum":["civitai","huggingface","multipart","other","url"]},"authorId":{"type":"string"},"ownerId":{"type":"string"},"url":{"type":"string"},"createdAt":{"type":"string"},"jobId":{"type":"string"},"fileSize":{"type":"number"},"provider":{"type":"string","enum":["civitai","huggingface","other"]},"parts":{"type":"array","items":{"type":"object","properties":{"number":{"type":"number"},"expires":{"type":"string"},"url":{"type":"string"}},"required":["expires","number","url"]}},"id":{"type":"string"},"config":{"type":"object","properties":{}},"contentType":{"type":"string"},"assetOptions":{"type":"object","properties":{"hide":{"type":"boolean","description":"Specify if the asset should be hidden from the user."},"collectionIds":{"type":"array","description":"The collection ids to add the asset to.","items":{"type":"string"}},"parentId":{"type":"string","description":"The parentId of the 
asset."}}},"status":{"type":"string","enum":["complete","failed","imported","pending","validated","validating"]},"updatedAt":{"type":"string"}},"required":["authorId","createdAt","fileName","id","kind","ownerId","source","status","updatedAt"]}}},"PostModelsTrainingImagesByModelIdResponse":{"type":"object","required":["trainingImage"],"properties":{"trainingImage":{"type":"object","properties":{"createdAt":{"type":"string","description":"The training image upload date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"automaticCaptioning":{"type":"string","description":"Automatic captioning of the image"},"downloadUrl":{"type":"string","description":"The URL of the image"},"name":{"type":"string","description":"The original file name of the image (example: \"my-training-image.jpg\")"},"description":{"type":"string","description":"Description for the image"},"id":{"type":"string","description":"The training image ID (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"}},"required":["automaticCaptioning","createdAt","description","downloadUrl","id","name"]}}},"PostImg2imgTextureInferencesResponse":{"type":"object","required":["inference","job"],"properties":{"creativeUnitsCost":{"type":"number","description":"The Creative Units cost for the request billed"},"creativeUnitsDiscount":{"type":"number","description":"The Creative Units discount for the request billed"},"inference":{"type":"object","properties":{"image":{"type":"string","description":"Signed URL to display the input image"},"imageId":{"type":"string","description":"Asset id of the input image"},"seed":{"type":"string","description":"Used to reproduce previous results. Default: randomly generated number."},"strength":{"type":"number","description":"Controls the noise intensity introduced to the input image, where a value of 1.0 completely erases the original image's details. Available for img2img and inpainting. 
(within [0.01, 1.0], default: 0.75)","minimum":0.01,"maximum":1},"modelEpoch":{"type":"string","description":"The epoch of the model to use for the inference. Only available for Flux Lora Trained models."},"hideResults":{"type":"boolean","description":"If set, generated assets will be hidden and not returned in the list of images of the inference\nor when listing assets (default: false)","default":false},"maskId":{"type":"string","description":"Asset id of the mask image"},"type":{"type":"string","description":"The type of inference to use. Example: txt2img, img2img, etc.\n\nSelecting the right type will condition the expected parameters.\n\nNote: if model.type is `sd-xl*` or `sd-1_5*`, when using the `\"inpaint\"` inference type, Scenario determines the best available `baseModel` for a given `modelId`: one of `[\"stable-diffusion-inpainting\", \"stable-diffusion-xl-1.0-inpainting-0.1\"] will be used.","enum":["txt2img","txt2img_ip_adapter","txt2img_texture","img2img","img2img_ip_adapter","img2img_texture","inpaint","outpaint","inpaint_ip_adapter","controlnet","controlnet_ip_adapter","reference","reference_texture","controlnet_reference","controlnet_img2img","controlnet_inpaint","controlnet_inpaint_ip_adapter","controlnet_texture"]},"negativePrompt":{"type":"string","description":"The prompt not to guide the image generation, ignored when guidance < 1 (example: \"((ugly face))\")\nFor Flux based model (not Fast-Flux): requires negativePromptStrength > 0 and active only for inference types txt2img / img2img / controlnet."},"scheduler":{"type":"string","description":"The scheduler to use to override the default configured for the model. 
See detailed documentation for more details.","enum":["DDIMScheduler","DDPMScheduler","DEISMultistepScheduler","DPMSolverMultistepScheduler","DPMSolverSinglestepScheduler","EulerAncestralDiscreteScheduler","EulerDiscreteScheduler","HeunDiscreteScheduler","KDPM2AncestralDiscreteScheduler","KDPM2DiscreteScheduler","LCMScheduler","LMSDiscreteScheduler","PNDMScheduler","TCDScheduler","UniPCMultistepScheduler"]},"intermediateImages":{"type":"boolean","description":"Enable or disable the intermediate images generation (default: false)","default":false},"concepts":{"type":"array","items":{"type":"object","properties":{"modelId":{"type":"string","description":"The model ID (example: \"model_eyVcnFJcR92BxBkz7N6g5w\")"},"modelEpoch":{"type":"string","description":"The epoch of the model (example: \"000001\")\nOnly available for Flux Lora Trained models"},"scale":{"type":"number","description":"The scale of the model (example: 1.0)\nFor Flux Kontext Prompt Editing, the scale is between 0 and 2.","minimum":-2,"maximum":2}},"required":["modelId","scale"]}},"guidance":{"type":"number","description":"Controls how closely the generated image follows the prompt. Higher values result in stronger adherence to the prompt. 
Default and allowed values depend on the model type:\n- For Flux dev models, the default is 3.5 and allowed values are within [0, 10]\n- For Flux pro models, the default is 3 and allowed values are within [2, 5]\n- For SDXL models, the default is 6 and allowed values are within [0, 20]\n- For SD1.5 models, the default is 7.5 and allowed values are within [0, 20]","minimum":0,"maximum":20},"numInferenceSteps":{"type":"number","description":"The number of denoising steps for each image generation (within [1, 150], default: 30)","minimum":1,"maximum":150},"numSamples":{"type":"number","description":"The number of images to generate (within [1, 128], default: 4)","minimum":1,"maximum":128},"width":{"type":"number","description":"The width of the generated images, must be a 8 multiple (within [64, 2048], default: 512)\nIf model.type is `sd-xl`, `sd-xl-lora`, `sd-xl-composition` the width must be within [512, 2048]\nIf model.type is `sd-1_5`, the width must be within [64, 1024]\nIf model.type is `flux.1.1-pro-ultra`, you can use the aspectRatio parameter instead","minimum":64,"maximum":2048},"negativePromptStrength":{"type":"number","description":"Only applicable for flux-dev based models for `txt2img`, `img2img`, and `controlnet` inference types.\n\nControls the influence of the negative prompt. Default 0 means the negative prompt has no effect. Higher values increase negative prompt influence.\nMust be > 0 if negativePrompt is provided.","minimum":0,"maximum":10},"baseModelId":{"type":"string","description":"The base model to use for the inference. Only Flux LoRA models can use this parameter.\nAllowed values are available in the model's attribute: `compliantModelIds`"},"prompt":{"type":"string","description":"Full text prompt including the model placeholder. 
(example: \"an illustration of phoenix in a fantasy world, flying over a mountain, 8k, bokeh effect\")"},"height":{"type":"number","description":"The height of the generated images, must be a 8 multiple (within [64, 2048], default: 512)\nIf model.type is `sd-xl`, `sd-xl-lora`, `sd-xl-composition` the height must be within [512, 2048]\nIf model.type is `sd-1_5`, the height must be within [64, 1024]\nIf model.type is `flux.1.1-pro-ultra`, you can use the aspectRatio parameter instead","minimum":64,"maximum":2048},"mask":{"type":"string","description":"Signed URL to display the mask image"}},"required":["image","imageId","prompt","type"]},"job":{"type":"object","properties":{"createdAt":{"type":"string","description":"The job creation date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"jobId":{"type":"string","description":"The job ID (example: \"job_ocZCnG1Df35XRL1QyCZSRxAG8\")"},"metadata":{"type":"object","description":"Metadata of the job with some additional information","properties":{"output":{"type":"object","description":"The output of the job","properties":{}},"input":{"type":"object","description":"The inputs for the job","properties":{}},"assetIds":{"type":"array","description":"List of produced assets for this job","items":{"type":"string"}},"flow":{"type":"array","description":"The flow of the job. Only available for workflow jobs.","items":{"type":"object","properties":{"jobId":{"type":"string","description":"If the flow is part of a WorkflowJob, this is the jobId for the node.\njobId is only available for nodes started. 
A node \"Pending\" for a running workflow job is not started."},"assets":{"type":"array","description":"List of produced assets for this node.","items":{"type":"object","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]}},"logicType":{"type":"string","description":"The type of the logic for the node.\nOnly available for logic nodes.","enum":["if-else"]},"dependsOn":{"type":"array","description":"The nodes that this node depends on.\nOnly available for nodes that have dependencies. Mainly used for user approval nodes.","items":{"type":"string"}},"modelId":{"type":"string","description":"The model id for the node. Mainly used for custom model tasks."},"inputs":{"type":"array","description":"The inputs of the node.","items":{"type":"object","description":"This is used to run the flow.","properties":{"parent":{"type":"boolean","description":"Whether this input represents a parent asset to assign to the produced assets.\nOnly available for \\`file\\` and \\`file_array\\` input types.\n\nFor \\`file_array\\`, the parent asset is the first item in the array."},"color":{"type":"boolean","description":"Whether the input is a color or not. Only available for \\`string\\` input type."},"inputs":{"type":"array","description":"The list of inputs which form an object within a container array.\nAll inputs are the same as the current object.\nThis is only available for type inputs_array inputs.","items":{"type":"object","properties":{}}},"minLength":{"type":"number","description":"The minimum allowed length for string inputs. Also applies to each item in \\`string_array\\`."},"backgroundBehavior":{"type":"string","description":"Specifies the background behavior for the input. 
Only available for \\`file\\` and \\`file_array\\`\ninput types with kind \\`image\\`.","enum":["opaque","transparent"]},"description":{"type":"string","description":"Help text displayed in the UI to provide additional information about the input"},"type":{"type":"string","description":"The data type of the input","enum":["boolean","file","file_array","inputs_array","model","model_array","number","number_array","string","string_array"]},"required":{"type":"object","description":"Set of rules that describes when this input is required:\n- \\`always\\`: Input is always required\n- \\`ifNotDefined\\`: Input is required when another specified input is not defined\n- \\`ifDefined\\`: Input is required when another specified input is defined\n- \\`conditionalValues\\`: Input is required when another input has a specific value\n\nBy default, the input is not required.","properties":{"always":{"type":"boolean","description":"Whether the input is always required"},"ifNotDefined":{"type":"object","description":"Makes this input required when another input is not defined:\n- Key: name of the input that must be undefined\n- Value: message to display when this input is required","properties":{}},"ifDefined":{"type":"object","description":"Makes this input required when another input is defined:\n- Key: name of the input that must be defined\n- Value: message to display when this input is required","properties":{}},"conditionalValues":{"type":"object","description":"Makes this input required when another input has a specific value:\n- Key: name of the input to check\n- Value: operation and allowed values that trigger the requirement","properties":{}}}},"modelTypes":{"type":"array","description":"The allowed model types for this input. 
Example: \\`[\"flux.1-lora\"]\\`.\nOnly available for \\`model_array\\` input type.","items":{"type":"string","enum":["custom","flux.1","flux.1-composition","flux.1-kontext-dev","flux.1-kontext-lora","flux.1-krea-dev","flux.1-krea-lora","flux.1-lora","flux.1-pro","flux.1.1-pro-ultra","flux1.1-pro","gpt-image-1","sd-1_5","sd-1_5-composition","sd-1_5-lora","sd-xl","sd-xl-composition","sd-xl-lora"]}},"ref":{"type":"object","description":"The reference to another input or output of the same workflow.\nMust have at least one of node or conditional.","properties":{"equal":{"type":"string","description":"This is the desired node output value if ref is an if/else node."},"node":{"type":"string","description":"The node id or 'workflow' if the source is a workflow input."},"conditional":{"type":"array","description":"The conditional nodes to reference.\nIf the conditional nodes are successful, the node will be successful.\nIf the conditional nodes are skipped, the node will be skipped.\nContains an array of node ids used to check the status of the nodes.","items":{"type":"string"}},"name":{"type":"string","description":"The name of the input or output to reference.\nIf the type is 'workflow', the name is the name of the input of the workflow is required\nIf the type is 'node', the name is not mandatory, except if you want all outputs of the node.\nTo get all outputs of a node, you can use the name 'all'."}}},"min":{"type":"number","description":"The minimum allowed value. Only available for \\`number\\` and array input types."},"promptSpark":{"type":"boolean","description":"Whether the input is used with prompt spark. Only available for \\`string\\` input type."},"placeholder":{"type":"string","description":"Placeholder text for the input. Only available for 'string' input type."},"group":{"type":"string","description":"Used to visually group inputs together in the UI. 
Inputs with the same group value appear\nconsecutively in the UI."},"allowedValues":{"type":"array","description":"The allowed values for the input. For \\`string\\` or \\`number\\` types, creates a single-select\ndropdown.\nFor \\`string_array\\` type, creates a multi-select dropdown.","items":{}},"costImpact":{"type":"boolean","description":"Whether this input affects the model's cost calculation"},"max":{"type":"number","description":"The maximum allowed value. Only available for \\`number\\` and \\`array\\` input types."},"kind":{"type":"string","description":"The asset kind of the input. Only taken into account for \\`file\\` and \\`file_array\\` input types.","enum":["3d","audio","document","image","image-hdr","json","video"]},"maskFrom":{"type":"string","description":"The name of the file input field to use as the mask source"},"label":{"type":"string","description":"The label displayed in the UI for this input"},"hint":{"type":"string","description":"Hint text displayed in the UI as a tooltip to guide the user"},"name":{"type":"string","description":"The name that must be used to call the model through the API"},"step":{"type":"number","description":"The step increment for numeric inputs. Only available for \\`number\\` input type.","minimum":1},"prompt":{"type":"boolean","description":"Whether the input is a prompt. When true, displays as a text area with prompt spark feature.\nOnly available for \\`string\\` input type."},"maxLength":{"type":"number","description":"The maximum allowed length for \\`string\\` inputs. 
Also applies to each item in \\`string_array\\`."}},"required":["name","type"]}},"id":{"type":"string","description":"The id of the node."},"logic":{"type":"object","description":"The logic of the node.\nOnly available for logic nodes.","properties":{"default":{"type":"string","description":"The default case of the logic.\nContains the id/output of the node to execute if no case is matched.\nOnly available for if/else nodes."},"transform":{"type":"string","description":"The transform of the logic.\nOnly available for transform nodes."},"cases":{"type":"array","description":"The cases of the logic.\nOnly available for if/else nodes.","items":{"type":"object","properties":{"condition":{"type":"string"},"value":{"type":"string"}},"required":["condition","value"]}}}},"type":{"type":"string","description":"The type of the job for the node.","enum":["custom-model","generate-prompt","logic","remove-background","transform","user-approval","workflow"]},"workflowId":{"type":"string","description":"The workflow id for the node. Mainly used for workflow tasks."},"status":{"type":"string","description":"The status of the node. 
Only available for WorkflowJob nodes.","enum":["failure","pending","processing","skipped","success"]}},"required":["id","status","type"]}},"workflowId":{"type":"string","description":"The workflow ID of the job if job is part of a workflow."},"workflowJobId":{"type":"string","description":"The workflow job ID of the job if job is part of a workflow job."}}},"statusHistory":{"type":"array","description":"The history of the different statuses the job went through with the ISO string date\nof when the job reached each status.","items":{"type":"object","properties":{"date":{"type":"string"},"status":{"type":"string","enum":["canceled","failure","finalizing","in-progress","pending","queued","success","warming-up"]}},"required":["date","status"]}},"progress":{"type":"number","description":"Progress of the job (between 0 and 1)"},"authorId":{"type":"string","description":"The author user ID (example: \"dcf121faaa1a0a0bbbd9ca1b73d62aea\")"},"jobType":{"type":"string","description":"The type of job","enum":["assets-download","canvas-export","caption","caption-llava","custom","describe-style","detection","embed","flux","flux-model-training","generate-prompt","image-generation","image-prompt-editing","inference","mesh-preview-rendering","model-download","model-import","model-training","openai-image-generation","patch-image","pixelate","reframe","remove-background","repaint","restyle","segment","skybox-3d","skybox-base-360","skybox-hdri","skybox-upscale-360","texture","translate","upload","upscale","upscale-skybox","upscale-texture","vectorize","workflow"]},"ownerId":{"type":"string","description":"The owner ID (example: \"team_U3Qmc8PCdWXwAQJ4Dvw4tV6D\")"},"billing":{"type":"object","description":"The billing of the job","properties":{"cuDiscount":{"type":"number"},"cuCost":{"type":"number"}},"required":["cuCost","cuDiscount"]},"status":{"type":"string","description":"The current status of the 
job","enum":["canceled","failure","finalizing","in-progress","pending","queued","success","warming-up"]},"updatedAt":{"type":"string","description":"The job last update date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"}},"required":["createdAt","jobId","jobType","metadata","progress","status","statusHistory","updatedAt"]}}},"PutModelsExamplesByModelIdResponse":{"type":"object","required":["examples"],"properties":{"examples":{"type":"array","items":{"type":"object","properties":{"modelId":{"type":"string","description":"Model id of the model used to generate the asset"},"inferenceParameters":{"type":"object","description":"The inference parameters used to generate the asset","properties":{"ipAdapterImageIds":{"type":"array","description":"Asset id of the input IpAdapter images","items":{"type":"string"}},"styleFidelity":{"type":"number","description":"If style_fidelity=1.0, control more important, else if style_fidelity=0.0, prompt more important, else balanced\nOnly for \"reference\" inference type","minimum":0,"maximum":1},"controlEnd":{"type":"number","description":"Specifies how long the ControlNet guidance should be applied during the inference process.\n\nOnly available for Flux.1-dev based models.\n\nThe value represents the percentage of total inference steps where the ControlNet guidance is active.\nFor example:\n- 1.0: ControlNet guidance is applied during all inference steps\n- 0.5: ControlNet guidance is only applied during the first half of inference steps\n\nDefault values:\n- 0.5 for Canny modality\n- 0.6 for all other modalities","minimum":0.1,"maximum":1},"modality":{"type":"string","description":"The modality associated with the control image used for the generation: it can either be an object with a combination of maximum\n\nFor models of SD1.5 family:\n - up to 3 modalities from `canny`, `pose`, `depth`, `lines`, `seg`, `scribble`, `lineart`, `normal-map`, `illusion`\n - or one of the following presets: `character`, `landscape`, 
`city`, `interior`.\n\nFor models of the SDXL family:\n - up to 3 modalities from `canny`, `pose`, `depth`, `seg`, `illusion`, `scribble`\n - or one of the following presets: `character`, `landscape`.\n\nFor models of the FLUX schnell or dev families:\n- one modality from: `canny`, `tile`, `depth`, `blur`, `pose`, `gray`, `low-quality`\n\nOptionally, you can associate a value to these modalities or presets. The value must be within `]0.0, 1.0]`.\n\nExamples:\n- `canny`\n- `depth:0.5,pose:1.0`\n- `canny:0.5,depth:0.5,lines:0.3`\n- `landscape`\n- `character:0.5`\n- `illusion:1`\n\nNote: if you use a value that is not supported by the model family, this will result in an error.","enum":["blur","canny","depth","gray","illusion","lineart","lines","low-quality","normal-map","pose","scribble","seg","sketch","tile"]},"seed":{"type":"string","description":"Used to reproduce previous results. Default: randomly generated number."},"strength":{"type":"number","description":"Controls the noise intensity introduced to the input image, where a value of 1.0 completely erases the original image's details. Available for img2img and inpainting. (within [0.01, 1.0], default: 0.75)","minimum":0.01,"maximum":1},"ipAdapterType":{"type":"string","description":"The type of IP Adapter model to use. Must be one of [`style`, `character`], defaults to `style`","enum":["character","style"],"default":"style"},"aspectRatio":{"type":"string","description":"The aspect ratio of the generated images. Only used for the model flux.1.1-pro-ultra.\nThe aspect ratio is a string formatted as \"width:height\" (example: \"16:9\").","enum":["16:9","1:1","21:9","2:3","3:2","3:4","4:3","4:5","5:4","9:16","9:21"],"default":"1:1"},"type":{"type":"string","description":"The type of inference to use. 
Example: txt2img, img2img, etc.\n\nSelecting the right type will condition the expected parameters.\n\nNote: if model.type is `sd-xl*` or `sd-1_5*`, when using the `\"inpaint\"` inference type, Scenario determines the best available `baseModel` for a given `modelId`: one of `[\"stable-diffusion-inpainting\", \"stable-diffusion-xl-1.0-inpainting-0.1\"]` will be used.","enum":["controlnet","controlnet_img2img","controlnet_inpaint","controlnet_inpaint_ip_adapter","controlnet_ip_adapter","controlnet_reference","controlnet_texture","img2img","img2img_ip_adapter","img2img_texture","inpaint","inpaint_ip_adapter","outpaint","reference","reference_texture","txt2img","txt2img_ip_adapter","txt2img_texture"]},"ipAdapterImage":{"type":"string","description":"Signed URL to display the IpAdapter image"},"scheduler":{"type":"string","description":"The scheduler to use to override the default configured for the model. See detailed documentation for more details.","enum":["DDIMScheduler","DDPMScheduler","DEISMultistepScheduler","DPMSolverMultistepScheduler","DPMSolverSinglestepScheduler","EulerAncestralDiscreteScheduler","EulerDiscreteScheduler","HeunDiscreteScheduler","KDPM2AncestralDiscreteScheduler","KDPM2DiscreteScheduler","LCMScheduler","LMSDiscreteScheduler","PNDMScheduler","TCDScheduler","UniPCMultistepScheduler"]},"disableMerging":{"type":"boolean","description":"If set to true, the entire input image will likely change during inpainting. 
This results in faster inferences, but the output image will be harder to integrate if the input is just a small part of a larger image.","default":false},"disableModalityDetection":{"type":"boolean","description":"If false, the process uses the given image to detect the modality.\nIf true (default), the process will not try to detect the modality of the given image.\n\nFor example:\nwith `pose` modality and `false` value, the process will detect the pose of people in the given image\nwith `depth` modality and `false` value, the process will detect the depth of the given image\nwith `scribble` modality and `true` value, the process will use the given image as a scribble\n\n⚠️ For models of the FLUX schnell or dev families, this parameter is ignored. The modality detection is always disabled. ⚠️","default":true},"ipAdapterImages":{"type":"array","description":"Signed URL to display the IpAdapter images","items":{"type":"string"}},"baseModelId":{"type":"string","description":"The base model to use for the inference. 
Only Flux LoRA models can use this parameter.\nAllowed values are available in the model's attribute: `compliantModelIds`"},"controlStart":{"type":"number","description":"Specifies the starting point of the ControlNet guidance during the inference process.\n\nOnly available for Flux.1-dev based models.\n\nThe value represents the percentage of total inference steps where the ControlNet guidance starts.\nFor example:\n- 0.0: ControlNet guidance starts at the beginning of the inference steps\n- 0.5: ControlNet guidance starts at the middle of the inference steps","minimum":0,"maximum":0.9},"height":{"type":"number","description":"The height of the generated images, must be a 8 multiple (within [64, 2048], default: 512)\nIf model.type is `sd-xl`, `sd-xl-lora`, `sd-xl-composition` the height must be within [512, 2048]\nIf model.type is `sd-1_5`, the height must be within [64, 1024]\nIf model.type is `flux.1.1-pro-ultra`, you can use the aspectRatio parameter instead","minimum":64,"maximum":2048},"mask":{"type":"string","description":"Signed URL to display the mask image"},"controlImageId":{"type":"string","description":"Asset id of the controlnet input image"},"image":{"type":"string","description":"Signed URL to display the input image"},"imageId":{"type":"string","description":"Asset id of the input image"},"ipAdapterImageId":{"type":"string","description":"Asset id of the input IpAdapter image"},"ipAdapterScale":{"type":"number","description":"IpAdapter scale factor (within [0.0, 1.0], default: 0.9).","minimum":0,"maximum":1},"modelEpoch":{"type":"string","description":"The epoch of the model to use for the inference. 
Only available for Flux Lora Trained models."},"hideResults":{"type":"boolean","description":"If set, generated assets will be hidden and not returned in the list of images of the inference\nor when listing assets (default: false)","default":false},"ipAdapterScales":{"type":"array","description":"IpAdapter scale factors (within [0.0, 1.0], default: 0.9).","items":{"type":"number"}},"maskId":{"type":"string","description":"Asset id of the mask image"},"referenceAdain":{"type":"boolean","description":"Whether to use reference adain\nOnly for \"reference\" inference type","default":false},"negativePrompt":{"type":"string","description":"The prompt not to guide the image generation, ignored when guidance < 1 (example: \"((ugly face))\")\nFor Flux based model (not Fast-Flux): requires negativePromptStrength > 0 and active only for inference types txt2img / img2img / controlnet."},"intermediateImages":{"type":"boolean","description":"Enable or disable the intermediate images generation (default: false)","default":false},"concepts":{"type":"array","items":{"type":"object","properties":{"modelId":{"type":"string","description":"The model ID (example: \"model_eyVcnFJcR92BxBkz7N6g5w\")"},"modelEpoch":{"type":"string","description":"The epoch of the model (example: \"000001\")\nOnly available for Flux Lora Trained models"},"scale":{"type":"number","description":"The scale of the model (example: 1.0)\nFor Flux Kontext Prompt Editing, the scale is between 0 and 2.","minimum":-2,"maximum":2}},"required":["modelId","scale"]}},"controlImage":{"type":"string","description":"Signed URL to display the controlnet input image"},"guidance":{"type":"number","description":"Controls how closely the generated image follows the prompt. Higher values result in stronger adherence to the prompt. 
Default and allowed values depend on the model type:\n- For Flux dev models, the default is 3.5 and allowed values are within [0, 10]\n- For Flux pro models, the default is 3 and allowed values are within [2, 5]\n- For SDXL models, the default is 6 and allowed values are within [0, 20]\n- For SD1.5 models, the default is 7.5 and allowed values are within [0, 20]","minimum":0,"maximum":20},"numInferenceSteps":{"type":"number","description":"The number of denoising steps for each image generation (within [1, 150], default: 30)","minimum":1,"maximum":150},"numSamples":{"type":"number","description":"The number of images to generate (within [1, 128], default: 4)","minimum":1,"maximum":128},"width":{"type":"number","description":"The width of the generated images, must be a 8 multiple (within [64, 2048], default: 512)\nIf model.type is `sd-xl`, `sd-xl-lora`, `sd-xl-composition` the width must be within [512, 2048]\nIf model.type is `sd-1_5`, the width must be within [64, 1024]\nIf model.type is `flux.1.1-pro-ultra`, you can use the aspectRatio parameter instead","minimum":64,"maximum":2048},"negativePromptStrength":{"type":"number","description":"Only applicable for flux-dev based models for `txt2img`, `img2img`, and `controlnet` inference types.\n\nControls the influence of the negative prompt. Default 0 means the negative prompt has no effect. Higher values increase negative prompt influence.\nMust be > 0 if negativePrompt is provided.","minimum":0,"maximum":10},"referenceAttn":{"type":"boolean","description":"Whether to use reference query for self attention's context\nOnly for \"reference\" inference type","default":false},"prompt":{"type":"string","description":"Full text prompt including the model placeholder. 
(example: \"an illustration of phoenix in a fantasy world, flying over a mountain, 8k, bokeh effect\")"}},"required":["prompt","type"]},"inferenceId":{"type":"string","description":"Inference id of the inference used to generate the asset"},"asset":{"type":"object","description":"Asset generated by the inference","properties":{"preview":{"type":"object","description":"The asset's preview.\n\nContains the assetId and the url of the preview.","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"metadata":{"type":"object","description":"Metadata of the asset with some additional information","properties":{"maxIterations":{"type":"number"},"modality":{"type":"string","description":"Modality to detect","enum":["canny","depth","grayscale","lineart_anime","mlsd","normal","pose","scribble","segmentation","sketch"],"default":"canny"},"tileStyle":{"type":"boolean","description":"If set to true, during the upscaling process, the model will match tiles of the source image with tiles of the style image(s). This will result in a more coherent restyle. Works best with style images that have a similar composition.","default":false},"modelId":{"type":"string","description":"The modelId used to generate this asset"},"sourceProjectId":{"type":"string"},"bbox":{"type":"array","description":"A bounding box around the object of interest, in the format [x1, y1, x2, y2].","items":{}},"keypointThreshold":{"type":"number","description":"How polished is the surface? 0 is like a rough surface, 1 is like a mirror","minimum":0,"maximum":1},"type":{"type":"string","description":"The type of the asset. 
Ex: 'inference-txt2img' will represent an asset generated from a text to image model","enum":["3d-texture","3d-texture-albedo","3d-texture-metallic","3d-texture-mtl","3d-texture-normal","3d-texture-roughness","3d23d","3d23d-texture","audio2audio","background-removal","canvas","canvas-drawing","canvas-export","detection","generative-fill","image-prompt-editing","img23d","img2img","img2video","inference-controlnet","inference-controlnet-img2img","inference-controlnet-inpaint","inference-controlnet-inpaint-ip-adapter","inference-controlnet-ip-adapter","inference-controlnet-reference","inference-controlnet-texture","inference-img2img","inference-img2img-ip-adapter","inference-img2img-texture","inference-inpaint","inference-inpaint-ip-adapter","inference-reference","inference-reference-texture","inference-txt2img","inference-txt2img-ip-adapter","inference-txt2img-texture","patch","pixelization","reframe","restyle","segment","segmentation-image","segmentation-mask","skybox-3d","skybox-base-360","skybox-hdri","texture","texture-albedo","texture-ao","texture-edge","texture-height","texture-metallic","texture-normal","texture-smoothness","txt23d","txt2audio","txt2img","txt2video","unknown","uploaded","uploaded-3d","uploaded-audio","uploaded-avatar","uploaded-video","upscale","upscale-skybox","upscale-texture","upscale-video","vectorization","video2img","video2video"]},"text":{"type":"string","description":"A textual description / keywords describing the object of interest.","maxLength":100},"height":{"type":"number"},"nbMasks":{"type":"number"},"creativityDecay":{"type":"number","description":"Amount of decay in creativity over the upscale process. 
The lower the value, the less the creativity will be preserved over the upscale process.","minimum":0,"maximum":100},"layerDifference":{"type":"number"},"scalingFactor":{"type":"number","description":"Scaling factor (when `targetWidth` not specified)","minimum":1,"maximum":16},"inputFidelity":{"type":"string","description":"When set to `high`, allows to better preserve details from the input images in the output.\nThis is especially useful when using images that contain elements like faces or logos that\nrequire accurate preservation in the generated image.\n\nYou can provide multiple input images that will all be preserved with high fidelity, but keep\nin mind that the first image will be preserved with richer textures and finer details, so if\nyou include elements such as faces, consider placing them in the first image.\n\nOnly available for the `gpt-image-1` model.","enum":["high","low"],"default":"low"},"modelType":{"type":"string","description":"The type of the generator used","enum":["custom","flux.1","flux.1-composition","flux.1-kontext-dev","flux.1-kontext-lora","flux.1-krea-dev","flux.1-krea-lora","flux.1-lora","flux.1-pro","flux.1.1-pro-ultra","flux1.1-pro","gpt-image-1","sd-1_5","sd-1_5-composition","sd-1_5-lora","sd-xl","sd-xl-composition","sd-xl-lora"]},"numOutputs":{"type":"number","description":"The number of outputs to generate.","minimum":1,"maximum":8},"angular":{"type":"number","description":"How angular is the surface? 
0 is like a sphere, 1 is like a mechanical object","minimum":0,"maximum":1},"targetHeight":{"type":"number","description":"The target height of the output image.","minimum":0,"maximum":2048},"concepts":{"type":"array","description":"Flux Kontext LoRA to style the image.\nFor Flux Kontext Prompt Editing.","items":{"type":"object","properties":{"modelId":{"type":"string","description":"The model ID (example: \"model_eyVcnFJcR92BxBkz7N6g5w\")"},"modelEpoch":{"type":"string","description":"The epoch of the model (example: \"000001\")\nOnly available for Flux Lora Trained models"},"scale":{"type":"number","description":"The scale of the model (example: 1.0)\nFor Flux Kontext Prompt Editing, the scale is between 0 and 2.","minimum":-2,"maximum":2}},"required":["modelId","scale"]}},"size":{"type":"number"},"spliceThreshold":{"type":"number"},"dilate":{"type":"number","description":"The number of pixels to dilate the result masks.","minimum":0,"maximum":30},"style":{"type":"string","enum":["3d-cartoon","3d-rendered","anime","cartoon","cinematic","claymation","cloud-skydome","comic","cyberpunk","enchanted","fantasy","ink","manga","manga-color","minimalist","neon-tron","oil-painting","pastel","photo","photography","psychedelic","retro-fantasy","scifi-concept-art","space","standard","whimsical"]},"imageFidelity":{"type":"number","description":"Strengthen the similarity to the original image during the upscale. 
Default: optimized for your preset and style.","minimum":0,"maximum":100},"inputLocation":{"type":"string","description":"Location of the input image in the output.","enum":["bottom","left","middle","right","top"],"default":"middle"},"seed":{"type":"string","description":"The seed used to generate this asset"},"preset":{"type":"string"},"trainingImage":{"type":"boolean"},"resultMask":{"type":"boolean","description":"Boolean to able return the masks (binary image) in the response.","default":true},"lockExpiresAt":{"type":"string","description":"The ISO timestamp when the lock on the canvas will expire"},"baseModelId":{"type":"string","description":"The baseModelId that maybe changed at inference time"},"pathPrecision":{"type":"number"},"rootParentId":{"type":"string"},"sharpen":{"type":"boolean","description":"Sharpen tiles.","default":false},"cornerThreshold":{"type":"number"},"styleImages":{"type":"array","description":"List of style images. Most of the time, only one image is enough. It must be existing AssetIds.","items":{"type":"string"}},"texture":{"type":"string","description":"The asset to convert in texture maps. Must reference an existing AssetId."},"filterSpeckle":{"type":"number"},"minThreshold":{"type":"number","description":"Minimum threshold for Grayscale conversion"},"maxThreshold":{"type":"number","description":"Maximum threshold for Grayscale conversion"},"styleImagesFidelity":{"type":"number","description":"Condition the influence of the style image(s). The higher the value, the more the style images will influence the upscaled image.","minimum":0,"maximum":100},"depthImage":{"type":"string","description":"The control image processed by depth estimator. 
Must reference an existing AssetId."},"parentId":{"type":"string"},"parentJobId":{"type":"string"},"width":{"type":"number","description":"The width of the rendered image.","minimum":1024,"maximum":2048},"strength":{"type":"number","description":"The strength\n\nOnly available for the `flux-kontext` LoRA model."},"inferenceId":{"type":"string","description":"The id of the Inference describing how this image was generated"},"lengthThreshold":{"type":"number"},"originalAssetId":{"type":"string"},"backgroundOpacity":{"type":"number","description":"Int to set between 0 and 255 for the opacity of the background in the result images.","minimum":0,"maximum":255},"aspectRatio":{"type":"string","description":"The optional aspect ratio given for the generation, only applicable for some models"},"detailsLevel":{"type":"number","description":"Amount of details to remove or add","minimum":-50,"maximum":50},"fractality":{"type":"number","description":"Determine the scale at which the upscale process works.\n- With a small value, the upscale works at the largest scale, resulting in fewer added details and more coherent images. Ideal for portraits, for example.\n- With a large value, the upscale works at the smallest scale, resulting in more added details and more hallucinations. Ideal for landscapes, for example.\n\n(info): A small value is slower and more expensive to run.","minimum":0,"maximum":100},"horizontalExpansionRatio":{"type":"number","description":"(deprecated) Horizontal expansion ratio.","minimum":1,"maximum":2},"points":{"type":"array","description":"List of points (label, x, y) in the image where label = 0 for background and 1 for object.","items":{"type":"array","items":{}}},"mode":{"type":"string"},"colorPrecision":{"type":"number"},"promptFidelity":{"type":"number","description":"Increase the fidelity to the prompt during upscale. 
Default: optimized for your preset and style.","minimum":0,"maximum":100},"betterQuality":{"type":"boolean","description":"Remove small dark spots (i.e. “pepper”) and connect small bright cracks.","default":false},"removeBackground":{"type":"boolean","description":"Remove background for Grayscale detector","default":true},"creativity":{"type":"number","description":"Allow the generation of \"hallucinations\" during the upscale process, which adds additional details and deviates from the original image. Default: optimized for your preset and style.","minimum":0,"maximum":100},"structureFidelity":{"type":"number","description":"Strength for the input image structure preservation","minimum":0,"maximum":100},"imageType":{"type":"string","description":"Preserve the seamless properties of skybox or texture images. Input has to be of same type (seamless).","enum":["seamfull","skybox","texture"],"default":"seamfull"},"image":{"type":"string","description":"The input image to process. Must reference an existing AssetId or be a data URL."},"thumbnail":{"type":"object","description":"The thumbnail of the canvas","properties":{"assetId":{"type":"string","description":"The AssetId of the image used as a thumbnail for the canvas (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"url":{"type":"string","description":"The url of the image used as a thumbnail for the canvas"}},"required":["assetId","url"]},"structureImage":{"type":"string","description":"The control image for structure. A canny detector will be applied to this image. 
Must reference an existing AssetId."},"invert":{"type":"boolean","description":"To invert the relief","default":false},"kind":{"type":"string","enum":["3d","audio","document","image","image-hdr","json","video"]},"colorMode":{"type":"string"},"highThreshold":{"type":"number","description":"High threshold for Canny detector"},"verticalExpansionRatio":{"type":"number","description":"(deprecated) Vertical expansion ratio.","minimum":1,"maximum":2},"halfMode":{"type":"boolean"},"name":{"type":"string"},"negativePromptStrength":{"type":"number","description":"Controls the influence of the negative prompt. Default 0 means the negative prompt has no effect. Higher values increase negative prompt influence.\nMust be > 0 if negativePrompt is provided.","minimum":0,"maximum":10},"refinementSteps":{"type":"number","description":"Additional refinement steps before scaling.\n\nIf scalingFactor == 1, the refinement process will be applied (1 + refinementSteps) times.\nIf scalingFactor > 1, the refinement process will be applied refinementSteps times.","minimum":0,"maximum":4},"referenceImages":{"type":"array","description":"The reference images used for the asset generation or editing","items":{"type":"string"}},"styleFidelity":{"type":"number","description":"The higher the value the more it will look like the style image(s)","minimum":0,"maximum":100},"resultContours":{"type":"boolean","description":"Boolean to output the contours.","default":false},"controlEnd":{"type":"number","description":"End step for control."},"polished":{"type":"number","description":"How polished is the surface? 0 is like a rough surface, 1 is like a mirror","minimum":0,"maximum":1},"scheduler":{"type":"string","description":"The scheduler used to generate this asset"},"overrideEmbeddings":{"type":"boolean","description":"Override the embeddings of the model. Only your prompt and negativePrompt will be used. 
Use with caution."},"progressPercent":{"type":"number"},"factor":{"type":"number","description":"Contrast factor for Grayscale detector"},"mask":{"type":"string","description":"The mask used for the asset generation or editing"},"lowThreshold":{"type":"number","description":"Low threshold for Canny detector"},"depthFidelity":{"type":"number","description":"The depth fidelity if a depth image provided","minimum":0,"maximum":100},"raised":{"type":"number","description":"How raised is the surface? 0 is flat like water, 1 is like a very rough rock","minimum":0,"maximum":1},"copiedAt":{"type":"string","description":"The date when the asset was copied to a project"},"contours":{"type":"array","items":{"type":"array","items":{"type":"array","items":{"type":"array","items":{"type":"number"}}}}},"negativePrompt":{"type":"string","description":"The negative prompt used to generate this asset"},"defaultParameters":{"type":"boolean","description":"If true, use the default parameters","default":false},"resizeOption":{"type":"number","description":"Size proportion of the input image in the output.","minimum":0.1,"maximum":1},"outputIndex":{"type":"number"},"saveFlipbook":{"type":"boolean","description":"Save a flipbook of the texture. Deactivated when the input texture is larger than 2048x2048px","default":true},"guidance":{"type":"number","description":"The guidance used to generate this asset"},"numInferenceSteps":{"type":"number","description":"The number of denoising steps for each image generation.","minimum":5,"maximum":50},"hdr":{"type":"number"},"clustering":{"type":"boolean","description":"Activate clustering.","default":false},"shiny":{"type":"number","description":"How shiny is the surface? 
0 is like a matte surface, 1 is like a diamond","minimum":0,"maximum":1},"colorCorrection":{"type":"boolean","description":"Ensure upscaled tile have the same color histogram as original tile.","default":true},"sketch":{"type":"boolean","description":"Activate sketch detection instead of canny.","default":false},"overlapPercentage":{"type":"number","description":"Overlap percentage for the output image.","minimum":0,"maximum":0.5},"prompt":{"type":"string","description":"The prompt that guided the asset generation or editing"},"resultImage":{"type":"boolean","description":"Boolean to able output the cut out object.","default":false},"cannyStructureImage":{"type":"string","description":"The control image already processed by canny detector. Must reference an existing AssetId."},"geometryEnforcement":{"type":"number","description":"Apply extra control to the Skybox 360 geometry.\nThe higher the value, the more the 360 geometry will influence the generated skybox image.\n\nUse with caution. 
Default is adapted to the other parameters.","minimum":0,"maximum":100},"targetWidth":{"type":"number","description":"Target width for the upscaled image, take priority over scaling factor","minimum":1024,"maximum":16000}},"required":["kind","type"]},"automaticCaptioning":{"type":"string","description":"Automatic captioning of the asset"},"description":{"type":"string","description":"The description, it will contain in priority:\n- the manual description\n- the advanced captioning when the asset is used in training flow\n- the automatic captioning"},"privacy":{"type":"string","description":"The privacy of the asset","enum":["private","public","unlisted"]},"mimeType":{"type":"string","description":"The mime type of the asset (example: \"image/png\")"},"source":{"type":"string","description":"source of the asset","enum":["3d23d","3d23d:texture","3d:texture","3d:texture:albedo","3d:texture:metallic","3d:texture:mtl","3d:texture:normal","3d:texture:roughness","audio2audio","background-removal","canvas","canvas-drawing","canvas-export","detection","generative-fill","image-prompt-editing","img23d","img2img","img2video","inference-control-net","inference-control-net-img","inference-control-net-inpainting","inference-control-net-inpainting-ip-adapter","inference-control-net-ip-adapter","inference-control-net-reference","inference-control-net-texture","inference-img","inference-img-ip-adapter","inference-img-texture","inference-in-paint","inference-in-paint-ip-adapter","inference-reference","inference-reference-texture","inference-txt","inference-txt-ip-adapter","inference-txt-texture","patch","pixelization","reframe","restyle","segment","segmentation-image","segmentation-mask","skybox-3d","skybox-base-360","skybox-hdri","texture","texture:albedo","texture:ao","texture:edge","texture:height","texture:metallic","texture:normal","texture:smoothness","txt23d","txt2audio","txt2img","txt2video","unknown","uploaded","uploaded-3d","uploaded-audio","uploaded-avatar","uploaded-video"
,"upscale","upscale-skybox","upscale-texture","upscale-video","vectorization","video2img","video2video"]},"ownerId":{"type":"string","description":"The owner (project) ID (example: \"proj_23tlk332lkht3kl2\" or \"team_dlkhgs23tlk3hlkth32lkht3kl2\" for old teams)"},"createdAt":{"type":"string","description":"The asset creation date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"firstFrame":{"type":"object","description":"The video asset's first frame.\n\nContains the assetId and the url of the first frame.","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"editCapabilities":{"type":"array","description":"List of edit capabilities","items":{"type":"string","enum":["DETECTION","GENERATIVE_FILL","PIXELATE","PROMPT_EDITING","REFINE","REFRAME","REMOVE_BACKGROUND","SEGMENTATION","UPSCALE","UPSCALE_360","VECTORIZATION"]}},"embedding":{"type":"array","description":"The embedding of the asset when requested.\n\nOnly available when an asset can be embedded (ie: not Detection maps)","items":{"type":"number"}},"id":{"type":"string","description":"The asset ID (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"updatedAt":{"type":"string","description":"The asset last update date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"thumbnail":{"type":"object","description":"The asset's thumbnail.\n\nContains the assetId and the url of the thumbnail.","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"nsfw":{"type":"array","description":"The NSFW labels","items":{"type":"string"}},"kind":{"type":"string","description":"The kind of asset","enum":["3d","audio","document","image","image-hdr","json","video"]},"lastFrame":{"type":"object","description":"The video asset's last frame.\n\nContains the assetId and the url of the last 
frame.","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"originalFileUrl":{"type":"string","description":"The original file url.\n\nContains the url of the original file. without any conversion. Only available for some specific video, audio and threeD assets.\nIs only specified if the given asset data has been replaced with a new file during the creation of the asset."},"authorId":{"type":"string","description":"The author user ID (example: \"dcf121faaa1a0a0bbbd9ca1b73d62aea\")"},"url":{"type":"string","description":"Signed URL to get the asset content"},"isHidden":{"type":"boolean","description":"Whether the asset is hidden."},"tags":{"type":"array","description":"The associated tags (example: [\"sci-fi\", \"landscape\"])","items":{"type":"string"}},"outputIndex":{"type":"number","description":"The output index of the asset within a job\nThis index is an positive integer that starts at 0\nIt is used to differentiate between multiple outputs of the same job\nIf the job has only one output, this index is 0"},"collectionIds":{"type":"array","description":"A list of CollectionId this asset belongs to","items":{"type":"string"}},"properties":{"type":"object","description":"The properties of the asset, content may depend on the kind of asset returned","properties":{"transcription":{"type":"object","description":"Transcription of the audio","properties":{"text":{"type":"string"}},"required":["text"]},"hasNormals":{"type":"boolean","description":"Whether the mesh has normal vectors"},"format":{"type":"string","description":"Format of the mesh file (e.g. 
'glb', etc.)"},"description":{"type":"string","description":"Description of the audio"},"bitrate":{"type":"number","description":"Bitrate of the media in bits per second"},"boneCount":{"type":"number","description":"Number of bones if skeleton exists"},"classification":{"type":"string","description":"Classification of the audio","enum":["effect","interview","music","other","sound","speech","text","unknown"]},"sampleRate":{"type":"number","description":"Sample rate of the media in Hz"},"faceCount":{"type":"number","description":"Number of faces/triangles in the mesh"},"vertexCount":{"type":"number","description":"Number of vertices in the mesh"},"animationFrameCount":{"type":"number","description":"Number of animation frames if animations exist"},"duration":{"type":"number","description":"Duration of the media in seconds"},"frameRate":{"type":"number","description":"Frame rate of the video in frames per second"},"nbFrames":{"type":"number","description":"Number of frames in the video"},"channels":{"type":"number","description":"Number of channels of the audio"},"size":{"type":"number"},"hasSkeleton":{"type":"boolean","description":"Whether the mesh has bones/skeleton"},"width":{"type":"number"},"hasAnimations":{"type":"boolean","description":"Whether the mesh has animations"},"codecName":{"type":"string","description":"Codec name of the media"},"hasUVs":{"type":"boolean","description":"Whether the mesh has UV coordinates"},"dimensions":{"type":"array","description":"Bounding box dimensions [width, height, depth]","items":{}},"height":{"type":"number"}},"required":["size"]},"status":{"type":"string","description":"The actual status","enum":["error","pending","success"]}},"required":["authorId","collectionIds","createdAt","editCapabilities","id","kind","metadata","mimeType","ownerId","privacy","properties","source","status","tags","updatedAt","url"]},"job":{"type":"object","description":"The job associated with the 
asset","properties":{"createdAt":{"type":"string","description":"The job creation date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"jobId":{"type":"string","description":"The job ID (example: \"job_ocZCnG1Df35XRL1QyCZSRxAG8\")"},"metadata":{"type":"object","description":"Metadata of the job with some additional information","properties":{"output":{"type":"object","description":"The output of the job","properties":{}},"input":{"type":"object","description":"The inputs for the job","properties":{}},"assetIds":{"type":"array","description":"List of produced assets for this job","items":{"type":"string"}},"flow":{"type":"array","description":"The flow of the job. Only available for workflow jobs.","items":{"type":"object","properties":{"jobId":{"type":"string","description":"If the flow is part of a WorkflowJob, this is the jobId for the node.\njobId is only available for nodes started. A node \"Pending\" for a running workflow job is not started."},"assets":{"type":"array","description":"List of produced assets for this node.","items":{"type":"object","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]}},"logicType":{"type":"string","description":"The type of the logic for the node.\nOnly available for logic nodes.","enum":["if-else"]},"dependsOn":{"type":"array","description":"The nodes that this node depends on.\nOnly available for nodes that have dependencies. Mainly used for user approval nodes.","items":{"type":"string"}},"modelId":{"type":"string","description":"The model id for the node. 
Mainly used for custom model tasks."},"inputs":{"type":"array","description":"The inputs of the node.","items":{"type":"object","description":"This is used to run the flow.","properties":{"parent":{"type":"boolean","description":"Whether this input represents a parent asset to assign to the produced assets.\nOnly available for \\`file\\` and \\`file_array\\` input types.\n\nFor \\`file_array\\`, the parent asset is the first item in the array."},"color":{"type":"boolean","description":"Whether the input is a color or not. Only available for \\`string\\` input type."},"inputs":{"type":"array","description":"The list of inputs which form an object within a container array.\nAll inputs are the same as the current object.\nThis is only available for type inputs_array inputs.","items":{"type":"object","properties":{}}},"minLength":{"type":"number","description":"The minimum allowed length for string inputs. Also applies to each item in \\`string_array\\`."},"backgroundBehavior":{"type":"string","description":"Specifies the background behavior for the input. 
Only available for \\`file\\` and \\`file_array\\`\ninput types with kind \\`image\\`.","enum":["opaque","transparent"]},"description":{"type":"string","description":"Help text displayed in the UI to provide additional information about the input"},"type":{"type":"string","description":"The data type of the input","enum":["boolean","file","file_array","inputs_array","model","model_array","number","number_array","string","string_array"]},"required":{"type":"object","description":"Set of rules that describes when this input is required:\n- \\`always\\`: Input is always required\n- \\`ifNotDefined\\`: Input is required when another specified input is not defined\n- \\`ifDefined\\`: Input is required when another specified input is defined\n- \\`conditionalValues\\`: Input is required when another input has a specific value\n\nBy default, the input is not required.","properties":{"always":{"type":"boolean","description":"Whether the input is always required"},"ifNotDefined":{"type":"object","description":"Makes this input required when another input is not defined:\n- Key: name of the input that must be undefined\n- Value: message to display when this input is required","properties":{}},"ifDefined":{"type":"object","description":"Makes this input required when another input is defined:\n- Key: name of the input that must be defined\n- Value: message to display when this input is required","properties":{}},"conditionalValues":{"type":"object","description":"Makes this input required when another input has a specific value:\n- Key: name of the input to check\n- Value: operation and allowed values that trigger the requirement","properties":{}}}},"modelTypes":{"type":"array","description":"The allowed model types for this input. 
Example: \\`[\"flux.1-lora\"]\\`.\nOnly available for \\`model_array\\` input type.","items":{"type":"string","enum":["custom","flux.1","flux.1-composition","flux.1-kontext-dev","flux.1-kontext-lora","flux.1-krea-dev","flux.1-krea-lora","flux.1-lora","flux.1-pro","flux.1.1-pro-ultra","flux1.1-pro","gpt-image-1","sd-1_5","sd-1_5-composition","sd-1_5-lora","sd-xl","sd-xl-composition","sd-xl-lora"]}},"ref":{"type":"object","description":"The reference to another input or output of the same workflow.\nMust have at least one of node or conditional.","properties":{"equal":{"type":"string","description":"This is the desired node output value if ref is an if/else node."},"node":{"type":"string","description":"The node id or 'workflow' if the source is a workflow input."},"conditional":{"type":"array","description":"The conditional nodes to reference.\nIf the conditional nodes are successful, the node will be successful.\nIf the conditional nodes are skipped, the node will be skipped.\nContains an array of node ids used to check the status of the nodes.","items":{"type":"string"}},"name":{"type":"string","description":"The name of the input or output to reference.\nIf the type is 'workflow', the name of the input of the workflow is required\nIf the type is 'node', the name is not mandatory, except if you want all outputs of the node.\nTo get all outputs of a node, you can use the name 'all'."}}},"min":{"type":"number","description":"The minimum allowed value. Only available for \\`number\\` and array input types."},"promptSpark":{"type":"boolean","description":"Whether the input is used with prompt spark. Only available for \\`string\\` input type."},"placeholder":{"type":"string","description":"Placeholder text for the input. Only available for 'string' input type."},"group":{"type":"string","description":"Used to visually group inputs together in the UI. 
Inputs with the same group value appear\nconsecutively in the UI."},"allowedValues":{"type":"array","description":"The allowed values for the input. For \\`string\\` or \\`number\\` types, creates a single-select\ndropdown.\nFor \\`string_array\\` type, creates a multi-select dropdown.","items":{}},"costImpact":{"type":"boolean","description":"Whether this input affects the model's cost calculation"},"max":{"type":"number","description":"The maximum allowed value. Only available for \\`number\\` and \\`array\\` input types."},"kind":{"type":"string","description":"The asset kind of the input. Only taken into account for \\`file\\` and \\`file_array\\` input types.","enum":["3d","audio","document","image","image-hdr","json","video"]},"maskFrom":{"type":"string","description":"The name of the file input field to use as the mask source"},"label":{"type":"string","description":"The label displayed in the UI for this input"},"hint":{"type":"string","description":"Hint text displayed in the UI as a tooltip to guide the user"},"name":{"type":"string","description":"The name that must be used to call the model through the API"},"step":{"type":"number","description":"The step increment for numeric inputs. Only available for \\`number\\` input type.","minimum":1},"prompt":{"type":"boolean","description":"Whether the input is a prompt. When true, displays as a text area with prompt spark feature.\nOnly available for \\`string\\` input type."},"maxLength":{"type":"number","description":"The maximum allowed length for \\`string\\` inputs. 
Also applies to each item in \\`string_array\\`."}},"required":["name","type"]}},"id":{"type":"string","description":"The id of the node."},"logic":{"type":"object","description":"The logic of the node.\nOnly available for logic nodes.","properties":{"default":{"type":"string","description":"The default case of the logic.\nContains the id/output of the node to execute if no case is matched.\nOnly available for if/else nodes."},"transform":{"type":"string","description":"The transform of the logic.\nOnly available for transform nodes."},"cases":{"type":"array","description":"The cases of the logic.\nOnly available for if/else nodes.","items":{"type":"object","properties":{"condition":{"type":"string"},"value":{"type":"string"}},"required":["condition","value"]}}}},"type":{"type":"string","description":"The type of the job for the node.","enum":["custom-model","generate-prompt","logic","remove-background","transform","user-approval","workflow"]},"workflowId":{"type":"string","description":"The workflow id for the node. Mainly used for workflow tasks."},"status":{"type":"string","description":"The status of the node. 
Only available for WorkflowJob nodes.","enum":["failure","pending","processing","skipped","success"]}},"required":["id","status","type"]}},"workflowId":{"type":"string","description":"The workflow ID of the job if job is part of a workflow."},"workflowJobId":{"type":"string","description":"The workflow job ID of the job if job is part of a workflow job."}}},"statusHistory":{"type":"array","description":"The history of the different statuses the job went through with the ISO string date\nof when the job reached each statuses.","items":{"type":"object","properties":{"date":{"type":"string"},"status":{"type":"string","enum":["canceled","failure","finalizing","in-progress","pending","queued","success","warming-up"]}},"required":["date","status"]}},"progress":{"type":"number","description":"Progress of the job (between 0 and 1)"},"authorId":{"type":"string","description":"The author user ID (example: \"dcf121faaa1a0a0bbbd9ca1b73d62aea\")"},"jobType":{"type":"string","description":"The type of job","enum":["assets-download","canvas-export","caption","caption-llava","custom","describe-style","detection","embed","flux","flux-model-training","generate-prompt","image-generation","image-prompt-editing","inference","mesh-preview-rendering","model-download","model-import","model-training","openai-image-generation","patch-image","pixelate","reframe","remove-background","repaint","restyle","segment","skybox-3d","skybox-base-360","skybox-hdri","skybox-upscale-360","texture","translate","upload","upscale","upscale-skybox","upscale-texture","vectorize","workflow"]},"ownerId":{"type":"string","description":"The owner ID (example: \"team_U3Qmc8PCdWXwAQJ4Dvw4tV6D\")"},"billing":{"type":"object","description":"The billing of the job","properties":{"cuDiscount":{"type":"number"},"cuCost":{"type":"number"}},"required":["cuCost","cuDiscount"]},"status":{"type":"string","description":"The current status of the 
job","enum":["canceled","failure","finalizing","in-progress","pending","queued","success","warming-up"]},"updatedAt":{"type":"string","description":"The job last update date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"}},"required":["createdAt","jobId","jobType","metadata","progress","status","statusHistory","updatedAt"]}},"required":["asset","modelId"]}}}},"PutPromptGeneratorRequest":{"type":"object","properties":{"ensureIPCleared":{"type":"boolean","description":"Whether we try to ensure IP removal for new prompt generation."},"image":{"type":"string","description":"The input image as a data URL (example: \"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVQYV2NgYAAAAAMAAWgmWQ0AAAAASUVORK5CYII=\") or the asset ID (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")\n\nNotes:\n- Only available when `mode` is `image-editing-prompt`.\n- in `contextual` mode, images condition prompt generation by using their actual descriptions as context\n- in all other modes, it supersedes the `modelId` parameter when provided."},"images":{"type":"array","description":"List of images used to condition the generation.\n\nImages are set as data URLs (example: \\\"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVQYV2NgYAAAAAMAAWgmWQ0AAAAASUVORK5CYII=\\\") or the asset IDs (example: \\\"asset_GTrL3mq4SXWyMxkOHRxlpw\\\").\n\nNotes:\n- in `contextual` mode, images condition prompt generation by using their actual descriptions as context\n- in all other modes, it supersedes the `modelId` parameter when provided.","items":{"type":"string"}},"seed":{"type":"number","description":"If specified, the API will make a best effort to produce the same results, such that repeated requests with the same `seed` and parameters should return the same outputs. 
Must be used along with the same parameters including prompt, model's state, etc."},"modelId":{"type":"string","description":"The modelId used to condition the generation.\n\nWhen provided, the generation will take into account model's training images, examples.\n\nIn `contextual` mode, the modelId is used to retrieve additional context from the model such as its type and capabilities."},"assetIds":{"type":"array","description":"The asset ids used to condition the generation.","items":{"type":"string"}},"numResults":{"type":"number","description":"The number of results to return.","minimum":1,"maximum":5},"topP":{"type":"number","description":"An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So `0.1` means only the tokens comprising the top `10%` probability mass are considered.\n\nWe generally recommend altering this or `temperature` but not both.","minimum":0,"maximum":1},"detailsLevel":{"type":"string","description":"The details level used to generate the captions.\n\nWhen a modelId is provided and examples are available, the details level is ignored.\n\nOnly available for `image-captioning` mode.","enum":["action","action+style"],"default":"action"},"mode":{"type":"string","description":"The prompt generation mode","enum":["completion","contextual","image-captioning","image-editing","inventive","structured","style-description","translation"],"default":"structured"},"unwantedSequences":{"type":"array","description":"Optional list of word sequences that should not be present in the generated prompts.","items":{"type":"string"}},"temperature":{"type":"number","description":"The sampling temperature to use. 
Higher values like `0.8` will make the output more random, while lower values like `0.2` will make it more focused and deterministic.\n\nWe generally recommend altering this or `topP` but not both.","minimum":0,"maximum":2},"prompt":{"type":"string","description":"The initial prompt used with the `completion`, `inventive`, `structured`, `image-editing` and `translation` modes.\n\nIn `completion`, `inventive` and `structured` modes, the prompt is truncated to roughly 70 words.\nIn `translation` mode, the prompt must be between 8 and no more than 512 characters."}}},"GetJobIdResponse":{"type":"object","required":["job"],"properties":{"job":{"type":"object","properties":{"createdAt":{"type":"string","description":"The job creation date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"jobId":{"type":"string","description":"The job ID (example: \"job_ocZCnG1Df35XRL1QyCZSRxAG8\")"},"metadata":{"type":"object","description":"Metadata of the job with some additional information","properties":{"output":{"type":"object","description":"The output of the job","properties":{}},"input":{"type":"object","description":"The inputs for the job","properties":{}},"assetIds":{"type":"array","description":"List of produced assets for this job","items":{"type":"string"}},"flow":{"type":"array","description":"The flow of the job. Only available for workflow jobs.","items":{"type":"object","properties":{"jobId":{"type":"string","description":"If the flow is part of a WorkflowJob, this is the jobId for the node.\njobId is only available for nodes started. 
A node \"Pending\" for a running workflow job is not started."},"assets":{"type":"array","description":"List of produced assets for this node.","items":{"type":"object","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]}},"logicType":{"type":"string","description":"The type of the logic for the node.\nOnly available for logic nodes.","enum":["if-else"]},"dependsOn":{"type":"array","description":"The nodes that this node depends on.\nOnly available for nodes that have dependencies. Mainly used for user approval nodes.","items":{"type":"string"}},"modelId":{"type":"string","description":"The model id for the node. Mainly used for custom model tasks."},"inputs":{"type":"array","description":"The inputs of the node.","items":{"type":"object","description":"This is used to run the flow.","properties":{"parent":{"type":"boolean","description":"Whether this input represents a parent asset to assign to the produced assets.\nOnly available for \\`file\\` and \\`file_array\\` input types.\n\nFor \\`file_array\\`, the parent asset is the first item in the array."},"color":{"type":"boolean","description":"Whether the input is a color or not. Only available for \\`string\\` input type."},"inputs":{"type":"array","description":"The list of inputs which form an object within a container array.\nAll inputs are the same as the current object.\nThis is only available for type inputs_array inputs.","items":{"type":"object","properties":{}}},"minLength":{"type":"number","description":"The minimum allowed length for string inputs. Also applies to each item in \\`string_array\\`."},"backgroundBehavior":{"type":"string","description":"Specifies the background behavior for the input. 
Only available for \\`file\\` and \\`file_array\\`\ninput types with kind \\`image\\`.","enum":["opaque","transparent"]},"description":{"type":"string","description":"Help text displayed in the UI to provide additional information about the input"},"type":{"type":"string","description":"The data type of the input","enum":["boolean","file","file_array","inputs_array","model","model_array","number","number_array","string","string_array"]},"required":{"type":"object","description":"Set of rules that describes when this input is required:\n- \\`always\\`: Input is always required\n- \\`ifNotDefined\\`: Input is required when another specified input is not defined\n- \\`ifDefined\\`: Input is required when another specified input is defined\n- \\`conditionalValues\\`: Input is required when another input has a specific value\n\nBy default, the input is not required.","properties":{"always":{"type":"boolean","description":"Whether the input is always required"},"ifNotDefined":{"type":"object","description":"Makes this input required when another input is not defined:\n- Key: name of the input that must be undefined\n- Value: message to display when this input is required","properties":{}},"ifDefined":{"type":"object","description":"Makes this input required when another input is defined:\n- Key: name of the input that must be defined\n- Value: message to display when this input is required","properties":{}},"conditionalValues":{"type":"object","description":"Makes this input required when another input has a specific value:\n- Key: name of the input to check\n- Value: operation and allowed values that trigger the requirement","properties":{}}}},"modelTypes":{"type":"array","description":"The allowed model types for this input. 
Example: \\`[\"flux.1-lora\"]\\`.\nOnly available for \\`model_array\\` input type.","items":{"type":"string","enum":["custom","flux.1","flux.1-composition","flux.1-kontext-dev","flux.1-kontext-lora","flux.1-krea-dev","flux.1-krea-lora","flux.1-lora","flux.1-pro","flux.1.1-pro-ultra","flux1.1-pro","gpt-image-1","sd-1_5","sd-1_5-composition","sd-1_5-lora","sd-xl","sd-xl-composition","sd-xl-lora"]}},"ref":{"type":"object","description":"The reference to another input or output of the same workflow.\nMust have at least one of node or conditional.","properties":{"equal":{"type":"string","description":"This is the desired node output value if ref is an if/else node."},"node":{"type":"string","description":"The node id or 'workflow' if the source is a workflow input."},"conditional":{"type":"array","description":"The conditional nodes to reference.\nIf the conditional nodes are successful, the node will be successful.\nIf the conditional nodes are skipped, the node will be skipped.\nContains an array of node ids used to check the status of the nodes.","items":{"type":"string"}},"name":{"type":"string","description":"The name of the input or output to reference.\nIf the type is 'workflow', the name of the input of the workflow is required\nIf the type is 'node', the name is not mandatory, except if you want all outputs of the node.\nTo get all outputs of a node, you can use the name 'all'."}}},"min":{"type":"number","description":"The minimum allowed value. Only available for \\`number\\` and array input types."},"promptSpark":{"type":"boolean","description":"Whether the input is used with prompt spark. Only available for \\`string\\` input type."},"placeholder":{"type":"string","description":"Placeholder text for the input. Only available for 'string' input type."},"group":{"type":"string","description":"Used to visually group inputs together in the UI. 
Inputs with the same group value appear\nconsecutively in the UI."},"allowedValues":{"type":"array","description":"The allowed values for the input. For \\`string\\` or \\`number\\` types, creates a single-select\ndropdown.\nFor \\`string_array\\` type, creates a multi-select dropdown.","items":{}},"costImpact":{"type":"boolean","description":"Whether this input affects the model's cost calculation"},"max":{"type":"number","description":"The maximum allowed value. Only available for \\`number\\` and \\`array\\` input types."},"kind":{"type":"string","description":"The asset kind of the input. Only taken into account for \\`file\\` and \\`file_array\\` input types.","enum":["3d","audio","document","image","image-hdr","json","video"]},"maskFrom":{"type":"string","description":"The name of the file input field to use as the mask source"},"label":{"type":"string","description":"The label displayed in the UI for this input"},"hint":{"type":"string","description":"Hint text displayed in the UI as a tooltip to guide the user"},"name":{"type":"string","description":"The name that must be used to call the model through the API"},"step":{"type":"number","description":"The step increment for numeric inputs. Only available for \\`number\\` input type.","minimum":1},"prompt":{"type":"boolean","description":"Whether the input is a prompt. When true, displays as a text area with prompt spark feature.\nOnly available for \\`string\\` input type."},"maxLength":{"type":"number","description":"The maximum allowed length for \\`string\\` inputs. 
Also applies to each item in \\`string_array\\`."}},"required":["name","type"]}},"id":{"type":"string","description":"The id of the node."},"logic":{"type":"object","description":"The logic of the node.\nOnly available for logic nodes.","properties":{"default":{"type":"string","description":"The default case of the logic.\nContains the id/output of the node to execute if no case is matched.\nOnly available for if/else nodes."},"transform":{"type":"string","description":"The transform of the logic.\nOnly available for transform nodes."},"cases":{"type":"array","description":"The cases of the logic.\nOnly available for if/else nodes.","items":{"type":"object","properties":{"condition":{"type":"string"},"value":{"type":"string"}},"required":["condition","value"]}}}},"type":{"type":"string","description":"The type of the job for the node.","enum":["custom-model","generate-prompt","logic","remove-background","transform","user-approval","workflow"]},"workflowId":{"type":"string","description":"The workflow id for the node. Mainly used for workflow tasks."},"status":{"type":"string","description":"The status of the node. 
Only available for WorkflowJob nodes.","enum":["failure","pending","processing","skipped","success"]}},"required":["id","status","type"]}},"workflowId":{"type":"string","description":"The workflow ID of the job if job is part of a workflow."},"workflowJobId":{"type":"string","description":"The workflow job ID of the job if job is part of a workflow job."}}},"statusHistory":{"type":"array","description":"The history of the different statuses the job went through with the ISO string date\nof when the job reached each statuses.","items":{"type":"object","properties":{"date":{"type":"string"},"status":{"type":"string","enum":["canceled","failure","finalizing","in-progress","pending","queued","success","warming-up"]}},"required":["date","status"]}},"progress":{"type":"number","description":"Progress of the job (between 0 and 1)"},"authorId":{"type":"string","description":"The author user ID (example: \"dcf121faaa1a0a0bbbd9ca1b73d62aea\")"},"jobType":{"type":"string","description":"The type of job","enum":["assets-download","canvas-export","caption","caption-llava","custom","describe-style","detection","embed","flux","flux-model-training","generate-prompt","image-generation","image-prompt-editing","inference","mesh-preview-rendering","model-download","model-import","model-training","openai-image-generation","patch-image","pixelate","reframe","remove-background","repaint","restyle","segment","skybox-3d","skybox-base-360","skybox-hdri","skybox-upscale-360","texture","translate","upload","upscale","upscale-skybox","upscale-texture","vectorize","workflow"]},"ownerId":{"type":"string","description":"The owner ID (example: \"team_U3Qmc8PCdWXwAQJ4Dvw4tV6D\")"},"billing":{"type":"object","description":"The billing of the job","properties":{"cuDiscount":{"type":"number"},"cuCost":{"type":"number"}},"required":["cuCost","cuDiscount"]},"status":{"type":"string","description":"The current status of the 
job","enum":["canceled","failure","finalizing","in-progress","pending","queued","success","warming-up"]},"updatedAt":{"type":"string","description":"The job last update date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"}},"required":["createdAt","jobId","jobType","metadata","progress","status","statusHistory","updatedAt"]}}},"PutWorkflowsByWorkflowIdRequest":{"type":"object","properties":{"tagSet":{"type":"array","description":"The tag set of the workflow.","items":{"type":"string"}},"thumbnail":{"type":"string","description":"The thumbnail asset of the workflow. Set to null to unset the thumbnail."},"before":{"type":"string","description":"The before asset of the workflow. Set to null to unset the before asset."},"inputs":{"type":"array","description":"The inputs of the workflow.","items":{"type":"object","description":"Defines the input parameters for a model. Use this to understand the available input parameters\nfor a given `modelId` when calling `POST /generate/custom/{modelId}`.\nSee {@link https://docs.scenario.com/docs/video-generation} for examples.","properties":{"parent":{"type":"boolean","description":"Whether this input represents a parent asset to assign to the produced assets.\nOnly available for \\`file\\` and \\`file_array\\` input types.\n\nFor \\`file_array\\`, the parent asset is the first item in the array."},"color":{"type":"boolean","description":"Whether the input is a color or not. Only available for \\`string\\` input type."},"inputs":{"type":"array","description":"The list of inputs which form an object within a container array.\nAll inputs are the same as the current object.\nThis is only available for type inputs_array inputs.","items":{"type":"object","properties":{}}},"minLength":{"type":"number","description":"The minimum allowed length for string inputs. Also applies to each item in \\`string_array\\`."},"backgroundBehavior":{"type":"string","description":"Specifies the background behavior for the input. 
Only available for \\`file\\` and \\`file_array\\`\ninput types with kind \\`image\\`.","enum":["opaque","transparent"]},"description":{"type":"string","description":"Help text displayed in the UI to provide additional information about the input"},"type":{"type":"string","description":"The data type of the input","enum":["boolean","file","file_array","inputs_array","model","model_array","number","number_array","string","string_array"]},"required":{"type":"object","description":"Set of rules that describes when this input is required:\n- \\`always\\`: Input is always required\n- \\`ifNotDefined\\`: Input is required when another specified input is not defined\n- \\`ifDefined\\`: Input is required when another specified input is defined\n- \\`conditionalValues\\`: Input is required when another input has a specific value\n\nBy default, the input is not required.","properties":{"always":{"type":"boolean","description":"Whether the input is always required"},"ifNotDefined":{"type":"object","description":"Makes this input required when another input is not defined:\n- Key: name of the input that must be undefined\n- Value: message to display when this input is required","properties":{}},"ifDefined":{"type":"object","description":"Makes this input required when another input is defined:\n- Key: name of the input that must be defined\n- Value: message to display when this input is required","properties":{}},"conditionalValues":{"type":"object","description":"Makes this input required when another input has a specific value:\n- Key: name of the input to check\n- Value: operation and allowed values that trigger the requirement","properties":{}}}},"modelTypes":{"type":"array","description":"The allowed model types for this input. 
Example: \\`[\"flux.1-lora\"]\\`.\nOnly available for \\`model_array\\` input type.","items":{"type":"string","enum":["custom","flux.1","flux.1-composition","flux.1-kontext-dev","flux.1-kontext-lora","flux.1-krea-dev","flux.1-krea-lora","flux.1-lora","flux.1-pro","flux.1.1-pro-ultra","flux1.1-pro","gpt-image-1","sd-1_5","sd-1_5-composition","sd-1_5-lora","sd-xl","sd-xl-composition","sd-xl-lora"]}},"min":{"type":"number","description":"The minimum allowed value. Only available for \\`number\\` and array input types."},"promptSpark":{"type":"boolean","description":"Whether the input is used with prompt spark. Only available for \\`string\\` input type."},"placeholder":{"type":"string","description":"Placeholder text for the input. Only available for 'string' input type."},"group":{"type":"string","description":"Used to visually group inputs together in the UI. Inputs with the same group value appear\nconsecutively in the UI."},"allowedValues":{"type":"array","description":"The allowed values for the input. For \\`string\\` or \\`number\\` types, creates a single-select\ndropdown.\nFor \\`string_array\\` type, creates a multi-select dropdown.","items":{}},"costImpact":{"type":"boolean","description":"Whether this input affects the model's cost calculation"},"max":{"type":"number","description":"The maximum allowed value. Only available for \\`number\\` and \\`array\\` input types."},"kind":{"type":"string","description":"The asset kind of the input. 
Only taken into account for \\`file\\` and \\`file_array\\` input types.","enum":["3d","audio","document","image","image-hdr","json","video"]},"maskFrom":{"type":"string","description":"The name of the file input field to use as the mask source"},"label":{"type":"string","description":"The label displayed in the UI for this input"},"hint":{"type":"string","description":"Hint text displayed in the UI as a tooltip to guide the user"},"name":{"type":"string","description":"The name that must be used to call the model through the API"},"step":{"type":"number","description":"The step increment for numeric inputs. Only available for \\`number\\` input type.","minimum":1},"prompt":{"type":"boolean","description":"Whether the input is a prompt. When true, displays as a text area with prompt spark feature.\nOnly available for \\`string\\` input type."},"maxLength":{"type":"number","description":"The maximum allowed length for \\`string\\` inputs. Also applies to each item in \\`string_array\\`."}},"required":["name","type"]}},"name":{"type":"string","description":"The name of the workflow."},"description":{"type":"string","description":"The description of the workflow."},"privacy":{"type":"string","description":"The privacy of the workflow.","enum":["private","public","unlisted"]},"after":{"type":"string","description":"The after asset of the workflow. Set to null to unset the after asset."},"editorInfo":{"type":"object","description":"The UI data about the workflow. This is managed by scenario webapp.","properties":{}},"flow":{"type":"array","description":"The flow of the workflow.","items":{"type":"object","properties":{"jobId":{"type":"string","description":"If the flow is part of a WorkflowJob, this is the jobId for the node.\njobId is only available for nodes started. 
A node \"Pending\" for a running workflow job is not started."},"logicType":{"type":"string","description":"The type of the logic for the node.\nOnly available for logic nodes.","enum":["if-else"]},"dependsOn":{"type":"array","description":"The nodes that this node depends on.\nOnly available for nodes that have dependencies. Mainly used for user approval nodes.","items":{"type":"string"}},"modelId":{"type":"string","description":"The model id for the node. Mainly used for custom model tasks."},"inputs":{"type":"array","description":"The inputs of the node.","items":{"type":"object","description":"This is used to run the flow.","properties":{"parent":{"type":"boolean","description":"Whether this input represents a parent asset to assign to the produced assets.\nOnly available for \\`file\\` and \\`file_array\\` input types.\n\nFor \\`file_array\\`, the parent asset is the first item in the array."},"color":{"type":"boolean","description":"Whether the input is a color or not. Only available for \\`string\\` input type."},"inputs":{"type":"array","description":"The list of inputs which form an object within a container array.\nAll inputs are the same as the current object.\nThis is only available for type inputs_array inputs.","items":{"type":"object","properties":{}}},"minLength":{"type":"number","description":"The minimum allowed length for string inputs. Also applies to each item in \\`string_array\\`."},"backgroundBehavior":{"type":"string","description":"Specifies the background behavior for the input. 
Only available for \\`file\\` and \\`file_array\\`\ninput types with kind \\`image\\`.","enum":["opaque","transparent"]},"description":{"type":"string","description":"Help text displayed in the UI to provide additional information about the input"},"type":{"type":"string","description":"The data type of the input","enum":["boolean","file","file_array","inputs_array","model","model_array","number","number_array","string","string_array"]},"required":{"type":"object","description":"Set of rules that describes when this input is required:\n- \\`always\\`: Input is always required\n- \\`ifNotDefined\\`: Input is required when another specified input is not defined\n- \\`ifDefined\\`: Input is required when another specified input is defined\n- \\`conditionalValues\\`: Input is required when another input has a specific value\n\nBy default, the input is not required.","properties":{"always":{"type":"boolean","description":"Whether the input is always required"},"ifNotDefined":{"type":"object","description":"Makes this input required when another input is not defined:\n- Key: name of the input that must be undefined\n- Value: message to display when this input is required","properties":{}},"ifDefined":{"type":"object","description":"Makes this input required when another input is defined:\n- Key: name of the input that must be defined\n- Value: message to display when this input is required","properties":{}},"conditionalValues":{"type":"object","description":"Makes this input required when another input has a specific value:\n- Key: name of the input to check\n- Value: operation and allowed values that trigger the requirement","properties":{}}}},"modelTypes":{"type":"array","description":"The allowed model types for this input. 
Example: \\`[\"flux.1-lora\"]\\`.\nOnly available for \\`model_array\\` input type.","items":{"type":"string","enum":["custom","flux.1","flux.1-composition","flux.1-kontext-dev","flux.1-kontext-lora","flux.1-krea-dev","flux.1-krea-lora","flux.1-lora","flux.1-pro","flux.1.1-pro-ultra","flux1.1-pro","gpt-image-1","sd-1_5","sd-1_5-composition","sd-1_5-lora","sd-xl","sd-xl-composition","sd-xl-lora"]}},"ref":{"type":"object","description":"The reference to another input or output of the same workflow.\nMust have at least one of node or conditional.","properties":{"equal":{"type":"string","description":"This is the desired node output value if ref is an if/else node."},"node":{"type":"string","description":"The node id or 'workflow' if the source is a workflow input."},"conditional":{"type":"array","description":"The conditional nodes to reference.\nIf the conditional nodes are successful, the node will be successful.\nIf the conditional nodes are skipped, the node will be skipped.\nContains an array of node ids used to check the status of the nodes.","items":{"type":"string"}},"name":{"type":"string","description":"The name of the input or output to reference.\nIf the type is 'workflow', the name is the name of the input of the workflow is required\nIf the type is 'node', the name is not mandatory, except if you want all outputs of the node.\nTo get all outputs of a node, you can use the name 'all'."}}},"min":{"type":"number","description":"The minimum allowed value. Only available for \\`number\\` and array input types."},"promptSpark":{"type":"boolean","description":"Whether the input is used with prompt spark. Only available for \\`string\\` input type."},"placeholder":{"type":"string","description":"Placeholder text for the input. Only available for 'string' input type."},"group":{"type":"string","description":"Used to visually group inputs together in the UI. 
Inputs with the same group value appear\nconsecutively in the UI."},"allowedValues":{"type":"array","description":"The allowed values for the input. For \\`string\\` or \\`number\\` types, creates a single-select\ndropdown.\nFor \\`string_array\\` type, creates a multi-select dropdown.","items":{}},"costImpact":{"type":"boolean","description":"Whether this input affects the model's cost calculation"},"max":{"type":"number","description":"The maximum allowed value. Only available for \\`number\\` and \\`array\\` input types."},"kind":{"type":"string","description":"The asset kind of the input. Only taken into account for \\`file\\` and \\`file_array\\` input types.","enum":["3d","audio","document","image","image-hdr","json","video"]},"maskFrom":{"type":"string","description":"The name of the file input field to use as the mask source"},"label":{"type":"string","description":"The label displayed in the UI for this input"},"hint":{"type":"string","description":"Hint text displayed in the UI as a tooltip to guide the user"},"name":{"type":"string","description":"The name that must be used to call the model through the API"},"step":{"type":"number","description":"The step increment for numeric inputs. Only available for \\`number\\` input type.","minimum":1},"prompt":{"type":"boolean","description":"Whether the input is a prompt. When true, displays as a text area with prompt spark feature.\nOnly available for \\`string\\` input type."},"maxLength":{"type":"number","description":"The maximum allowed length for \\`string\\` inputs. 
Also applies to each item in \\`string_array\\`."}},"required":["name","type"]}},"id":{"type":"string","description":"The id of the node."},"logic":{"type":"object","description":"The logic of the node.\nOnly available for logic nodes.","properties":{"default":{"type":"string","description":"The default case of the logic.\nContains the id/output of the node to execute if no case is matched.\nOnly available for if/else nodes."},"transform":{"type":"string","description":"The transform of the logic.\nOnly available for transform nodes."},"cases":{"type":"array","description":"The cases of the logic.\nOnly available for if/else nodes.","items":{"type":"object","properties":{"condition":{"type":"string"},"value":{"type":"string"}},"required":["condition","value"]}}}},"type":{"type":"string","description":"The type of the job for the node.","enum":["custom-model","generate-prompt","logic","remove-background","transform","user-approval","workflow"]},"outputAssetIds":{"type":"array","description":"The asset IDs of the node.\nOnly available for nodes that produce assets.","items":{"type":"string"}},"workflowId":{"type":"string","description":"The workflow id for the node. Mainly used for workflow tasks."},"status":{"type":"string","description":"The status of the node. 
Only available for WorkflowJob nodes.","enum":["failure","pending","processing","skipped","success"]}},"required":["id","status","type"]}},"outputAssetKinds":{"type":"array","description":"The output asset kinds of the workflow.","items":{"type":"string","enum":["3d","audio","document","image","image-hdr","json","video"]}},"status":{"type":"string","description":"The status of the workflow.","enum":["draft","ready"]}}},"PostTxt2imgIpAdapterInferencesResponse":{"type":"object","required":["inference","job"],"properties":{"creativeUnitsCost":{"type":"number","description":"The Creative Units cost for the request billed"},"creativeUnitsDiscount":{"type":"number","description":"The Creative Units discount for the request billed"},"inference":{"type":"object","properties":{"ipAdapterImageIds":{"type":"array","description":"Asset id of the input IpAdapter images","items":{"type":"string"}},"ipAdapterImageId":{"type":"string","description":"Asset id of the input IpAdapter image"},"ipAdapterScale":{"type":"number","description":"IpAdapter scale factor (within [0.0, 1.0], default: 0.9).","minimum":0,"maximum":1},"seed":{"type":"string","description":"Used to reproduce previous results. Default: randomly generated number."},"ipAdapterType":{"type":"string","description":"The type of IP Adapter model to use. Must be one of [`style`, `character`], defaults to `style`","enum":["character","style"],"default":"style"},"modelEpoch":{"type":"string","description":"The epoch of the model to use for the inference. Only available for Flux Lora Trained models."},"hideResults":{"type":"boolean","description":"If set, generated assets will be hidden and not returned in the list of images of the inference\nor when listing assets (default: false)","default":false},"aspectRatio":{"type":"string","description":"The aspect ratio of the generated images. 
Only used for the model flux.1.1-pro-ultra.\nThe aspect ratio is a string formatted as \"width:height\" (example: \"16:9\").","enum":["16:9","1:1","21:9","2:3","3:2","3:4","4:3","4:5","5:4","9:16","9:21"],"default":"1:1"},"ipAdapterScales":{"type":"array","description":"IpAdapter scale factors (within [0.0, 1.0], default: 0.9).","items":{"type":"number"}},"type":{"type":"string","description":"The type of inference to use. Example: txt2img, img2img, etc.\n\nSelecting the right type will condition the expected parameters.\n\nNote: if model.type is `sd-xl*` or `sd-1_5*`, when using the `\"inpaint\"` inference type, Scenario determines the best available `baseModel` for a given `modelId`: one of `[\"stable-diffusion-inpainting\", \"stable-diffusion-xl-1.0-inpainting-0.1\"] will be used.","enum":["txt2img","txt2img_ip_adapter","txt2img_texture","img2img","img2img_ip_adapter","img2img_texture","inpaint","outpaint","inpaint_ip_adapter","controlnet","controlnet_ip_adapter","reference","reference_texture","controlnet_reference","controlnet_img2img","controlnet_inpaint","controlnet_inpaint_ip_adapter","controlnet_texture"]},"ipAdapterImage":{"type":"string","description":"Signed URL to display the IpAdapter image"},"negativePrompt":{"type":"string","description":"The prompt not to guide the image generation, ignored when guidance < 1 (example: \"((ugly face))\")\nFor Flux based model (not Fast-Flux): requires negativePromptStrength > 0 and active only for inference types txt2img / img2img / controlnet."},"scheduler":{"type":"string","description":"The scheduler to use to override the default configured for the model. 
See detailed documentation for more details.","enum":["DDIMScheduler","DDPMScheduler","DEISMultistepScheduler","DPMSolverMultistepScheduler","DPMSolverSinglestepScheduler","EulerAncestralDiscreteScheduler","EulerDiscreteScheduler","HeunDiscreteScheduler","KDPM2AncestralDiscreteScheduler","KDPM2DiscreteScheduler","LCMScheduler","LMSDiscreteScheduler","PNDMScheduler","TCDScheduler","UniPCMultistepScheduler"]},"intermediateImages":{"type":"boolean","description":"Enable or disable the intermediate images generation (default: false)","default":false},"concepts":{"type":"array","items":{"type":"object","properties":{"modelId":{"type":"string","description":"The model ID (example: \"model_eyVcnFJcR92BxBkz7N6g5w\")"},"modelEpoch":{"type":"string","description":"The epoch of the model (example: \"000001\")\nOnly available for Flux Lora Trained models"},"scale":{"type":"number","description":"The scale of the model (example: 1.0)\nFor Flux Kontext Prompt Editing, the scale is between 0 and 2.","minimum":-2,"maximum":2}},"required":["modelId","scale"]}},"guidance":{"type":"number","description":"Controls how closely the generated image follows the prompt. Higher values result in stronger adherence to the prompt. 
Default and allowed values depend on the model type:\n- For Flux dev models, the default is 3.5 and allowed values are within [0, 10]\n- For Flux pro models, the default is 3 and allowed values are within [2, 5]\n- For SDXL models, the default is 6 and allowed values are within [0, 20]\n- For SD1.5 models, the default is 7.5 and allowed values are within [0, 20]","minimum":0,"maximum":20},"numInferenceSteps":{"type":"number","description":"The number of denoising steps for each image generation (within [1, 150], default: 30)","minimum":1,"maximum":150},"numSamples":{"type":"number","description":"The number of images to generate (within [1, 128], default: 4)","minimum":1,"maximum":128},"ipAdapterImages":{"type":"array","description":"Signed URL to display the IpAdapter images","items":{"type":"string"}},"width":{"type":"number","description":"The width of the generated images, must be a 8 multiple (within [64, 2048], default: 512)\nIf model.type is `sd-xl`, `sd-xl-lora`, `sd-xl-composition` the width must be within [512, 2048]\nIf model.type is `sd-1_5`, the width must be within [64, 1024]\nIf model.type is `flux.1.1-pro-ultra`, you can use the aspectRatio parameter instead","minimum":64,"maximum":2048},"negativePromptStrength":{"type":"number","description":"Only applicable for flux-dev based models for `txt2img`, `img2img`, and `controlnet` inference types.\n\nControls the influence of the negative prompt. Default 0 means the negative prompt has no effect. Higher values increase negative prompt influence.\nMust be > 0 if negativePrompt is provided.","minimum":0,"maximum":10},"baseModelId":{"type":"string","description":"The base model to use for the inference. Only Flux LoRA models can use this parameter.\nAllowed values are available in the model's attribute: `compliantModelIds`"},"prompt":{"type":"string","description":"Full text prompt including the model placeholder. 
(example: \"an illustration of phoenix in a fantasy world, flying over a mountain, 8k, bokeh effect\")"},"height":{"type":"number","description":"The height of the generated images, must be a 8 multiple (within [64, 2048], default: 512)\nIf model.type is `sd-xl`, `sd-xl-lora`, `sd-xl-composition` the height must be within [512, 2048]\nIf model.type is `sd-1_5`, the height must be within [64, 1024]\nIf model.type is `flux.1.1-pro-ultra`, you can use the aspectRatio parameter instead","minimum":64,"maximum":2048}},"required":["prompt","type"]},"job":{"type":"object","properties":{"createdAt":{"type":"string","description":"The job creation date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"jobId":{"type":"string","description":"The job ID (example: \"job_ocZCnG1Df35XRL1QyCZSRxAG8\")"},"metadata":{"type":"object","description":"Metadata of the job with some additional information","properties":{"output":{"type":"object","description":"The output of the job","properties":{}},"input":{"type":"object","description":"The inputs for the job","properties":{}},"assetIds":{"type":"array","description":"List of produced assets for this job","items":{"type":"string"}},"flow":{"type":"array","description":"The flow of the job. Only available for workflow jobs.","items":{"type":"object","properties":{"jobId":{"type":"string","description":"If the flow is part of a WorkflowJob, this is the jobId for the node.\njobId is only available for nodes started. 
A node \"Pending\" for a running workflow job is not started."},"assets":{"type":"array","description":"List of produced assets for this node.","items":{"type":"object","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]}},"logicType":{"type":"string","description":"The type of the logic for the node.\nOnly available for logic nodes.","enum":["if-else"]},"dependsOn":{"type":"array","description":"The nodes that this node depends on.\nOnly available for nodes that have dependencies. Mainly used for user approval nodes.","items":{"type":"string"}},"modelId":{"type":"string","description":"The model id for the node. Mainly used for custom model tasks."},"inputs":{"type":"array","description":"The inputs of the node.","items":{"type":"object","description":"This is used to run the flow.","properties":{"parent":{"type":"boolean","description":"Whether this input represents a parent asset to assign to the produced assets.\nOnly available for \\`file\\` and \\`file_array\\` input types.\n\nFor \\`file_array\\`, the parent asset is the first item in the array."},"color":{"type":"boolean","description":"Whether the input is a color or not. Only available for \\`string\\` input type."},"inputs":{"type":"array","description":"The list of inputs which form an object within a container array.\nAll inputs are the same as the current object.\nThis is only available for type inputs_array inputs.","items":{"type":"object","properties":{}}},"minLength":{"type":"number","description":"The minimum allowed length for string inputs. Also applies to each item in \\`string_array\\`."},"backgroundBehavior":{"type":"string","description":"Specifies the background behavior for the input. 
Only available for \\`file\\` and \\`file_array\\`\ninput types with kind \\`image\\`.","enum":["opaque","transparent"]},"description":{"type":"string","description":"Help text displayed in the UI to provide additional information about the input"},"type":{"type":"string","description":"The data type of the input","enum":["boolean","file","file_array","inputs_array","model","model_array","number","number_array","string","string_array"]},"required":{"type":"object","description":"Set of rules that describes when this input is required:\n- \\`always\\`: Input is always required\n- \\`ifNotDefined\\`: Input is required when another specified input is not defined\n- \\`ifDefined\\`: Input is required when another specified input is defined\n- \\`conditionalValues\\`: Input is required when another input has a specific value\n\nBy default, the input is not required.","properties":{"always":{"type":"boolean","description":"Whether the input is always required"},"ifNotDefined":{"type":"object","description":"Makes this input required when another input is not defined:\n- Key: name of the input that must be undefined\n- Value: message to display when this input is required","properties":{}},"ifDefined":{"type":"object","description":"Makes this input required when another input is defined:\n- Key: name of the input that must be defined\n- Value: message to display when this input is required","properties":{}},"conditionalValues":{"type":"object","description":"Makes this input required when another input has a specific value:\n- Key: name of the input to check\n- Value: operation and allowed values that trigger the requirement","properties":{}}}},"modelTypes":{"type":"array","description":"The allowed model types for this input. 
Example: \\`[\"flux.1-lora\"]\\`.\nOnly available for \\`model_array\\` input type.","items":{"type":"string","enum":["custom","flux.1","flux.1-composition","flux.1-kontext-dev","flux.1-kontext-lora","flux.1-krea-dev","flux.1-krea-lora","flux.1-lora","flux.1-pro","flux.1.1-pro-ultra","flux1.1-pro","gpt-image-1","sd-1_5","sd-1_5-composition","sd-1_5-lora","sd-xl","sd-xl-composition","sd-xl-lora"]}},"ref":{"type":"object","description":"The reference to another input or output of the same workflow.\nMust have at least one of node or conditional.","properties":{"equal":{"type":"string","description":"This is the desired node output value if ref is an if/else node."},"node":{"type":"string","description":"The node id or 'workflow' if the source is a workflow input."},"conditional":{"type":"array","description":"The conditional nodes to reference.\nIf the conditional nodes are successful, the node will be successful.\nIf the conditional nodes are skipped, the node will be skipped.\nContains an array of node ids used to check the status of the nodes.","items":{"type":"string"}},"name":{"type":"string","description":"The name of the input or output to reference.\nIf the type is 'workflow', the name is the name of the input of the workflow is required\nIf the type is 'node', the name is not mandatory, except if you want all outputs of the node.\nTo get all outputs of a node, you can use the name 'all'."}}},"min":{"type":"number","description":"The minimum allowed value. Only available for \\`number\\` and array input types."},"promptSpark":{"type":"boolean","description":"Whether the input is used with prompt spark. Only available for \\`string\\` input type."},"placeholder":{"type":"string","description":"Placeholder text for the input. Only available for 'string' input type."},"group":{"type":"string","description":"Used to visually group inputs together in the UI. 
Inputs with the same group value appear\nconsecutively in the UI."},"allowedValues":{"type":"array","description":"The allowed values for the input. For \\`string\\` or \\`number\\` types, creates a single-select\ndropdown.\nFor \\`string_array\\` type, creates a multi-select dropdown.","items":{}},"costImpact":{"type":"boolean","description":"Whether this input affects the model's cost calculation"},"max":{"type":"number","description":"The maximum allowed value. Only available for \\`number\\` and \\`array\\` input types."},"kind":{"type":"string","description":"The asset kind of the input. Only taken into account for \\`file\\` and \\`file_array\\` input types.","enum":["3d","audio","document","image","image-hdr","json","video"]},"maskFrom":{"type":"string","description":"The name of the file input field to use as the mask source"},"label":{"type":"string","description":"The label displayed in the UI for this input"},"hint":{"type":"string","description":"Hint text displayed in the UI as a tooltip to guide the user"},"name":{"type":"string","description":"The name that must be used to call the model through the API"},"step":{"type":"number","description":"The step increment for numeric inputs. Only available for \\`number\\` input type.","minimum":1},"prompt":{"type":"boolean","description":"Whether the input is a prompt. When true, displays as a text area with prompt spark feature.\nOnly available for \\`string\\` input type."},"maxLength":{"type":"number","description":"The maximum allowed length for \\`string\\` inputs. 
Also applies to each item in \\`string_array\\`."}},"required":["name","type"]}},"id":{"type":"string","description":"The id of the node."},"logic":{"type":"object","description":"The logic of the node.\nOnly available for logic nodes.","properties":{"default":{"type":"string","description":"The default case of the logic.\nContains the id/output of the node to execute if no case is matched.\nOnly available for if/else nodes."},"transform":{"type":"string","description":"The transform of the logic.\nOnly available for transform nodes."},"cases":{"type":"array","description":"The cases of the logic.\nOnly available for if/else nodes.","items":{"type":"object","properties":{"condition":{"type":"string"},"value":{"type":"string"}},"required":["condition","value"]}}}},"type":{"type":"string","description":"The type of the job for the node.","enum":["custom-model","generate-prompt","logic","remove-background","transform","user-approval","workflow"]},"workflowId":{"type":"string","description":"The workflow id for the node. Mainly used for workflow tasks."},"status":{"type":"string","description":"The status of the node. 
Only available for WorkflowJob nodes.","enum":["failure","pending","processing","skipped","success"]}},"required":["id","status","type"]}},"workflowId":{"type":"string","description":"The workflow ID of the job if job is part of a workflow."},"workflowJobId":{"type":"string","description":"The workflow job ID of the job if job is part of a workflow job."}}},"statusHistory":{"type":"array","description":"The history of the different statuses the job went through with the ISO string date\nof when the job reached each status.","items":{"type":"object","properties":{"date":{"type":"string"},"status":{"type":"string","enum":["canceled","failure","finalizing","in-progress","pending","queued","success","warming-up"]}},"required":["date","status"]}},"progress":{"type":"number","description":"Progress of the job (between 0 and 1)"},"authorId":{"type":"string","description":"The author user ID (example: \"dcf121faaa1a0a0bbbd9ca1b73d62aea\")"},"jobType":{"type":"string","description":"The type of job","enum":["assets-download","canvas-export","caption","caption-llava","custom","describe-style","detection","embed","flux","flux-model-training","generate-prompt","image-generation","image-prompt-editing","inference","mesh-preview-rendering","model-download","model-import","model-training","openai-image-generation","patch-image","pixelate","reframe","remove-background","repaint","restyle","segment","skybox-3d","skybox-base-360","skybox-hdri","skybox-upscale-360","texture","translate","upload","upscale","upscale-skybox","upscale-texture","vectorize","workflow"]},"ownerId":{"type":"string","description":"The owner ID (example: \"team_U3Qmc8PCdWXwAQJ4Dvw4tV6D\")"},"billing":{"type":"object","description":"The billing of the job","properties":{"cuDiscount":{"type":"number"},"cuCost":{"type":"number"}},"required":["cuCost","cuDiscount"]},"status":{"type":"string","description":"The current status of the 
job","enum":["canceled","failure","finalizing","in-progress","pending","queued","success","warming-up"]},"updatedAt":{"type":"string","description":"The job last update date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"}},"required":["createdAt","jobId","jobType","metadata","progress","status","statusHistory","updatedAt"]}}},"DeleteModelsByCollectionIdRequest":{"type":"object","required":["modelIds"],"properties":{"modelIds":{"type":"array","description":"The ids of the models to remove from the collection. (Max 49 at once)","items":{"type":"string"}}}},"PostInpaintIpAdapterInferencesResponse":{"type":"object","required":["inference","job"],"properties":{"creativeUnitsCost":{"type":"number","description":"The Creative Units cost for the request billed"},"creativeUnitsDiscount":{"type":"number","description":"The Creative Units discount for the request billed"},"inference":{"type":"object","properties":{"ipAdapterImageIds":{"type":"array","description":"Asset id of the input IpAdapter images","items":{"type":"string"}},"seed":{"type":"string","description":"Used to reproduce previous results. Default: randomly generated number."},"strength":{"type":"number","description":"Controls the noise intensity introduced to the input image, where a value of 1.0 completely erases the original image's details. Available for img2img and inpainting. (within [0.01, 1.0], default: 0.75)","minimum":0.01,"maximum":1},"ipAdapterType":{"type":"string","description":"The type of IP Adapter model to use. Must be one of [`style`, `character`], defaults to `style`","enum":["character","style"],"default":"style"},"type":{"type":"string","description":"The type of inference to use. 
Example: txt2img, img2img, etc.\n\nSelecting the right type will condition the expected parameters.\n\nNote: if model.type is `sd-xl*` or `sd-1_5*`, when using the `\"inpaint\"` inference type, Scenario determines the best available `baseModel` for a given `modelId`: one of `[\"stable-diffusion-inpainting\", \"stable-diffusion-xl-1.0-inpainting-0.1\"] will be used.","enum":["txt2img","txt2img_ip_adapter","txt2img_texture","img2img","img2img_ip_adapter","img2img_texture","inpaint","outpaint","inpaint_ip_adapter","controlnet","controlnet_ip_adapter","reference","reference_texture","controlnet_reference","controlnet_img2img","controlnet_inpaint","controlnet_inpaint_ip_adapter","controlnet_texture"]},"ipAdapterImage":{"type":"string","description":"Signed URL to display the IpAdapter image"},"scheduler":{"type":"string","description":"The scheduler to use to override the default configured for the model. See detailed documentation for more details.","enum":["DDIMScheduler","DDPMScheduler","DEISMultistepScheduler","DPMSolverMultistepScheduler","DPMSolverSinglestepScheduler","EulerAncestralDiscreteScheduler","EulerDiscreteScheduler","HeunDiscreteScheduler","KDPM2AncestralDiscreteScheduler","KDPM2DiscreteScheduler","LCMScheduler","LMSDiscreteScheduler","PNDMScheduler","TCDScheduler","UniPCMultistepScheduler"]},"disableMerging":{"type":"boolean","description":"If set to true, the entire input image will likely change during inpainting. This results in faster inferences, but the output image will be harder to integrate if the input is just a small part of a larger image.","default":false},"ipAdapterImages":{"type":"array","description":"Signed URL to display the IpAdapter images","items":{"type":"string"}},"baseModelId":{"type":"string","description":"The base model to use for the inference. 
Only Flux LoRA models can use this parameter.\nAllowed values are available in the model's attribute: `compliantModelIds`"},"height":{"type":"number","description":"The height of the generated images, must be a 8 multiple (within [64, 2048], default: 512)\nIf model.type is `sd-xl`, `sd-xl-lora`, `sd-xl-composition` the height must be within [512, 2048]\nIf model.type is `sd-1_5`, the height must be within [64, 1024]\nIf model.type is `flux.1.1-pro-ultra`, you can use the aspectRatio parameter instead","minimum":64,"maximum":2048},"mask":{"type":"string","description":"Signed URL to display the mask image"},"image":{"type":"string","description":"Signed URL to display the input image"},"imageId":{"type":"string","description":"Asset id of the input image"},"ipAdapterImageId":{"type":"string","description":"Asset id of the input IpAdapter image"},"ipAdapterScale":{"type":"number","description":"IpAdapter scale factor (within [0.0, 1.0], default: 0.9).","minimum":0,"maximum":1},"modelEpoch":{"type":"string","description":"The epoch of the model to use for the inference. 
Only available for Flux Lora Trained models."},"hideResults":{"type":"boolean","description":"If set, generated assets will be hidden and not returned in the list of images of the inference\nor when listing assets (default: false)","default":false},"ipAdapterScales":{"type":"array","description":"IpAdapter scale factors (within [0.0, 1.0], default: 0.9).","items":{"type":"number"}},"maskId":{"type":"string","description":"Asset id of the mask image"},"negativePrompt":{"type":"string","description":"The prompt not to guide the image generation, ignored when guidance < 1 (example: \"((ugly face))\")\nFor Flux based model (not Fast-Flux): requires negativePromptStrength > 0 and active only for inference types txt2img / img2img / controlnet."},"intermediateImages":{"type":"boolean","description":"Enable or disable the intermediate images generation (default: false)","default":false},"concepts":{"type":"array","items":{"type":"object","properties":{"modelId":{"type":"string","description":"The model ID (example: \"model_eyVcnFJcR92BxBkz7N6g5w\")"},"modelEpoch":{"type":"string","description":"The epoch of the model (example: \"000001\")\nOnly available for Flux Lora Trained models"},"scale":{"type":"number","description":"The scale of the model (example: 1.0)\nFor Flux Kontext Prompt Editing, the scale is between 0 and 2.","minimum":-2,"maximum":2}},"required":["modelId","scale"]}},"guidance":{"type":"number","description":"Controls how closely the generated image follows the prompt. Higher values result in stronger adherence to the prompt. 
Default and allowed values depend on the model type:\n- For Flux dev models, the default is 3.5 and allowed values are within [0, 10]\n- For Flux pro models, the default is 3 and allowed values are within [2, 5]\n- For SDXL models, the default is 6 and allowed values are within [0, 20]\n- For SD1.5 models, the default is 7.5 and allowed values are within [0, 20]","minimum":0,"maximum":20},"numInferenceSteps":{"type":"number","description":"The number of denoising steps for each image generation (within [1, 150], default: 30)","minimum":1,"maximum":150},"numSamples":{"type":"number","description":"The number of images to generate (within [1, 128], default: 4)","minimum":1,"maximum":128},"width":{"type":"number","description":"The width of the generated images, must be a multiple of 8 (within [64, 2048], default: 512)\nIf model.type is `sd-xl`, `sd-xl-lora`, `sd-xl-composition` the width must be within [512, 2048]\nIf model.type is `sd-1_5`, the width must be within [64, 1024]\nIf model.type is `flux.1.1-pro-ultra`, you can use the aspectRatio parameter instead","minimum":64,"maximum":2048},"negativePromptStrength":{"type":"number","description":"Only applicable for flux-dev based models for `txt2img`, `img2img`, and `controlnet` inference types.\n\nControls the influence of the negative prompt. Default 0 means the negative prompt has no effect. Higher values increase negative prompt influence.\nMust be > 0 if negativePrompt is provided.","minimum":0,"maximum":10},"prompt":{"type":"string","description":"Full text prompt including the model placeholder. 
(example: \"an illustration of phoenix in a fantasy world, flying over a mountain, 8k, bokeh effect\")"}},"required":["image","imageId","mask","maskId","prompt","type"]},"job":{"type":"object","properties":{"createdAt":{"type":"string","description":"The job creation date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"jobId":{"type":"string","description":"The job ID (example: \"job_ocZCnG1Df35XRL1QyCZSRxAG8\")"},"metadata":{"type":"object","description":"Metadata of the job with some additional information","properties":{"output":{"type":"object","description":"The output of the job","properties":{}},"input":{"type":"object","description":"The inputs for the job","properties":{}},"assetIds":{"type":"array","description":"List of produced assets for this job","items":{"type":"string"}},"flow":{"type":"array","description":"The flow of the job. Only available for workflow jobs.","items":{"type":"object","properties":{"jobId":{"type":"string","description":"If the flow is part of a WorkflowJob, this is the jobId for the node.\njobId is only available for nodes started. A node \"Pending\" for a running workflow job is not started."},"assets":{"type":"array","description":"List of produced assets for this node.","items":{"type":"object","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]}},"logicType":{"type":"string","description":"The type of the logic for the node.\nOnly available for logic nodes.","enum":["if-else"]},"dependsOn":{"type":"array","description":"The nodes that this node depends on.\nOnly available for nodes that have dependencies. Mainly used for user approval nodes.","items":{"type":"string"}},"modelId":{"type":"string","description":"The model id for the node. 
Mainly used for custom model tasks."},"inputs":{"type":"array","description":"The inputs of the node.","items":{"type":"object","description":"This is used to run the flow.","properties":{"parent":{"type":"boolean","description":"Whether this input represents a parent asset to assign to the produced assets.\nOnly available for \\`file\\` and \\`file_array\\` input types.\n\nFor \\`file_array\\`, the parent asset is the first item in the array."},"color":{"type":"boolean","description":"Whether the input is a color or not. Only available for \\`string\\` input type."},"inputs":{"type":"array","description":"The list of inputs which form an object within a container array.\nAll inputs are the same as the current object.\nThis is only available for type inputs_array inputs.","items":{"type":"object","properties":{}}},"minLength":{"type":"number","description":"The minimum allowed length for string inputs. Also applies to each item in \\`string_array\\`."},"backgroundBehavior":{"type":"string","description":"Specifies the background behavior for the input. 
Only available for \\`file\\` and \\`file_array\\`\ninput types with kind \\`image\\`.","enum":["opaque","transparent"]},"description":{"type":"string","description":"Help text displayed in the UI to provide additional information about the input"},"type":{"type":"string","description":"The data type of the input","enum":["boolean","file","file_array","inputs_array","model","model_array","number","number_array","string","string_array"]},"required":{"type":"object","description":"Set of rules that describes when this input is required:\n- \\`always\\`: Input is always required\n- \\`ifNotDefined\\`: Input is required when another specified input is not defined\n- \\`ifDefined\\`: Input is required when another specified input is defined\n- \\`conditionalValues\\`: Input is required when another input has a specific value\n\nBy default, the input is not required.","properties":{"always":{"type":"boolean","description":"Whether the input is always required"},"ifNotDefined":{"type":"object","description":"Makes this input required when another input is not defined:\n- Key: name of the input that must be undefined\n- Value: message to display when this input is required","properties":{}},"ifDefined":{"type":"object","description":"Makes this input required when another input is defined:\n- Key: name of the input that must be defined\n- Value: message to display when this input is required","properties":{}},"conditionalValues":{"type":"object","description":"Makes this input required when another input has a specific value:\n- Key: name of the input to check\n- Value: operation and allowed values that trigger the requirement","properties":{}}}},"modelTypes":{"type":"array","description":"The allowed model types for this input. 
Example: \\`[\"flux.1-lora\"]\\`.\nOnly available for \\`model_array\\` input type.","items":{"type":"string","enum":["custom","flux.1","flux.1-composition","flux.1-kontext-dev","flux.1-kontext-lora","flux.1-krea-dev","flux.1-krea-lora","flux.1-lora","flux.1-pro","flux.1.1-pro-ultra","flux1.1-pro","gpt-image-1","sd-1_5","sd-1_5-composition","sd-1_5-lora","sd-xl","sd-xl-composition","sd-xl-lora"]}},"ref":{"type":"object","description":"The reference to another input or output of the same workflow.\nMust have at least one of node or conditional.","properties":{"equal":{"type":"string","description":"This is the desired node output value if ref is an if/else node."},"node":{"type":"string","description":"The node id or 'workflow' if the source is a workflow input."},"conditional":{"type":"array","description":"The conditional nodes to reference.\nIf the conditional nodes are successful, the node will be successful.\nIf the conditional nodes are skipped, the node will be skipped.\nContains an array of node ids used to check the status of the nodes.","items":{"type":"string"}},"name":{"type":"string","description":"The name of the input or output to reference.\nIf the type is 'workflow', the name is the name of the input of the workflow is required\nIf the type is 'node', the name is not mandatory, except if you want all outputs of the node.\nTo get all outputs of a node, you can use the name 'all'."}}},"min":{"type":"number","description":"The minimum allowed value. Only available for \\`number\\` and array input types."},"promptSpark":{"type":"boolean","description":"Whether the input is used with prompt spark. Only available for \\`string\\` input type."},"placeholder":{"type":"string","description":"Placeholder text for the input. Only available for 'string' input type."},"group":{"type":"string","description":"Used to visually group inputs together in the UI. 
Inputs with the same group value appear\nconsecutively in the UI."},"allowedValues":{"type":"array","description":"The allowed values for the input. For \`string\` or \`number\` types, creates a single-select\ndropdown.\nFor \`string_array\` type, creates a multi-select dropdown.","items":{}},"costImpact":{"type":"boolean","description":"Whether this input affects the model's cost calculation"},"max":{"type":"number","description":"The maximum allowed value. Only available for \`number\` and \`array\` input types."},"kind":{"type":"string","description":"The asset kind of the input. Only taken into account for \`file\` and \`file_array\` input types.","enum":["3d","audio","document","image","image-hdr","json","video"]},"maskFrom":{"type":"string","description":"The name of the file input field to use as the mask source"},"label":{"type":"string","description":"The label displayed in the UI for this input"},"hint":{"type":"string","description":"Hint text displayed in the UI as a tooltip to guide the user"},"name":{"type":"string","description":"The name that must be used to call the model through the API"},"step":{"type":"number","description":"The step increment for numeric inputs. Only available for \`number\` input type.","minimum":1},"prompt":{"type":"boolean","description":"Whether the input is a prompt. When true, displays as a text area with prompt spark feature.\nOnly available for \`string\` input type."},"maxLength":{"type":"number","description":"The maximum allowed length for \`string\` inputs. 
Also applies to each item in \\`string_array\\`."}},"required":["name","type"]}},"id":{"type":"string","description":"The id of the node."},"logic":{"type":"object","description":"The logic of the node.\nOnly available for logic nodes.","properties":{"default":{"type":"string","description":"The default case of the logic.\nContains the id/output of the node to execute if no case is matched.\nOnly available for if/else nodes."},"transform":{"type":"string","description":"The transform of the logic.\nOnly available for transform nodes."},"cases":{"type":"array","description":"The cases of the logic.\nOnly available for if/else nodes.","items":{"type":"object","properties":{"condition":{"type":"string"},"value":{"type":"string"}},"required":["condition","value"]}}}},"type":{"type":"string","description":"The type of the job for the node.","enum":["custom-model","generate-prompt","logic","remove-background","transform","user-approval","workflow"]},"workflowId":{"type":"string","description":"The workflow id for the node. Mainly used for workflow tasks."},"status":{"type":"string","description":"The status of the node. 
Only available for WorkflowJob nodes.","enum":["failure","pending","processing","skipped","success"]}},"required":["id","status","type"]}},"workflowId":{"type":"string","description":"The workflow ID of the job if job is part of a workflow."},"workflowJobId":{"type":"string","description":"The workflow job ID of the job if job is part of a workflow job."}}},"statusHistory":{"type":"array","description":"The history of the different statuses the job went through with the ISO string date\nof when the job reached each statuses.","items":{"type":"object","properties":{"date":{"type":"string"},"status":{"type":"string","enum":["canceled","failure","finalizing","in-progress","pending","queued","success","warming-up"]}},"required":["date","status"]}},"progress":{"type":"number","description":"Progress of the job (between 0 and 1)"},"authorId":{"type":"string","description":"The author user ID (example: \"dcf121faaa1a0a0bbbd9ca1b73d62aea\")"},"jobType":{"type":"string","description":"The type of job","enum":["assets-download","canvas-export","caption","caption-llava","custom","describe-style","detection","embed","flux","flux-model-training","generate-prompt","image-generation","image-prompt-editing","inference","mesh-preview-rendering","model-download","model-import","model-training","openai-image-generation","patch-image","pixelate","reframe","remove-background","repaint","restyle","segment","skybox-3d","skybox-base-360","skybox-hdri","skybox-upscale-360","texture","translate","upload","upscale","upscale-skybox","upscale-texture","vectorize","workflow"]},"ownerId":{"type":"string","description":"The owner ID (example: \"team_U3Qmc8PCdWXwAQJ4Dvw4tV6D\")"},"billing":{"type":"object","description":"The billing of the job","properties":{"cuDiscount":{"type":"number"},"cuCost":{"type":"number"}},"required":["cuCost","cuDiscount"]},"status":{"type":"string","description":"The current status of the 
job","enum":["canceled","failure","finalizing","in-progress","pending","queued","success","warming-up"]},"updatedAt":{"type":"string","description":"The job last update date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"}},"required":["createdAt","jobId","jobType","metadata","progress","status","statusHistory","updatedAt"]}}},"PostCollectionRequest":{"type":"object","required":["name"],"properties":{"name":{"type":"string","description":"The name of the collection."}}},"NotFoundResponse":{"type":"object","required":["apiError","createdAt","error","id","infos","updatedAt"],"properties":{"createdAt":{"type":"string"},"apiError":{"type":"string"},"id":{"type":"string"},"error":{"type":"string"},"infos":{"type":"object","properties":{}},"updatedAt":{"type":"string"}}},"PutProjectWebhookEndpointByIdRequest":{"type":"object","properties":{"enabledEvents":{"type":"array","description":"The events that trigger the webhook. ['*'] indicates that all events are enabled","items":{"type":"string","enum":["*","asset.download.completed","asset.download.created","asset.download.failed","generation.cancelled","generation.completed","generation.created","generation.failed","inference.cancelled","inference.completed","inference.created","inference.failed","model.download.completed","model.download.created","model.download.failed","model.training.cancelled","model.training.completed","model.training.failed","model.training.started"]}},"description":{"type":"string","description":"A description of the webhook endpoint"},"enabled":{"type":"boolean","description":"Whether the webhook is enabled"},"url":{"type":"string","description":"The URL of the webhook 
endpoint"}}},"BadRequestResponse":{"type":"object","required":["apiError","createdAt","error","id","infos","updatedAt"],"properties":{"createdAt":{"type":"string"},"apiError":{"type":"string"},"id":{"type":"string"},"error":{"type":"string"},"infos":{"type":"object","properties":{}},"updatedAt":{"type":"string"}}},"PostUpscaleInferencesRequest":{"type":"object","required":["image"],"properties":{"image":{"type":"string","description":"Image to upscale. Must reference an existing AssetId or be a data URL."},"creativityDecay":{"type":"number","description":"Amount of decay in creativity over the upscale process. The lowest the value, the less the creativity will be preserved over the upscale process.","minimum":0,"maximum":100},"tileStyle":{"type":"boolean","description":"If set to true, during the upscaling process, the model will match tiles of the source image with tiles of the style image(s). This will result in a more coherent restyle. Works best with style images that have a similar composition.","default":false},"seed":{"type":"number","description":"Used to reproduce previous results. Default: randomly generated number.","minimum":0,"maximum":2147483647},"styleImages":{"type":"array","description":"List of style images. Most of the time, only one image is enough. It must be existing AssetIds.","items":{"type":"string"}},"scalingFactor":{"type":"number","description":"Scaling factor (when `targetWidth` not specified)","minimum":1,"maximum":16},"preset":{"type":"string","description":"Optimize the upscale for a specific use case. Precise: Upscale for high fidelity. Balanced: Upscale for a balance between fidelity and creativity. Creative: Upscale for creativity.","enum":["balanced","creative","precise"],"default":"balanced"},"styleImagesFidelity":{"type":"number","description":"Condition the influence of the style image(s). 
The higher the value, the more the style images will influence the upscaled image.","minimum":0,"maximum":100},"detailsLevel":{"type":"number","description":"Amount of details to remove or add","minimum":-50,"maximum":50},"fractality":{"type":"number","description":"Determine the scale at which the upscale process works.\n- With a small value, the upscale works at the largest scale, resulting in fewer added details and more coherent images. Ideal for portraits, for example.\n- With a large value, the upscale works at the smallest scale, resulting in more added details and more hallucinations. Ideal for landscapes, for example.\n\n(info): A small value is slower and more expensive to run.","minimum":0,"maximum":100},"negativePrompt":{"type":"string","description":"A negative full text prompt that discourages the upscale from generating certain characteristics. It is recommended to test without using a negative prompt. Default: empty string. Example: \"Low resolution, blurry, pixelated, noisy.\""},"overrideEmbeddings":{"type":"boolean","description":"Override the embeddings of the model. Only your prompt and negativePrompt will be used. Use with caution."},"promptFidelity":{"type":"number","description":"Increase the fidelity to the prompt during upscale. Default: optimized for your preset and style.","minimum":0,"maximum":100},"style":{"type":"string","description":"Optimize the upscale for a specific style. standard works in most cases. 
Use one of the other styles to refine the outputs.","enum":["3d-rendered","anime","cartoon","comic","minimalist","photography","standard"],"default":"standard"},"refinementSteps":{"type":"number","description":"Additional refinement steps before scaling.\n\nIf scalingFactor == 1, the refinement process will be applied (1 + refinementSteps) times.\nIf scalingFactor > 1, the refinement process will be applied refinementSteps times.","minimum":0,"maximum":4},"creativity":{"type":"number","description":"Allow the generation of \"hallucinations\" during the upscale process, which adds additional details and deviates from the original image. Default: optimized for your preset and style.","minimum":0,"maximum":100},"imageFidelity":{"type":"number","description":"Strengthen the similarity to the original image during the upscale. Default: optimized for your preset and style.","minimum":0,"maximum":100},"imageType":{"type":"string","description":"Preserve the seamless properties of skybox or texture images. Input has to be of same type (seamless).","enum":["seamfull","skybox","texture"],"default":"seamfull"},"prompt":{"type":"string","description":"A full text prompt to guide the upscale and forcing the generation of certain characteristics. Default: empty string. Example: \"UHD 8K hyper detailed studio photo of man face with yellow skin, anatomical++, disturbing+++, black background. 
Bloody++.\""},"targetWidth":{"type":"number","description":"Target width for the upscaled image, take priority over scaling factor","minimum":1024,"maximum":16000}}},"PostUploadsActionRequest":{"type":"object","required":["action"],"properties":{"action":{"type":"string","description":"The action to perform on an upload, currently only \"complete\" is supported","enum":["complete"]}}},"PutImagePatchResponse":{"type":"object","required":["asset"],"properties":{"asset":{"type":"object","properties":{"preview":{"type":"object","description":"The asset's preview.\n\nContains the assetId and the url of the preview.","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"metadata":{"type":"object","description":"Metadata of the asset with some additional information","properties":{"maxIterations":{"type":"number"},"modality":{"type":"string","description":"Modality to detect","enum":["canny","depth","grayscale","lineart_anime","mlsd","normal","pose","scribble","segmentation","sketch"],"default":"canny"},"tileStyle":{"type":"boolean","description":"If set to true, during the upscaling process, the model will match tiles of the source image with tiles of the style image(s). This will result in a more coherent restyle. Works best with style images that have a similar composition.","default":false},"modelId":{"type":"string","description":"The modelId used to generate this asset"},"sourceProjectId":{"type":"string"},"bbox":{"type":"array","description":"A bounding box around the object of interest, in the format [x1, y1, x2, y2].","items":{}},"keypointThreshold":{"type":"number","description":"Confidence threshold for keypoint detection (within [0, 1])","minimum":0,"maximum":1},"type":{"type":"string","description":"The type of the asset. 
Ex: 'inference-txt2img' will represent an asset generated from a text to image model","enum":["3d-texture","3d-texture-albedo","3d-texture-metallic","3d-texture-mtl","3d-texture-normal","3d-texture-roughness","3d23d","3d23d-texture","audio2audio","background-removal","canvas","canvas-drawing","canvas-export","detection","generative-fill","image-prompt-editing","img23d","img2img","img2video","inference-controlnet","inference-controlnet-img2img","inference-controlnet-inpaint","inference-controlnet-inpaint-ip-adapter","inference-controlnet-ip-adapter","inference-controlnet-reference","inference-controlnet-texture","inference-img2img","inference-img2img-ip-adapter","inference-img2img-texture","inference-inpaint","inference-inpaint-ip-adapter","inference-reference","inference-reference-texture","inference-txt2img","inference-txt2img-ip-adapter","inference-txt2img-texture","patch","pixelization","reframe","restyle","segment","segmentation-image","segmentation-mask","skybox-3d","skybox-base-360","skybox-hdri","texture","texture-albedo","texture-ao","texture-edge","texture-height","texture-metallic","texture-normal","texture-smoothness","txt23d","txt2audio","txt2img","txt2video","unknown","uploaded","uploaded-3d","uploaded-audio","uploaded-avatar","uploaded-video","upscale","upscale-skybox","upscale-texture","upscale-video","vectorization","video2img","video2video"]},"text":{"type":"string","description":"A textual description / keywords describing the object of interest.","maxLength":100},"height":{"type":"number"},"nbMasks":{"type":"number"},"creativityDecay":{"type":"number","description":"Amount of decay in creativity over the upscale process. 
The lowest the value, the less the creativity will be preserved over the upscale process.","minimum":0,"maximum":100},"layerDifference":{"type":"number"},"scalingFactor":{"type":"number","description":"Scaling factor (when `targetWidth` not specified)","minimum":1,"maximum":16},"inputFidelity":{"type":"string","description":"When set to `high`, allows to better preserve details from the input images in the output.\nThis is especially useful when using images that contain elements like faces or logos that\nrequire accurate preservation in the generated image.\n\nYou can provide multiple input images that will all be preserved with high fidelity, but keep\nin mind that the first image will be preserved with richer textures and finer details, so if\nyou include elements such as faces, consider placing them in the first image.\n\nOnly available for the `gpt-image-1` model.","enum":["high","low"],"default":"low"},"modelType":{"type":"string","description":"The type of the generator used","enum":["custom","flux.1","flux.1-composition","flux.1-kontext-dev","flux.1-kontext-lora","flux.1-krea-dev","flux.1-krea-lora","flux.1-lora","flux.1-pro","flux.1.1-pro-ultra","flux1.1-pro","gpt-image-1","sd-1_5","sd-1_5-composition","sd-1_5-lora","sd-xl","sd-xl-composition","sd-xl-lora"]},"numOutputs":{"type":"number","description":"The number of outputs to generate.","minimum":1,"maximum":8},"angular":{"type":"number","description":"How angular is the surface? 
0 is like a sphere, 1 is like a mechanical object","minimum":0,"maximum":1},"targetHeight":{"type":"number","description":"The target height of the output image.","minimum":0,"maximum":2048},"concepts":{"type":"array","description":"Flux Kontext LoRA to style the image.\nFor Flux Kontext Prompt Editing.","items":{"type":"object","properties":{"modelId":{"type":"string","description":"The model ID (example: \"model_eyVcnFJcR92BxBkz7N6g5w\")"},"modelEpoch":{"type":"string","description":"The epoch of the model (example: \"000001\")\nOnly available for Flux Lora Trained models"},"scale":{"type":"number","description":"The scale of the model (example: 1.0)\nFor Flux Kontext Prompt Editing, the scale is between 0 and 2.","minimum":-2,"maximum":2}},"required":["modelId","scale"]}},"size":{"type":"number"},"spliceThreshold":{"type":"number"},"dilate":{"type":"number","description":"The number of pixels to dilate the result masks.","minimum":0,"maximum":30},"style":{"type":"string","enum":["3d-cartoon","3d-rendered","anime","cartoon","cinematic","claymation","cloud-skydome","comic","cyberpunk","enchanted","fantasy","ink","manga","manga-color","minimalist","neon-tron","oil-painting","pastel","photo","photography","psychedelic","retro-fantasy","scifi-concept-art","space","standard","whimsical"]},"imageFidelity":{"type":"number","description":"Strengthen the similarity to the original image during the upscale. 
Default: optimized for your preset and style.","minimum":0,"maximum":100},"inputLocation":{"type":"string","description":"Location of the input image in the output.","enum":["bottom","left","middle","right","top"],"default":"middle"},"seed":{"type":"string","description":"The seed used to generate this asset"},"preset":{"type":"string"},"trainingImage":{"type":"boolean"},"resultMask":{"type":"boolean","description":"Boolean to enable returning the masks (binary image) in the response.","default":true},"lockExpiresAt":{"type":"string","description":"The ISO timestamp when the lock on the canvas will expire"},"baseModelId":{"type":"string","description":"The baseModelId that may be changed at inference time"},"pathPrecision":{"type":"number"},"rootParentId":{"type":"string"},"sharpen":{"type":"boolean","description":"Sharpen tiles.","default":false},"cornerThreshold":{"type":"number"},"styleImages":{"type":"array","description":"List of style images. Most of the time, only one image is enough. They must be existing AssetIds.","items":{"type":"string"}},"texture":{"type":"string","description":"The asset to convert in texture maps. Must reference an existing AssetId."},"filterSpeckle":{"type":"number"},"minThreshold":{"type":"number","description":"Minimum threshold for Grayscale conversion"},"maxThreshold":{"type":"number","description":"Maximum threshold for Grayscale conversion"},"styleImagesFidelity":{"type":"number","description":"Condition the influence of the style image(s). The higher the value, the more the style images will influence the upscaled image.","minimum":0,"maximum":100},"depthImage":{"type":"string","description":"The control image processed by depth estimator. 
Must reference an existing AssetId."},"parentId":{"type":"string"},"parentJobId":{"type":"string"},"width":{"type":"number","description":"The width of the rendered image.","minimum":1024,"maximum":2048},"strength":{"type":"number","description":"The strength\n\nOnly available for the `flux-kontext` LoRA model."},"inferenceId":{"type":"string","description":"The id of the Inference describing how this image was generated"},"lengthThreshold":{"type":"number"},"originalAssetId":{"type":"string"},"backgroundOpacity":{"type":"number","description":"Int to set between 0 and 255 for the opacity of the background in the result images.","minimum":0,"maximum":255},"aspectRatio":{"type":"string","description":"The optional aspect ratio given for the generation, only applicable for some models"},"detailsLevel":{"type":"number","description":"Amount of details to remove or add","minimum":-50,"maximum":50},"fractality":{"type":"number","description":"Determine the scale at which the upscale process works.\n- With a small value, the upscale works at the largest scale, resulting in fewer added details and more coherent images. Ideal for portraits, for example.\n- With a large value, the upscale works at the smallest scale, resulting in more added details and more hallucinations. Ideal for landscapes, for example.\n\n(info): A small value is slower and more expensive to run.","minimum":0,"maximum":100},"horizontalExpansionRatio":{"type":"number","description":"(deprecated) Horizontal expansion ratio.","minimum":1,"maximum":2},"points":{"type":"array","description":"List of points (label, x, y) in the image where label = 0 for background and 1 for object.","items":{"type":"array","items":{}}},"mode":{"type":"string"},"colorPrecision":{"type":"number"},"promptFidelity":{"type":"number","description":"Increase the fidelity to the prompt during upscale. 
Default: optimized for your preset and style.","minimum":0,"maximum":100},"betterQuality":{"type":"boolean","description":"Remove small dark spots (i.e. “pepper”) and connect small bright cracks.","default":false},"removeBackground":{"type":"boolean","description":"Remove background for Grayscale detector","default":true},"creativity":{"type":"number","description":"Allow the generation of \"hallucinations\" during the upscale process, which adds additional details and deviates from the original image. Default: optimized for your preset and style.","minimum":0,"maximum":100},"structureFidelity":{"type":"number","description":"Strength for the input image structure preservation","minimum":0,"maximum":100},"imageType":{"type":"string","description":"Preserve the seamless properties of skybox or texture images. Input has to be of same type (seamless).","enum":["seamfull","skybox","texture"],"default":"seamfull"},"image":{"type":"string","description":"The input image to process. Must reference an existing AssetId or be a data URL."},"thumbnail":{"type":"object","description":"The thumbnail of the canvas","properties":{"assetId":{"type":"string","description":"The AssetId of the image used as a thumbnail for the canvas (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"url":{"type":"string","description":"The url of the image used as a thumbnail for the canvas"}},"required":["assetId","url"]},"structureImage":{"type":"string","description":"The control image for structure. A canny detector will be applied to this image. Must reference an existing AssetId."},"invert":{"type":"boolean","description":"To invert the relief","default":false},"kind":{"type":"string","enum":["3d","audio","document","image","image-hdr","json","video"]},"colorMode":{"type":"string"},"highThreshold":{"type":"number","description":"High threshold for Canny detector"},"verticalExpansionRatio":{"type":"number","description":"(deprecated) Vertical expansion ratio.","minimum":1,"maximum":2},"halfMode":{"type":"boolean"},"name":{"type":"string"},"negativePromptStrength":{"type":"number","description":"Controls the influence of the negative prompt. Default 0 means the negative prompt has no effect. Higher values increase negative prompt influence.\nMust be > 0 if negativePrompt is provided.","minimum":0,"maximum":10},"refinementSteps":{"type":"number","description":"Additional refinement steps before scaling.\n\nIf scalingFactor == 1, the refinement process will be applied (1 + refinementSteps) times.\nIf scalingFactor > 1, the refinement process will be applied refinementSteps times.","minimum":0,"maximum":4},"referenceImages":{"type":"array","description":"The reference images used for the asset generation or editing","items":{"type":"string"}},"styleFidelity":{"type":"number","description":"The higher the value the more it will look like the style image(s)","minimum":0,"maximum":100},"resultContours":{"type":"boolean","description":"Boolean to output the contours.","default":false},"controlEnd":{"type":"number","description":"End step for control."},"polished":{"type":"number","description":"How polished is the surface? 0 is like a rough surface, 1 is like a mirror","minimum":0,"maximum":1},"scheduler":{"type":"string","description":"The scheduler used to generate this asset"},"overrideEmbeddings":{"type":"boolean","description":"Override the embeddings of the model. Only your prompt and negativePrompt will be used. Use with caution."},"progressPercent":{"type":"number"},"factor":{"type":"number","description":"Contrast factor for Grayscale detector"},"mask":{"type":"string","description":"The mask used for the asset generation or editing"},"lowThreshold":{"type":"number","description":"Low threshold for Canny detector"},"depthFidelity":{"type":"number","description":"The depth fidelity if a depth image provided","minimum":0,"maximum":100},"raised":{"type":"number","description":"How raised is the surface? 0 is flat like water, 1 is like a very rough rock","minimum":0,"maximum":1},"copiedAt":{"type":"string","description":"The date when the asset was copied to a project"},"contours":{"type":"array","items":{"type":"array","items":{"type":"array","items":{"type":"array","items":{"type":"number"}}}}},"negativePrompt":{"type":"string","description":"The negative prompt used to generate this asset"},"defaultParameters":{"type":"boolean","description":"If true, use the default parameters","default":false},"resizeOption":{"type":"number","description":"Size proportion of the input image in the output.","minimum":0.1,"maximum":1},"outputIndex":{"type":"number"},"saveFlipbook":{"type":"boolean","description":"Save a flipbook of the texture. Deactivated when the input texture is larger than 2048x2048px","default":true},"guidance":{"type":"number","description":"The guidance used to generate this asset"},"numInferenceSteps":{"type":"number","description":"The number of denoising steps for each image generation.","minimum":5,"maximum":50},"hdr":{"type":"number"},"clustering":{"type":"boolean","description":"Activate clustering.","default":false},"shiny":{"type":"number","description":"How shiny is the surface? 0 is like a matte surface, 1 is like a diamond","minimum":0,"maximum":1},"colorCorrection":{"type":"boolean","description":"Ensure upscaled tiles have the same color histogram as the original tile.","default":true},"sketch":{"type":"boolean","description":"Activate sketch detection instead of canny.","default":false},"overlapPercentage":{"type":"number","description":"Overlap percentage for the output image.","minimum":0,"maximum":0.5},"prompt":{"type":"string","description":"The prompt that guided the asset generation or editing"},"resultImage":{"type":"boolean","description":"Boolean to enable output of the cut out object.","default":false},"cannyStructureImage":{"type":"string","description":"The control image already processed by canny detector. Must reference an existing AssetId."},"geometryEnforcement":{"type":"number","description":"Apply extra control to the Skybox 360 geometry.\nThe higher the value, the more the 360 geometry will influence the generated skybox image.\n\nUse with caution. Default is adapted to the other parameters.","minimum":0,"maximum":100},"targetWidth":{"type":"number","description":"Target width for the upscaled image, takes priority over the scaling factor","minimum":1024,"maximum":16000}},"required":["kind","type"]},"automaticCaptioning":{"type":"string","description":"Automatic captioning of the asset"},"description":{"type":"string","description":"The description, it will contain in priority:\n- the manual description\n- the advanced captioning when the asset is used in training flow\n- the automatic captioning"},"privacy":{"type":"string","description":"The privacy of the asset","enum":["private","public","unlisted"]},"mimeType":{"type":"string","description":"The mime type of the asset (example: \"image/png\")"},"source":{"type":"string","description":"source of the asset","enum":["3d23d","3d23d:texture","3d:texture","3d:texture:albedo","3d:texture:metallic","3d:texture:mtl","3d:texture:normal","3d:texture:roughness","audio2audio","background-removal","canvas","canvas-drawing","canvas-export","detection","generative-fill","image-prompt-editing","img23d","img2img","img2video","inference-control-net","inference-control-net-img","inference-control-net-inpainting","inference-control-net-inpainting-ip-adapter","inference-control-net-ip-adapter","inference-control-net-reference","inference-control-net-texture","inference-img","inference-img-ip-adapter","inference-img-texture","inference-in-paint","inference-in-paint-ip-adapter","inference-reference","inference-reference-texture","inference-txt","inference-txt-ip-adapter","inference-txt-texture","patch","pixelization","reframe","restyle","segment","segmentation-image","segmentation-mask","skybox-3d","skybox-base-360","skybox-hdri","texture","texture:albedo","texture:ao","texture:edge","texture:height","texture:metallic","texture:normal","texture:smoothness","txt23d","txt2audio","txt2img","txt2video","unknown","uploaded","uploaded-3d","uploaded-audio","uploaded-avatar","uploaded-video"
,"upscale","upscale-skybox","upscale-texture","upscale-video","vectorization","video2img","video2video"]},"ownerId":{"type":"string","description":"The owner (project) ID (example: \"proj_23tlk332lkht3kl2\" or \"team_dlkhgs23tlk3hlkth32lkht3kl2\" for old teams)"},"createdAt":{"type":"string","description":"The asset creation date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"firstFrame":{"type":"object","description":"The video asset's first frame.\n\nContains the assetId and the url of the first frame.","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"editCapabilities":{"type":"array","description":"List of edit capabilities","items":{"type":"string","enum":["DETECTION","GENERATIVE_FILL","PIXELATE","PROMPT_EDITING","REFINE","REFRAME","REMOVE_BACKGROUND","SEGMENTATION","UPSCALE","UPSCALE_360","VECTORIZATION"]}},"embedding":{"type":"array","description":"The embedding of the asset when requested.\n\nOnly available when an asset can be embedded (ie: not Detection maps)","items":{"type":"number"}},"id":{"type":"string","description":"The asset ID (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"updatedAt":{"type":"string","description":"The asset last update date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"thumbnail":{"type":"object","description":"The asset's thumbnail.\n\nContains the assetId and the url of the thumbnail.","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"nsfw":{"type":"array","description":"The NSFW labels","items":{"type":"string"}},"kind":{"type":"string","description":"The kind of asset","enum":["3d","audio","document","image","image-hdr","json","video"]},"lastFrame":{"type":"object","description":"The video asset's last frame.\n\nContains the assetId and the url of the last frame.","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"originalFileUrl":{"type":"string","description":"The original file url.\n\nContains the url of the original file, without any conversion. Only available for some specific video, audio and threeD assets.\nIs only specified if the given asset data has been replaced with a new file during the creation of the asset."},"authorId":{"type":"string","description":"The author user ID (example: \"dcf121faaa1a0a0bbbd9ca1b73d62aea\")"},"url":{"type":"string","description":"Signed URL to get the asset content"},"isHidden":{"type":"boolean","description":"Whether the asset is hidden."},"tags":{"type":"array","description":"The associated tags (example: [\"sci-fi\", \"landscape\"])","items":{"type":"string"}},"outputIndex":{"type":"number","description":"The output index of the asset within a job\nThis index is a positive integer that starts at 0\nIt is used to differentiate between multiple outputs of the same job\nIf the job has only one output, this index is 0"},"collectionIds":{"type":"array","description":"A list of CollectionId this asset belongs to","items":{"type":"string"}},"properties":{"type":"object","description":"The properties of the asset, content may depend on the kind of asset returned","properties":{"transcription":{"type":"object","description":"Transcription of the audio","properties":{"text":{"type":"string"}},"required":["text"]},"hasNormals":{"type":"boolean","description":"Whether the mesh has normal vectors"},"format":{"type":"string","description":"Format of the mesh file (e.g. 'glb', etc.)"},"description":{"type":"string","description":"Description of the audio"},"bitrate":{"type":"number","description":"Bitrate of the media in bits per second"},"boneCount":{"type":"number","description":"Number of bones if skeleton exists"},"classification":{"type":"string","description":"Classification of the audio","enum":["effect","interview","music","other","sound","speech","text","unknown"]},"sampleRate":{"type":"number","description":"Sample rate of the media in Hz"},"faceCount":{"type":"number","description":"Number of faces/triangles in the mesh"},"vertexCount":{"type":"number","description":"Number of vertices in the mesh"},"animationFrameCount":{"type":"number","description":"Number of animation frames if animations exist"},"duration":{"type":"number","description":"Duration of the media in seconds"},"frameRate":{"type":"number","description":"Frame rate of the video in frames per second"},"nbFrames":{"type":"number","description":"Number of frames in the video"},"channels":{"type":"number","description":"Number of channels of the audio"},"size":{"type":"number"},"hasSkeleton":{"type":"boolean","description":"Whether the mesh has bones/skeleton"},"width":{"type":"number"},"hasAnimations":{"type":"boolean","description":"Whether the mesh has animations"},"codecName":{"type":"string","description":"Codec name of the media"},"hasUVs":{"type":"boolean","description":"Whether the mesh has UV coordinates"},"dimensions":{"type":"array","description":"Bounding box dimensions [width, height, depth]","items":{}},"height":{"type":"number"}},"required":["size"]},"status":{"type":"string","description":"The actual status","enum":["error","pending","success"]}},"required":["authorId","collectionIds","createdAt","editCapabilities","id","kind","metadata","mimeType","ownerId","privacy","properties","source","status","tags","updatedAt","url"]}}},"PutImagesEraseBackgroundResponse":{"type":"object","required":["asset"],"properties":{"image":{"type":"string","description":"The image without background in base64 format string only if returnImage is true."},"asset":{"type":"object","properties":{"preview":{"type":"object","description":"The asset's preview.\n\nContains the assetId and the url of the preview.","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"metadata":{"type":"object","description":"Metadata of the asset with some additional information","properties":{"maxIterations":{"type":"number"},"modality":{"type":"string","description":"Modality to detect","enum":["canny","depth","grayscale","lineart_anime","mlsd","normal","pose","scribble","segmentation","sketch"],"default":"canny"},"tileStyle":{"type":"boolean","description":"If set to true, during the upscaling process, the model will match tiles of the source image with tiles of the style image(s). This will result in a more coherent restyle. Works best with style images that have a similar composition.","default":false},"modelId":{"type":"string","description":"The modelId used to generate this asset"},"sourceProjectId":{"type":"string"},"bbox":{"type":"array","description":"A bounding box around the object of interest, in the format [x1, y1, x2, y2].","items":{}},"keypointThreshold":{"type":"number","description":"How polished is the surface? 0 is like a rough surface, 1 is like a mirror","minimum":0,"maximum":1},"type":{"type":"string","description":"The type of the asset. Ex: 'inference-txt2img' will represent an asset generated from a text to image model","enum":["3d-texture","3d-texture-albedo","3d-texture-metallic","3d-texture-mtl","3d-texture-normal","3d-texture-roughness","3d23d","3d23d-texture","audio2audio","background-removal","canvas","canvas-drawing","canvas-export","detection","generative-fill","image-prompt-editing","img23d","img2img","img2video","inference-controlnet","inference-controlnet-img2img","inference-controlnet-inpaint","inference-controlnet-inpaint-ip-adapter","inference-controlnet-ip-adapter","inference-controlnet-reference","inference-controlnet-texture","inference-img2img","inference-img2img-ip-adapter","inference-img2img-texture","inference-inpaint","inference-inpaint-ip-adapter","inference-reference","inference-reference-texture","inference-txt2img","inference-txt2img-ip-adapter","inference-txt2img-texture","patch","pixelization","reframe","restyle","segment","segmentation-image","segmentation-mask","skybox-3d","skybox-base-360","skybox-hdri","texture","texture-albedo","texture-ao","texture-edge","texture-height","texture-metallic","texture-normal","texture-smoothness","txt23d","txt2audio","txt2img","txt2video","unknown","uploaded","uploaded-3d","uploaded-audio","uploaded-avatar","uploaded-video","upscale","upscale-skybox","upscale-texture","upscale-video","vectorization","video2img","video2video"]},"text":{"type":"string","description":"A textual description / keywords describing the object of interest.","maxLength":100},"height":{"type":"number"},"nbMasks":{"type":"number"},"creativityDecay":{"type":"number","description":"Amount of decay in creativity over the upscale process. The lower the value, the less the creativity will be preserved over the upscale process.","minimum":0,"maximum":100},"layerDifference":{"type":"number"},"scalingFactor":{"type":"number","description":"Scaling factor (when `targetWidth` not specified)","minimum":1,"maximum":16},"inputFidelity":{"type":"string","description":"When set to `high`, allows to better preserve details from the input images in the output.\nThis is especially useful when using images that contain elements like faces or logos that\nrequire accurate preservation in the generated image.\n\nYou can provide multiple input images that will all be preserved with high fidelity, but keep\nin mind that the first image will be preserved with richer textures and finer details, so if\nyou include elements such as faces, consider placing them in the first image.\n\nOnly available for the `gpt-image-1` model.","enum":["high","low"],"default":"low"},"modelType":{"type":"string","description":"The type of the generator used","enum":["custom","flux.1","flux.1-composition","flux.1-kontext-dev","flux.1-kontext-lora","flux.1-krea-dev","flux.1-krea-lora","flux.1-lora","flux.1-pro","flux.1.1-pro-ultra","flux1.1-pro","gpt-image-1","sd-1_5","sd-1_5-composition","sd-1_5-lora","sd-xl","sd-xl-composition","sd-xl-lora"]},"numOutputs":{"type":"number","description":"The number of outputs to generate.","minimum":1,"maximum":8},"angular":{"type":"number","description":"How angular is the surface? 0 is like a sphere, 1 is like a mechanical object","minimum":0,"maximum":1},"targetHeight":{"type":"number","description":"The target height of the output image.","minimum":0,"maximum":2048},"concepts":{"type":"array","description":"Flux Kontext LoRA to style the image.\nFor Flux Kontext Prompt Editing.","items":{"type":"object","properties":{"modelId":{"type":"string","description":"The model ID (example: \"model_eyVcnFJcR92BxBkz7N6g5w\")"},"modelEpoch":{"type":"string","description":"The epoch of the model (example: \"000001\")\nOnly available for Flux Lora Trained models"},"scale":{"type":"number","description":"The scale of the model (example: 1.0)\nFor Flux Kontext Prompt Editing, the scale is between 0 and 2.","minimum":-2,"maximum":2}},"required":["modelId","scale"]}},"size":{"type":"number"},"spliceThreshold":{"type":"number"},"dilate":{"type":"number","description":"The number of pixels to dilate the result masks.","minimum":0,"maximum":30},"style":{"type":"string","enum":["3d-cartoon","3d-rendered","anime","cartoon","cinematic","claymation","cloud-skydome","comic","cyberpunk","enchanted","fantasy","ink","manga","manga-color","minimalist","neon-tron","oil-painting","pastel","photo","photography","psychedelic","retro-fantasy","scifi-concept-art","space","standard","whimsical"]},"imageFidelity":{"type":"number","description":"Strengthen the similarity to the original image during the upscale. Default: optimized for your preset and style.","minimum":0,"maximum":100},"inputLocation":{"type":"string","description":"Location of the input image in the output.","enum":["bottom","left","middle","right","top"],"default":"middle"},"seed":{"type":"string","description":"The seed used to generate this asset"},"preset":{"type":"string"},"trainingImage":{"type":"boolean"},"resultMask":{"type":"boolean","description":"Boolean to enable returning the masks (binary image) in the response.","default":true},"lockExpiresAt":{"type":"string","description":"The ISO timestamp when the lock on the canvas will expire"},"baseModelId":{"type":"string","description":"The baseModelId that may be changed at inference time"},"pathPrecision":{"type":"number"},"rootParentId":{"type":"string"},"sharpen":{"type":"boolean","description":"Sharpen tiles.","default":false},"cornerThreshold":{"type":"number"},"styleImages":{"type":"array","description":"List of style images. Most of the time, only one image is enough. They must be existing AssetIds.","items":{"type":"string"}},"texture":{"type":"string","description":"The asset to convert in texture maps. Must reference an existing AssetId."},"filterSpeckle":{"type":"number"},"minThreshold":{"type":"number","description":"Minimum threshold for Grayscale conversion"},"maxThreshold":{"type":"number","description":"Maximum threshold for Grayscale conversion"},"styleImagesFidelity":{"type":"number","description":"Condition the influence of the style image(s). The higher the value, the more the style images will influence the upscaled image.","minimum":0,"maximum":100},"depthImage":{"type":"string","description":"The control image processed by depth estimator. Must reference an existing AssetId."},"parentId":{"type":"string"},"parentJobId":{"type":"string"},"width":{"type":"number","description":"The width of the rendered image.","minimum":1024,"maximum":2048},"strength":{"type":"number","description":"The strength\n\nOnly available for the `flux-kontext` LoRA model."},"inferenceId":{"type":"string","description":"The id of the Inference describing how this image was generated"},"lengthThreshold":{"type":"number"},"originalAssetId":{"type":"string"},"backgroundOpacity":{"type":"number","description":"Int to set between 0 and 255 for the opacity of the background in the result images.","minimum":0,"maximum":255},"aspectRatio":{"type":"string","description":"The optional aspect ratio given for the generation, only applicable for some models"},"detailsLevel":{"type":"number","description":"Amount of details to remove or add","minimum":-50,"maximum":50},"fractality":{"type":"number","description":"Determine the scale at which the upscale process works.\n- With a small value, the upscale works at the largest scale, resulting in fewer added details and more coherent images. Ideal for portraits, for example.\n- With a large value, the upscale works at the smallest scale, resulting in more added details and more hallucinations. Ideal for landscapes, for example.\n\n(info): A small value is slower and more expensive to run.","minimum":0,"maximum":100},"horizontalExpansionRatio":{"type":"number","description":"(deprecated) Horizontal expansion ratio.","minimum":1,"maximum":2},"points":{"type":"array","description":"List of points (label, x, y) in the image where label = 0 for background and 1 for object.","items":{"type":"array","items":{}}},"mode":{"type":"string"},"colorPrecision":{"type":"number"},"promptFidelity":{"type":"number","description":"Increase the fidelity to the prompt during upscale. Default: optimized for your preset and style.","minimum":0,"maximum":100},"betterQuality":{"type":"boolean","description":"Remove small dark spots (i.e. “pepper”) and connect small bright cracks.","default":false},"removeBackground":{"type":"boolean","description":"Remove background for Grayscale detector","default":true},"creativity":{"type":"number","description":"Allow the generation of \"hallucinations\" during the upscale process, which adds additional details and deviates from the original image. Default: optimized for your preset and style.","minimum":0,"maximum":100},"structureFidelity":{"type":"number","description":"Strength for the input image structure preservation","minimum":0,"maximum":100},"imageType":{"type":"string","description":"Preserve the seamless properties of skybox or texture images. Input has to be of same type (seamless).","enum":["seamfull","skybox","texture"],"default":"seamfull"},"image":{"type":"string","description":"The input image to process. Must reference an existing AssetId or be a data URL."},"thumbnail":{"type":"object","description":"The thumbnail of the canvas","properties":{"assetId":{"type":"string","description":"The AssetId of the image used as a thumbnail for the canvas (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"url":{"type":"string","description":"The url of the image used as a thumbnail for the canvas"}},"required":["assetId","url"]},"structureImage":{"type":"string","description":"The control image for structure. A canny detector will be applied to this image. Must reference an existing AssetId."},"invert":{"type":"boolean","description":"To invert the relief","default":false},"kind":{"type":"string","enum":["3d","audio","document","image","image-hdr","json","video"]},"colorMode":{"type":"string"},"highThreshold":{"type":"number","description":"High threshold for Canny detector"},"verticalExpansionRatio":{"type":"number","description":"(deprecated) Vertical expansion ratio.","minimum":1,"maximum":2},"halfMode":{"type":"boolean"},"name":{"type":"string"},"negativePromptStrength":{"type":"number","description":"Controls the influence of the negative prompt. Default 0 means the negative prompt has no effect. Higher values increase negative prompt influence.\nMust be > 0 if negativePrompt is provided.","minimum":0,"maximum":10},"refinementSteps":{"type":"number","description":"Additional refinement steps before scaling.\n\nIf scalingFactor == 1, the refinement process will be applied (1 + refinementSteps) times.\nIf scalingFactor > 1, the refinement process will be applied refinementSteps times.","minimum":0,"maximum":4},"referenceImages":{"type":"array","description":"The reference images used for the asset generation or editing","items":{"type":"string"}},"styleFidelity":{"type":"number","description":"The higher the value the more it will look like the style image(s)","minimum":0,"maximum":100},"resultContours":{"type":"boolean","description":"Boolean to output the contours.","default":false},"controlEnd":{"type":"number","description":"End step for control."},"polished":{"type":"number","description":"How polished is the surface? 0 is like a rough surface, 1 is like a mirror","minimum":0,"maximum":1},"scheduler":{"type":"string","description":"The scheduler used to generate this asset"},"overrideEmbeddings":{"type":"boolean","description":"Override the embeddings of the model. Only your prompt and negativePrompt will be used. Use with caution."},"progressPercent":{"type":"number"},"factor":{"type":"number","description":"Contrast factor for Grayscale detector"},"mask":{"type":"string","description":"The mask used for the asset generation or editing"},"lowThreshold":{"type":"number","description":"Low threshold for Canny detector"},"depthFidelity":{"type":"number","description":"The depth fidelity if a depth image provided","minimum":0,"maximum":100},"raised":{"type":"number","description":"How raised is the surface? 0 is flat like water, 1 is like a very rough rock","minimum":0,"maximum":1},"copiedAt":{"type":"string","description":"The date when the asset was copied to a project"},"contours":{"type":"array","items":{"type":"array","items":{"type":"array","items":{"type":"array","items":{"type":"number"}}}}},"negativePrompt":{"type":"string","description":"The negative prompt used to generate this asset"},"defaultParameters":{"type":"boolean","description":"If true, use the default parameters","default":false},"resizeOption":{"type":"number","description":"Size proportion of the input image in the output.","minimum":0.1,"maximum":1},"outputIndex":{"type":"number"},"saveFlipbook":{"type":"boolean","description":"Save a flipbook of the texture. Deactivated when the input texture is larger than 2048x2048px","default":true},"guidance":{"type":"number","description":"The guidance used to generate this asset"},"numInferenceSteps":{"type":"number","description":"The number of denoising steps for each image generation.","minimum":5,"maximum":50},"hdr":{"type":"number"},"clustering":{"type":"boolean","description":"Activate clustering.","default":false},"shiny":{"type":"number","description":"How shiny is the surface? 0 is like a matte surface, 1 is like a diamond","minimum":0,"maximum":1},"colorCorrection":{"type":"boolean","description":"Ensure upscaled tiles have the same color histogram as the original tile.","default":true},"sketch":{"type":"boolean","description":"Activate sketch detection instead of canny.","default":false},"overlapPercentage":{"type":"number","description":"Overlap percentage for the output image.","minimum":0,"maximum":0.5},"prompt":{"type":"string","description":"The prompt that guided the asset generation or editing"},"resultImage":{"type":"boolean","description":"Boolean to enable output of the cut out object.","default":false},"cannyStructureImage":{"type":"string","description":"The control image already processed by canny detector. Must reference an existing AssetId."},"geometryEnforcement":{"type":"number","description":"Apply extra control to the Skybox 360 geometry.\nThe higher the value, the more the 360 geometry will influence the generated skybox image.\n\nUse with caution. Default is adapted to the other parameters.","minimum":0,"maximum":100},"targetWidth":{"type":"number","description":"Target width for the upscaled image, takes priority over the scaling factor","minimum":1024,"maximum":16000}},"required":["kind","type"]},"automaticCaptioning":{"type":"string","description":"Automatic captioning of the asset"},"description":{"type":"string","description":"The description, it will contain in priority:\n- the manual description\n- the advanced captioning when the asset is used in training flow\n- the automatic captioning"},"privacy":{"type":"string","description":"The privacy of the asset","enum":["private","public","unlisted"]},"mimeType":{"type":"string","description":"The mime type of the asset (example: \"image/png\")"},"source":{"type":"string","description":"source of the asset","enum":["3d23d","3d23d:texture","3d:texture","3d:texture:albedo","3d:texture:metallic","3d:texture:mtl","3d:texture:normal","3d:texture:roughness","audio2audio","background-removal","canvas","canvas-drawing","canvas-export","detection","generative-fill","image-prompt-editing","img23d","img2img","img2video","inference-control-net","inference-control-net-img","inference-control-net-inpainting","inference-control-net-inpainting-ip-adapter","inference-control-net-ip-adapter","inference-control-net-reference","inference-control-net-texture","inference-img","inference-img-ip-adapter","inference-img-texture","inference-in-paint","inference-in-paint-ip-adapter","inference-reference","inference-reference-texture","inference-txt","inference-txt-ip-adapter","inference-txt-texture","patch","pixelization","reframe","restyle","segment","segmentation-image","segmentation-mask","skybox-3d","skybox-base-360","skybox-hdri","texture","texture:albedo","texture:ao","texture:edge","texture:height","texture:metallic","texture:normal","texture:smoothness","txt23d","txt2audio","txt2img","txt2video","unknown","uploaded","uploaded-3d","uploaded-audio","uploaded-avatar","uploaded-video"
,"upscale","upscale-skybox","upscale-texture","upscale-video","vectorization","video2img","video2video"]},"ownerId":{"type":"string","description":"The owner (project) ID (example: \"proj_23tlk332lkht3kl2\" or \"team_dlkhgs23tlk3hlkth32lkht3kl2\" for old teams)"},"createdAt":{"type":"string","description":"The asset creation date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"firstFrame":{"type":"object","description":"The video asset's first frame.\n\nContains the assetId and the url of the first frame.","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"editCapabilities":{"type":"array","description":"List of edit capabilities","items":{"type":"string","enum":["DETECTION","GENERATIVE_FILL","PIXELATE","PROMPT_EDITING","REFINE","REFRAME","REMOVE_BACKGROUND","SEGMENTATION","UPSCALE","UPSCALE_360","VECTORIZATION"]}},"embedding":{"type":"array","description":"The embedding of the asset when requested.\n\nOnly available when an asset can be embedded (ie: not Detection maps)","items":{"type":"number"}},"id":{"type":"string","description":"The asset ID (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"updatedAt":{"type":"string","description":"The asset last update date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"thumbnail":{"type":"object","description":"The asset's thumbnail.\n\nContains the assetId and the url of the thumbnail.","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"nsfw":{"type":"array","description":"The NSFW labels","items":{"type":"string"}},"kind":{"type":"string","description":"The kind of asset","enum":["3d","audio","document","image","image-hdr","json","video"]},"lastFrame":{"type":"object","description":"The video asset's last frame.\n\nContains the assetId and the url of the last frame.","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"originalFileUrl":{"type":"string","description":"The original file url.\n\nContains the url of the original file, without any conversion. Only available for some specific video, audio and threeD assets.\nIs only specified if the given asset data has been replaced with a new file during the creation of the asset."},"authorId":{"type":"string","description":"The author user ID (example: \"dcf121faaa1a0a0bbbd9ca1b73d62aea\")"},"url":{"type":"string","description":"Signed URL to get the asset content"},"isHidden":{"type":"boolean","description":"Whether the asset is hidden."},"tags":{"type":"array","description":"The associated tags (example: [\"sci-fi\", \"landscape\"])","items":{"type":"string"}},"outputIndex":{"type":"number","description":"The output index of the asset within a job\nThis index is a positive integer that starts at 0\nIt is used to differentiate between multiple outputs of the same job\nIf the job has only one output, this index is 0"},"collectionIds":{"type":"array","description":"A list of CollectionId this asset belongs to","items":{"type":"string"}},"properties":{"type":"object","description":"The properties of the asset, content may depend on the kind of asset returned","properties":{"transcription":{"type":"object","description":"Transcription of the audio","properties":{"text":{"type":"string"}},"required":["text"]},"hasNormals":{"type":"boolean","description":"Whether the mesh has normal vectors"},"format":{"type":"string","description":"Format of the mesh file (e.g. 'glb', etc.)"},"description":{"type":"string","description":"Description of the audio"},"bitrate":{"type":"number","description":"Bitrate of the media in bits per second"},"boneCount":{"type":"number","description":"Number of bones if skeleton exists"},"classification":{"type":"string","description":"Classification of the audio","enum":["effect","interview","music","other","sound","speech","text","unknown"]},"sampleRate":{"type":"number","description":"Sample rate of the media in Hz"},"faceCount":{"type":"number","description":"Number of faces/triangles in the mesh"},"vertexCount":{"type":"number","description":"Number of vertices in the mesh"},"animationFrameCount":{"type":"number","description":"Number of animation frames if animations exist"},"duration":{"type":"number","description":"Duration of the media in seconds"},"frameRate":{"type":"number","description":"Frame rate of the video in frames per second"},"nbFrames":{"type":"number","description":"Number of frames in the video"},"channels":{"type":"number","description":"Number of channels of the audio"},"size":{"type":"number"},"hasSkeleton":{"type":"boolean","description":"Whether the mesh has bones/skeleton"},"width":{"type":"number"},"hasAnimations":{"type":"boolean","description":"Whether the mesh has animations"},"codecName":{"type":"string","description":"Codec name of the media"},"hasUVs":{"type":"boolean","description":"Whether the mesh has UV coordinates"},"dimensions":{"type":"array","description":"Bounding box dimensions [width, height, depth]","items":{}},"height":{"type":"number"}},"required":["size"]},"status":{"type":"string","description":"The actual status","enum":["error","pending","success"]}},"required":["authorId","collectionIds","createdAt","editCapabilities","id","kind","metadata","mimeType","ownerId","privacy","properties","source","status","tags","updatedAt","url"]}}},"UnauthorizedResponse":{"type":"object","required":["apiError","createdAt","error","id","infos","updatedAt"],"properties":{"createdAt":{"type":"string"},"apiError":{"type":"string"},"id":{"type":"string"},"error":{"type":"string"},"infos":{"type":"object","properties":{}},"updatedAt":{"type":"string"}}},"PostDescribeStyleInferencesResponse":{"type":"object","required":["description","job","synthesis"],"properties":{"synthesis":{"type":"string","description":"A short synthesis that describes the given asset(s) or asset(s) model."},"creativeUnitsCost":{"type":"number","description":"The Creative Units cost for the request billed"},"creativeUnitsDiscount":{"type":"number","description":"The Creative Units discount for the request billed"},"description":{"type":"string","description":"The style of the given images."},"job":{"type":"object","properties":{"createdAt":{"type":"string","description":"The job creation date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"jobId":{"type":"string","description":"The job ID (example: \"job_ocZCnG1Df35XRL1QyCZSRxAG8\")"},"metadata":{"type":"object","description":"Metadata of the job with some additional information","properties":{"output":{"type":"object","description":"The output of the job","properties":{}},"input":{"type":"object","description":"The inputs for the job","properties":{}},"assetIds":{"type":"array","description":"List of produced assets for this job","items":{"type":"string"}},"flow":{"type":"array","description":"The flow of the job. Only available for workflow jobs.","items":{"type":"object","properties":{"jobId":{"type":"string","description":"If the flow is part of a WorkflowJob, this is the jobId for the node.\njobId is only available for nodes started. A node \"Pending\" for a running workflow job is not started."},"assets":{"type":"array","description":"List of produced assets for this node.","items":{"type":"object","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]}},"logicType":{"type":"string","description":"The type of the logic for the node.\nOnly available for logic nodes.","enum":["if-else"]},"dependsOn":{"type":"array","description":"The nodes that this node depends on.\nOnly available for nodes that have dependencies. Mainly used for user approval nodes.","items":{"type":"string"}},"modelId":{"type":"string","description":"The model id for the node. Mainly used for custom model tasks."},"inputs":{"type":"array","description":"The inputs of the node.","items":{"type":"object","description":"This is used to run the flow.","properties":{"parent":{"type":"boolean","description":"Whether this input represents a parent asset to assign to the produced assets.\nOnly available for \\`file\\` and \\`file_array\\` input types.\n\nFor \\`file_array\\`, the parent asset is the first item in the array."},"color":{"type":"boolean","description":"Whether the input is a color or not. Only available for \\`string\\` input type."},"inputs":{"type":"array","description":"The list of inputs which form an object within a container array.\nAll inputs are the same as the current object.\nThis is only available for type inputs_array inputs.","items":{"type":"object","properties":{}}},"minLength":{"type":"number","description":"The minimum allowed length for string inputs. Also applies to each item in \\`string_array\\`."},"backgroundBehavior":{"type":"string","description":"Specifies the background behavior for the input. 
Only available for \\`file\\` and \\`file_array\\`\ninput types with kind \\`image\\`.","enum":["opaque","transparent"]},"description":{"type":"string","description":"Help text displayed in the UI to provide additional information about the input"},"type":{"type":"string","description":"The data type of the input","enum":["boolean","file","file_array","inputs_array","model","model_array","number","number_array","string","string_array"]},"required":{"type":"object","description":"Set of rules that describes when this input is required:\n- \\`always\\`: Input is always required\n- \\`ifNotDefined\\`: Input is required when another specified input is not defined\n- \\`ifDefined\\`: Input is required when another specified input is defined\n- \\`conditionalValues\\`: Input is required when another input has a specific value\n\nBy default, the input is not required.","properties":{"always":{"type":"boolean","description":"Whether the input is always required"},"ifNotDefined":{"type":"object","description":"Makes this input required when another input is not defined:\n- Key: name of the input that must be undefined\n- Value: message to display when this input is required","properties":{}},"ifDefined":{"type":"object","description":"Makes this input required when another input is defined:\n- Key: name of the input that must be defined\n- Value: message to display when this input is required","properties":{}},"conditionalValues":{"type":"object","description":"Makes this input required when another input has a specific value:\n- Key: name of the input to check\n- Value: operation and allowed values that trigger the requirement","properties":{}}}},"modelTypes":{"type":"array","description":"The allowed model types for this input. 
Example: \\`[\"flux.1-lora\"]\\`.\nOnly available for \\`model_array\\` input type.","items":{"type":"string","enum":["custom","flux.1","flux.1-composition","flux.1-kontext-dev","flux.1-kontext-lora","flux.1-krea-dev","flux.1-krea-lora","flux.1-lora","flux.1-pro","flux.1.1-pro-ultra","flux1.1-pro","gpt-image-1","sd-1_5","sd-1_5-composition","sd-1_5-lora","sd-xl","sd-xl-composition","sd-xl-lora"]}},"ref":{"type":"object","description":"The reference to another input or output of the same workflow.\nMust have at least one of node or conditional.","properties":{"equal":{"type":"string","description":"This is the desired node output value if ref is an if/else node."},"node":{"type":"string","description":"The node id or 'workflow' if the source is a workflow input."},"conditional":{"type":"array","description":"The conditional nodes to reference.\nIf the conditional nodes are successful, the node will be successful.\nIf the conditional nodes are skipped, the node will be skipped.\nContains an array of node ids used to check the status of the nodes.","items":{"type":"string"}},"name":{"type":"string","description":"The name of the input or output to reference.\nIf the type is 'workflow', the name is the name of the input of the workflow is required\nIf the type is 'node', the name is not mandatory, except if you want all outputs of the node.\nTo get all outputs of a node, you can use the name 'all'."}}},"min":{"type":"number","description":"The minimum allowed value. Only available for \\`number\\` and array input types."},"promptSpark":{"type":"boolean","description":"Whether the input is used with prompt spark. Only available for \\`string\\` input type."},"placeholder":{"type":"string","description":"Placeholder text for the input. Only available for 'string' input type."},"group":{"type":"string","description":"Used to visually group inputs together in the UI. 
Inputs with the same group value appear\nconsecutively in the UI."},"allowedValues":{"type":"array","description":"The allowed values for the input. For \\`string\\` or \\`number\\` types, creates a single-select\ndropdown.\nFor \\`string_array\\` type, creates a multi-select dropdown.","items":{}},"costImpact":{"type":"boolean","description":"Whether this input affects the model's cost calculation"},"max":{"type":"number","description":"The maximum allowed value. Only available for \\`number\\` and \\`array\\` input types."},"kind":{"type":"string","description":"The asset kind of the input. Only taken into account for \\`file\\` and \\`file_array\\` input types.","enum":["3d","audio","document","image","image-hdr","json","video"]},"maskFrom":{"type":"string","description":"The name of the file input field to use as the mask source"},"label":{"type":"string","description":"The label displayed in the UI for this input"},"hint":{"type":"string","description":"Hint text displayed in the UI as a tooltip to guide the user"},"name":{"type":"string","description":"The name that must be used to call the model through the API"},"step":{"type":"number","description":"The step increment for numeric inputs. Only available for \\`number\\` input type.","minimum":1},"prompt":{"type":"boolean","description":"Whether the input is a prompt. When true, displays as a text area with prompt spark feature.\nOnly available for \\`string\\` input type."},"maxLength":{"type":"number","description":"The maximum allowed length for \\`string\\` inputs. 
Also applies to each item in \\`string_array\\`."}},"required":["name","type"]}},"id":{"type":"string","description":"The id of the node."},"logic":{"type":"object","description":"The logic of the node.\nOnly available for logic nodes.","properties":{"default":{"type":"string","description":"The default case of the logic.\nContains the id/output of the node to execute if no case is matched.\nOnly available for if/else nodes."},"transform":{"type":"string","description":"The transform of the logic.\nOnly available for transform nodes."},"cases":{"type":"array","description":"The cases of the logic.\nOnly available for if/else nodes.","items":{"type":"object","properties":{"condition":{"type":"string"},"value":{"type":"string"}},"required":["condition","value"]}}}},"type":{"type":"string","description":"The type of the job for the node.","enum":["custom-model","generate-prompt","logic","remove-background","transform","user-approval","workflow"]},"workflowId":{"type":"string","description":"The workflow id for the node. Mainly used for workflow tasks."},"status":{"type":"string","description":"The status of the node. 
Only available for WorkflowJob nodes.","enum":["failure","pending","processing","skipped","success"]}},"required":["id","status","type"]}},"workflowId":{"type":"string","description":"The workflow ID of the job if job is part of a workflow."},"workflowJobId":{"type":"string","description":"The workflow job ID of the job if job is part of a workflow job."}}},"statusHistory":{"type":"array","description":"The history of the different statuses the job went through with the ISO string date\nof when the job reached each statuses.","items":{"type":"object","properties":{"date":{"type":"string"},"status":{"type":"string","enum":["canceled","failure","finalizing","in-progress","pending","queued","success","warming-up"]}},"required":["date","status"]}},"progress":{"type":"number","description":"Progress of the job (between 0 and 1)"},"authorId":{"type":"string","description":"The author user ID (example: \"dcf121faaa1a0a0bbbd9ca1b73d62aea\")"},"jobType":{"type":"string","description":"The type of job","enum":["assets-download","canvas-export","caption","caption-llava","custom","describe-style","detection","embed","flux","flux-model-training","generate-prompt","image-generation","image-prompt-editing","inference","mesh-preview-rendering","model-download","model-import","model-training","openai-image-generation","patch-image","pixelate","reframe","remove-background","repaint","restyle","segment","skybox-3d","skybox-base-360","skybox-hdri","skybox-upscale-360","texture","translate","upload","upscale","upscale-skybox","upscale-texture","vectorize","workflow"]},"ownerId":{"type":"string","description":"The owner ID (example: \"team_U3Qmc8PCdWXwAQJ4Dvw4tV6D\")"},"billing":{"type":"object","description":"The billing of the job","properties":{"cuDiscount":{"type":"number"},"cuCost":{"type":"number"}},"required":["cuCost","cuDiscount"]},"status":{"type":"string","description":"The current status of the 
job","enum":["canceled","failure","finalizing","in-progress","pending","queued","success","warming-up"]},"updatedAt":{"type":"string","description":"The job last update date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"}},"required":["createdAt","jobId","jobType","metadata","progress","status","statusHistory","updatedAt"]}}},"DeleteModelsInferencesByModelIdAndInferenceIdResponse":{},"PostModelActionByModelIdResponse":{"type":"object","required":["model"],"properties":{"model":{"type":"object","properties":{"trainingImages":{"type":"array","description":"The URLs of the first 3 training images of the model. To retrieve the full set of images, get it by modelId","items":{"type":"object","properties":{"createdAt":{"type":"string","description":"The training image upload date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"automaticCaptioning":{"type":"string","description":"Automatic captioning of the image"},"downloadUrl":{"type":"string","description":"The URL of the image"},"name":{"type":"string","description":"The original file name of the image (example: \"my-training-image.jpg\")"},"description":{"type":"string","description":"Description for the image"},"id":{"type":"string","description":"The training image ID (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"}},"required":["automaticCaptioning","createdAt","description","downloadUrl","id","name"]}},"inputs":{"type":"array","description":"The inputs of the model. Only used for custom models. To retrieve this list, get it by modelId with GET /models/{modelId}","items":{"type":"object","description":"Defines the input parameters for a model. 
Use this to understand the available input parameters\nfor a given `modelId` when calling `POST /generate/custom/{modelId}`.\nSee {@link https://docs.scenario.com/docs/video-generation} for examples.","properties":{"parent":{"type":"boolean","description":"Whether this input represents a parent asset to assign to the produced assets.\nOnly available for \\`file\\` and \\`file_array\\` input types.\n\nFor \\`file_array\\`, the parent asset is the first item in the array."},"color":{"type":"boolean","description":"Whether the input is a color or not. Only available for \\`string\\` input type."},"inputs":{"type":"array","description":"The list of inputs which form an object within a container array.\nAll inputs are the same as the current object.\nThis is only available for type inputs_array inputs.","items":{"type":"object","properties":{}}},"minLength":{"type":"number","description":"The minimum allowed length for string inputs. Also applies to each item in \\`string_array\\`."},"backgroundBehavior":{"type":"string","description":"Specifies the background behavior for the input. 
Only available for \\`file\\` and \\`file_array\\`\ninput types with kind \\`image\\`.","enum":["opaque","transparent"]},"description":{"type":"string","description":"Help text displayed in the UI to provide additional information about the input"},"type":{"type":"string","description":"The data type of the input","enum":["boolean","file","file_array","inputs_array","model","model_array","number","number_array","string","string_array"]},"required":{"type":"object","description":"Set of rules that describes when this input is required:\n- \\`always\\`: Input is always required\n- \\`ifNotDefined\\`: Input is required when another specified input is not defined\n- \\`ifDefined\\`: Input is required when another specified input is defined\n- \\`conditionalValues\\`: Input is required when another input has a specific value\n\nBy default, the input is not required.","properties":{"always":{"type":"boolean","description":"Whether the input is always required"},"ifNotDefined":{"type":"object","description":"Makes this input required when another input is not defined:\n- Key: name of the input that must be undefined\n- Value: message to display when this input is required","properties":{}},"ifDefined":{"type":"object","description":"Makes this input required when another input is defined:\n- Key: name of the input that must be defined\n- Value: message to display when this input is required","properties":{}},"conditionalValues":{"type":"object","description":"Makes this input required when another input has a specific value:\n- Key: name of the input to check\n- Value: operation and allowed values that trigger the requirement","properties":{}}}},"modelTypes":{"type":"array","description":"The allowed model types for this input. 
Example: \\`[\"flux.1-lora\"]\\`.\nOnly available for \\`model_array\\` input type.","items":{"type":"string","enum":["custom","flux.1","flux.1-composition","flux.1-kontext-dev","flux.1-kontext-lora","flux.1-krea-dev","flux.1-krea-lora","flux.1-lora","flux.1-pro","flux.1.1-pro-ultra","flux1.1-pro","gpt-image-1","sd-1_5","sd-1_5-composition","sd-1_5-lora","sd-xl","sd-xl-composition","sd-xl-lora"]}},"min":{"type":"number","description":"The minimum allowed value. Only available for \\`number\\` and array input types."},"promptSpark":{"type":"boolean","description":"Whether the input is used with prompt spark. Only available for \\`string\\` input type."},"placeholder":{"type":"string","description":"Placeholder text for the input. Only available for 'string' input type."},"group":{"type":"string","description":"Used to visually group inputs together in the UI. Inputs with the same group value appear\nconsecutively in the UI."},"allowedValues":{"type":"array","description":"The allowed values for the input. For \\`string\\` or \\`number\\` types, creates a single-select\ndropdown.\nFor \\`string_array\\` type, creates a multi-select dropdown.","items":{}},"costImpact":{"type":"boolean","description":"Whether this input affects the model's cost calculation"},"max":{"type":"number","description":"The maximum allowed value. Only available for \\`number\\` and \\`array\\` input types."},"kind":{"type":"string","description":"The asset kind of the input. 
Only taken into account for \\`file\\` and \\`file_array\\` input types.","enum":["3d","audio","document","image","image-hdr","json","video"]},"maskFrom":{"type":"string","description":"The name of the file input field to use as the mask source"},"label":{"type":"string","description":"The label displayed in the UI for this input"},"hint":{"type":"string","description":"Hint text displayed in the UI as a tooltip to guide the user"},"name":{"type":"string","description":"The name that must be used to call the model through the API"},"step":{"type":"number","description":"The step increment for numeric inputs. Only available for \\`number\\` input type.","minimum":1},"prompt":{"type":"boolean","description":"Whether the input is a prompt. When true, displays as a text area with prompt spark feature.\nOnly available for \\`string\\` input type."},"maxLength":{"type":"number","description":"The maximum allowed length for \\`string\\` inputs. Also applies to each item in \\`string_array\\`."}},"required":["name","type"]}},"exampleAssetIds":{"type":"array","description":"List of all example asset IDs set up by the model owner","items":{"type":"string"}},"privacy":{"type":"string","description":"The privacy of the model (default: private)","enum":["private","public","unlisted"],"default":"private"},"softDeletionOn":{"type":"string","description":"The date when the model will be soft deleted (only for Free plan)"},"epoch":{"type":"string","description":"The epoch of the model. 
Only available for Flux Lora Trained models.\nIf not set, uses the final model epoch (latest)"},"uiConfig":{"type":"object","description":"The UI configuration for the model","properties":{"selects":{"type":"object","description":"Configuration for the selects","additionalProperties":{"type":"object","properties":{}}},"inputProperties":{"type":"object","description":"Configuration for the input properties","additionalProperties":{"type":"object","properties":{"collapsed":{"type":"boolean"}}}},"presets":{"type":"array","description":"Configuration for the presets","items":{"type":"object","properties":{"presets":{"type":"object","properties":{}},"fields":{"type":"array","items":{"type":"string"}}},"required":["fields","presets"]}},"triggerGenerate":{"type":"object","description":"Configuration for the trigger generate button","properties":{"after":{"type":"string","description":"The 'name' of the input where the trigger generate button will be displayed (after the input).\nDo not specify both position and after."},"label":{"type":"string"},"position":{"type":"string","description":"The position of the trigger generate button. 
If position specified, the button will be displayed at the specified position.\nDo not specify both position and after.","enum":["bottom","top"]}},"required":["label"]},"lorasComponent":{"type":"object","description":"Configuration for the loras component","properties":{"modelIdInput":{"type":"string","description":"The input model id (example: a composition or a single LoRA modelId)\nIf specified, the model id will be attached to the output asset as a metadata\nIf the model-decomposer parser is specified on it, modelInput and scaleInput will be automatically populated"},"scaleInput":{"type":"string","description":"The input name of the scale (number_array)"},"label":{"type":"string","description":"The label of the component"},"modelInput":{"type":"string","description":"The input name of the model (model_array)"}},"required":["label","modelInput","scaleInput"]},"resolutionComponent":{"type":"object","description":"Configuration for the resolution component","properties":{"presets":{"type":"array","description":"The resolution presets","items":{"type":"object","properties":{"width":{"type":"number"},"label":{"type":"string"},"height":{"type":"number"}},"required":["height","label","width"]}},"widthInput":{"type":"string","description":"The input name of the width"},"label":{"type":"string","description":"The label of the component"},"heightInput":{"type":"string","description":"The input name of the height"}},"required":["heightInput","label","presets","widthInput"]}}},"source":{"type":"string","description":"The source of the model","enum":["civitai","huggingface","other","scenario"]},"ownerId":{"type":"string","description":"The owner ID (example: \"team_VFhihHKMRZyDDnZAJwLb2Q\")"},"type":{"type":"string","description":"The model type (example: 
\"flux.1-lora\")","enum":["custom","flux.1","flux.1-composition","flux.1-kontext-dev","flux.1-kontext-lora","flux.1-krea-dev","flux.1-krea-lora","flux.1-lora","flux.1-pro","flux.1.1-pro-ultra","flux1.1-pro","gpt-image-1","sd-1_5","sd-1_5-composition","sd-1_5-lora","sd-xl","sd-xl-composition","sd-xl-lora"]},"createdAt":{"type":"string","description":"The model creation date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"complianceMetadata":{"type":"object","description":"Compliance and regulatory metadata for the model","properties":{"subProcessor":{"type":"string","description":"The sub-processor used for the model","enum":["AWS","Fal","Modal","Provider","Replicate"]},"modelProvider":{"type":"string","description":"The provider of the model (examples: 'Google', 'AWS', 'BFL', 'Meshy', etc.)"},"licenseTerms":{"type":"string","description":"URL to license terms"},"dataProcessingComment":{"type":"string","description":"Data processing comment (e.g., zeroRetention, temporaryRetention, etc.)","enum":["researchOnly","serviceImprovement","temporaryRetention","zeroRetention"]},"maintainer":{"type":"string","description":"The maintainer of the model","enum":["Fal","Provider","Replicate","Scenario"]}}},"trainingStats":{"type":"object","description":"Additional information about the model's training","properties":{"trainDuration":{"type":"number","description":"The training duration in seconds"},"queueDuration":{"type":"number","description":"The training queued duration in seconds"},"endedAt":{"type":"string","description":"The training end time as an ISO date string"},"startedAt":{"type":"string","description":"The training start time as an ISO date string"}}},"promptEmbedding":{"type":"string","description":"Fine-tune the model's inferences with prompt embedding"},"trainingProgress":{"type":"object","description":"Additional information about the training progress of the model","properties":{"stage":{"type":"string","description":"The stage of the 
request","enum":["pending","queued-for-train","running-train","starting-train"]},"remainingTimeMs":{"type":"number","description":"The remaining time in milliseconds"},"progress":{"type":"number","description":"The progress of the job","minimum":0,"maximum":1},"startedAt":{"type":"number","description":"The timestamp in milliseconds marking the start of the process"},"position":{"type":"number","description":"Position of the job in the queue (i.e. the number of jobs in the queue before this one)"},"updatedAt":{"type":"number","description":"Timestamp in milliseconds of the last time the training progress was updated"}},"required":["stage","updatedAt"]},"id":{"type":"string","description":"The model ID (example: \"model_eyVcnFJcR92BxBkz7N6g5w\")"},"class":{"type":"object","description":"The class of the model","properties":{"modelId":{"type":"string","description":"The model ID of the class (example: \"stable-diffusion-v1-5\")"},"name":{"type":"string","description":"The class name (example: \"Character Design\")"},"category":{"type":"string","description":"The category slug of the class (example: \"art-style\")"},"thumbnails":{"type":"array","description":"Some example image URLs to showcase the class","items":{"type":"string"}},"prompt":{"type":"string","description":"The class prompt (example: \"a character design\")"},"conceptPrompt":{"type":"string","description":"The concept prompt of the class (example: \"a sks character design\")"},"slug":{"type":"string","description":"The class slug (example: \"art-style-character-design\")"},"status":{"type":"string","description":"The class status (only published classes are listed, but unpublished classes can still appear in existing models)","enum":["published","unpublished"]}},"required":["category","conceptPrompt","modelId","name","prompt","slug","status","thumbnails"]},"updatedAt":{"type":"string","description":"The model last update date as an ISO string (example: 
\"2023-02-03T11:19:41.579Z\")"},"thumbnail":{"type":"object","description":"A thumbnail for your model","properties":{"assetId":{"type":"string","description":"The AssetId of the image used as a thumbnail for your model (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"url":{"type":"string","description":"The url of the image used as a thumbnail for your model"}},"required":["assetId","url"]},"accessRestrictions":{"type":"number","description":"The access restrictions of the model\n0: Free plan\n25: Creator plan\n50: Pro plan\n75: Team plan\n100: Enterprise plan"},"capabilities":{"type":"array","description":"List of model capabilities (example: [\"txt2img\", \"img2img\", \"txt2img_ip_adapter\", ...])","items":{"type":"string","enum":["3d23d","audio2audio","controlnet","controlnet_img2img","controlnet_inpaint","controlnet_inpaint_ip_adapter","controlnet_ip_adapter","controlnet_reference","controlnet_texture","img23d","img2img","img2img_ip_adapter","img2img_texture","img2txt","img2video","inpaint","inpaint_ip_adapter","outpaint","reference","reference_texture","txt23d","txt2audio","txt2img","txt2img_ip_adapter","txt2img_texture","txt2txt","txt2video","video2img","video2video"]}},"parentModelId":{"type":"string","description":"The id of the parent model"},"trainingImagePairs":{"type":"array","description":"Array of training image pairs","items":{"type":"object","properties":{"sourceId":{"type":"string","description":"The source asset ID (must be a training asset)"},"targetId":{"type":"string","description":"The target asset ID (must be a training asset)"},"instruction":{"type":"string","description":"The instruction for the image pair, source to target"}}}},"trainingImagesNumber":{"type":"number","description":"The total number of training images"},"custom":{"type":"boolean","description":"Whether the model is a custom model and can be used only with POST /generate/custom/{modelId} endpoint"},"modelKeyword":{"type":"string","description":"The model keyword, this is a 
legacy parameter, please use conceptPrompt in parameters"},"shortDescription":{"type":"string","description":"The model short description (example: \"This model generates highly detailed cinematic scenes.\")"},"authorId":{"type":"string","description":"The author user ID (example: \"user_VFhihHKMRZyDDnZAJwLb2Q\")"},"userId":{"type":"string","description":"(Deprecated) The user ID (example: \"user_VFhihHKMRZyDDnZAJwLb2Q\")"},"tags":{"type":"array","description":"The associated tags (example: [\"sci-fi\", \"landscape\"])","items":{"type":"string"}},"negativePromptEmbedding":{"type":"string","description":"Fine-tune the model's inferences with negative prompt embedding"},"concepts":{"type":"array","description":"The concepts is required for the type model: composition","items":{"type":"object","properties":{"modelId":{"type":"string","description":"The model ID (example: \"model_eyVcnFJcR92BxBkz7N6g5w\")"},"modelEpoch":{"type":"string","description":"The epoch of the model (example: \"000001\")\nOnly available for Flux Lora Trained models"},"scale":{"type":"number","description":"The scale of the model (example: 1.0)\nFor Flux Kontext Prompt Editing, the scale is between 0 and 2.","minimum":-2,"maximum":2}},"required":["modelId","scale"]}},"collectionIds":{"type":"array","description":"A list of CollectionId this model belongs to","items":{"type":"string"}},"name":{"type":"string","description":"The model name (example: \"Cinematic Realism\")"},"epochs":{"type":"array","description":"The epochs of the model. 
Only available for Flux Lora Trained models.","items":{"type":"object","properties":{"assets":{"type":"array","description":"The assets of the epoch if sample prompts have been supplied during training","items":{"type":"object","properties":{"assetId":{"type":"string","description":"The AssetId of the image during training (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"url":{"type":"string","description":"The url of the asset"}},"required":["assetId","url"]}},"epoch":{"type":"string","description":"The epoch hash to identify the epoch"}},"required":["epoch"]}},"parameters":{"type":"object","description":"The parameters of the model","properties":{"priorLossWeight":{"type":"number","description":"The weight of prior preservation loss\n\nOnly available for SD15 and SDXL LoRA training","minimum":0,"maximum":1.7976931348623157e+308,"exclusiveMinimum":true},"seed":{"type":"number","description":"Used to reproduce previous results. Default: randomly generated number.\n\nOnly available for SD15 and SDXL LoRA training","minimum":0,"maximum":9007199254740991},"numUNetTrainSteps":{"type":"number","description":"The number of training steps for the UNet\n\nOnly available for SDXL LoRA training","minimum":0,"maximum":40000},"sampleSourceImages":{"type":"array","description":"The sample prompt images (AssetIds) paired with samplePrompts\nOnly available for Flux LoRA training\nMust be the same length as samplePrompts","items":{"type":"string"}},"classPrompt":{"type":"string","description":"The prompt to specify images in the same class as provided instance images\n\nOnly available for SD15 training"},"wandbKey":{"type":"string","description":"The Weights And Bias key to use for logging. 
The maximum length is 40 characters"},"scaleLr":{"type":"boolean","description":"Whether to scale the learning rate\n\nNote: Legacy parameter, will be ignored\n\nOnly available for SD15 and SDXL LoRA training","default":false},"randomCropScale":{"type":"number","description":"Scale of random crops\n\nOnly available for SD15 and SDXL LoRA training","minimum":0,"maximum":1},"lrScheduler":{"type":"string","description":"The scheduler type to use (default: \"constant\")\n\nOnly available for SD15 and SDXL LoRA training","enum":["constant","constant-with-warmup","cosine","cosine-with-restarts","linear","polynomial"],"default":"constant"},"rank":{"type":"number","description":"The dimension of the LoRA update matrices\n\nOnly available for SDXL and Flux LoRA training\n\nDefault value varies depending on the model type:\n- For SDXL: 64\n- For Flux: 16","minimum":4,"maximum":64},"validationPrompt":{"type":"string","description":"Validation prompt\n\nOnly available for SD15 and SDXL LoRA training","default":""},"conceptPrompt":{"type":"string","description":"The prompt with identifier specifying the instance (or subject) of the class (example: \"a daiton dog\")\n\nDefault value varies depending on the model type:\n- For SD1.5: \"daiton\" if no class is associated with the model\n- For SDXL: \"daiton\"\n- For Flux: \"\""},"maxTrainSteps":{"type":"number","description":"Maximum number of training steps to execute (default: varies depending on the model type)\n\nFor SDXL LoRA training, please use `numTextTrainSteps` and `numUNetTrainSteps` instead\n\nDefault value varies depending on the model type:\n- For SD1.5: round((number of training images * 225) / 3)\n- For SDXL: number of training images * 175\n- For Flux: number of training images * 100\n\nMaximum value varies depending on the model type:\n- For SD1.5 and SDXL: [0, 40000]\n- For Flux: [0, 10000]","minimum":0,"maximum":40000},"nbEpochs":{"type":"number","description":"The number of epochs to train for\n\nOnly available 
for Flux LoRA training","minimum":1,"maximum":30},"textEncoderTrainingRatio":{"type":"number","description":"Whether to train the text encoder or not\n\nExample: For 100 steps and a value of 0.2, it means that the text encoder will be trained for 20 steps and then the UNet for 80 steps\n\nNote: Legacy parameter, please use `numTextTrainSteps` and `numUNetTrainSteps`\n\nOnly available for SD15 and SDXL LoRA training","minimum":0,"maximum":0.99},"validationFrequency":{"type":"number","description":"Validation frequency. Cannot be greater than maxTrainSteps value\n\nOnly available for SD15 and SDXL LoRA training","minimum":0},"nbRepeats":{"type":"number","description":"The number of times to repeat the training\n\nOnly available for Flux LoRA training","minimum":1,"maximum":30},"samplePrompts":{"type":"array","description":"The prompts to use for each epoch\nOnly available for Flux LoRA training","items":{"type":"string"}},"randomCropRatio":{"type":"number","description":"Ratio of random crops\n\nOnly available for SD15 and SDXL LoRA training","minimum":0,"maximum":1},"learningRate":{"type":"number","description":"Initial learning rate (after the potential warmup period)\n\nDefault value varies depending on the model type:\n- For SD1.5 and SDXL: 0.000005\n- For Flux: 0.0001","minimum":0,"exclusiveMinimum":true},"optimizeFor":{"type":"string","description":"Optimize the model training task for a specific type of input images. 
The available values are:\n- \"likeness\": optimize training for likeness or portrait (targets specific transformer blocks)\n- \"all\": train all transformer blocks\n- \"none\": train no specific transformer blocks\n\nThis parameter controls which double and single transformer blocks are trained\nduring the LoRA training process.\n\nOnly available for Flux LoRA training","enum":["all","likeness","none"]},"randomCrop":{"type":"boolean","description":"Whether to random crop or center crop images before resizing to the working resolution\n\nOnly available for SD15 and SDXL LoRA training","default":false},"numTextTrainSteps":{"type":"number","description":"The number of training steps for the text encoder\n\nOnly available for SDXL LoRA training","minimum":0,"maximum":40000},"learningRateTextEncoder":{"type":"number","description":"Initial learning rate (after the potential warmup period) for the text encoder\n\nMaximum [Flux LoRA: 0.001]\nDefault [SDXL: 0.00005 | Flux LoRA: 0.00001]\nMinimum [SDXL: 0 | Flux LoRA: 0.000001]","minimum":0,"maximum":0.001,"exclusiveMinimum":true},"learningRateUnet":{"type":"number","description":"Initial learning rate (after the potential warmup period) for the UNet\n\nOnly available for SDXL LoRA training","minimum":0,"exclusiveMinimum":true},"batchSize":{"type":"number","description":"The batch size\nLess steps, and will increase the learning rate\n\nOnly available for Flux LoRA training","minimum":1,"maximum":4}}},"compliantModelIds":{"type":"array","description":"List of base model IDs compliant with the model (example: [\"flux.1-dev\", \"flux.1-schnell\"])\nThis attribute is mainly used for Flux LoRA models","items":{"type":"string"}},"status":{"type":"string","description":"The model 
status","enum":["copying","failed","new","trained","training","training-canceled"]}},"required":["capabilities","collectionIds","createdAt","custom","exampleAssetIds","id","privacy","source","status","tags","trainingImagesNumber","type","updatedAt"]}}},"PostWorkflowsResponse":{"type":"object","required":["workflow"],"properties":{"workflow":{"type":"object","properties":{"thumbnail":{"type":"object","description":"Currently the thumbnail is identical to the after asset.","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"before":{"type":"object","description":"A representation of an asset before being processed by the workflow","properties":{"assetId":{"type":"string","description":"The AssetId of the image used as a thumbnail for your model (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"url":{"type":"string","description":"The url of the image used as a thumbnail for your model"}},"required":["assetId","url"]},"inputs":{"type":"array","description":"The inputs of the workflow.","items":{"type":"object","description":"Defines the input parameters for a model. Use this to understand the available input parameters\nfor a given `modelId` when calling `POST /generate/custom/{modelId}`.\nSee {@link https://docs.scenario.com/docs/video-generation} for examples.","properties":{"parent":{"type":"boolean","description":"Whether this input represents a parent asset to assign to the produced assets.\nOnly available for \\`file\\` and \\`file_array\\` input types.\n\nFor \\`file_array\\`, the parent asset is the first item in the array."},"color":{"type":"boolean","description":"Whether the input is a color or not. 
Only available for \\`string\\` input type."},"inputs":{"type":"array","description":"The list of inputs which form an object within a container array.\nAll inputs are the same as the current object.\nThis is only available for type inputs_array inputs.","items":{"type":"object","properties":{}}},"minLength":{"type":"number","description":"The minimum allowed length for string inputs. Also applies to each item in \\`string_array\\`."},"backgroundBehavior":{"type":"string","description":"Specifies the background behavior for the input. Only available for \\`file\\` and \\`file_array\\`\ninput types with kind \\`image\\`.","enum":["opaque","transparent"]},"description":{"type":"string","description":"Help text displayed in the UI to provide additional information about the input"},"type":{"type":"string","description":"The data type of the input","enum":["boolean","file","file_array","inputs_array","model","model_array","number","number_array","string","string_array"]},"required":{"type":"object","description":"Set of rules that describes when this input is required:\n- \\`always\\`: Input is always required\n- \\`ifNotDefined\\`: Input is required when another specified input is not defined\n- \\`ifDefined\\`: Input is required when another specified input is defined\n- \\`conditionalValues\\`: Input is required when another input has a specific value\n\nBy default, the input is not required.","properties":{"always":{"type":"boolean","description":"Whether the input is always required"},"ifNotDefined":{"type":"object","description":"Makes this input required when another input is not defined:\n- Key: name of the input that must be undefined\n- Value: message to display when this input is required","properties":{}},"ifDefined":{"type":"object","description":"Makes this input required when another input is defined:\n- Key: name of the input that must be defined\n- Value: message to display when this input is 
required","properties":{}},"conditionalValues":{"type":"object","description":"Makes this input required when another input has a specific value:\n- Key: name of the input to check\n- Value: operation and allowed values that trigger the requirement","properties":{}}}},"modelTypes":{"type":"array","description":"The allowed model types for this input. Example: \\`[\"flux.1-lora\"]\\`.\nOnly available for \\`model_array\\` input type.","items":{"type":"string","enum":["custom","flux.1","flux.1-composition","flux.1-kontext-dev","flux.1-kontext-lora","flux.1-krea-dev","flux.1-krea-lora","flux.1-lora","flux.1-pro","flux.1.1-pro-ultra","flux1.1-pro","gpt-image-1","sd-1_5","sd-1_5-composition","sd-1_5-lora","sd-xl","sd-xl-composition","sd-xl-lora"]}},"min":{"type":"number","description":"The minimum allowed value. Only available for \\`number\\` and array input types."},"promptSpark":{"type":"boolean","description":"Whether the input is used with prompt spark. Only available for \\`string\\` input type."},"placeholder":{"type":"string","description":"Placeholder text for the input. Only available for 'string' input type."},"group":{"type":"string","description":"Used to visually group inputs together in the UI. Inputs with the same group value appear\nconsecutively in the UI."},"allowedValues":{"type":"array","description":"The allowed values for the input. For \\`string\\` or \\`number\\` types, creates a single-select\ndropdown.\nFor \\`string_array\\` type, creates a multi-select dropdown.","items":{}},"costImpact":{"type":"boolean","description":"Whether this input affects the model's cost calculation"},"max":{"type":"number","description":"The maximum allowed value. Only available for \\`number\\` and \\`array\\` input types."},"kind":{"type":"string","description":"The asset kind of the input. 
Only taken into account for \\`file\\` and \\`file_array\\` input types.","enum":["3d","audio","document","image","image-hdr","json","video"]},"maskFrom":{"type":"string","description":"The name of the file input field to use as the mask source"},"label":{"type":"string","description":"The label displayed in the UI for this input"},"hint":{"type":"string","description":"Hint text displayed in the UI as a tooltip to guide the user"},"name":{"type":"string","description":"The name that must be used to call the model through the API"},"step":{"type":"number","description":"The step increment for numeric inputs. Only available for \\`number\\` input type.","minimum":1},"prompt":{"type":"boolean","description":"Whether the input is a prompt. When true, displays as a text area with prompt spark feature.\nOnly available for \\`string\\` input type."},"maxLength":{"type":"number","description":"The maximum allowed length for \\`string\\` inputs. Also applies to each item in \\`string_array\\`."}},"required":["name","type"]}},"description":{"type":"string","description":"The description of the workflow."},"privacy":{"type":"string","enum":["private","public","unlisted"]},"uiConfig":{"type":"object","description":"The UI configuration for the workflow. 
This is managed by scenario webapp.","properties":{"selects":{"type":"object","description":"Configuration for the selects","additionalProperties":{"type":"object","properties":{}}},"inputProperties":{"type":"object","description":"Configuration for the input properties","additionalProperties":{"type":"object","properties":{"collapsed":{"type":"boolean"}}}},"presets":{"type":"array","description":"Configuration for the presets","items":{"type":"object","properties":{"presets":{"type":"object","properties":{}},"fields":{"type":"array","items":{"type":"string"}}},"required":["fields","presets"]}},"triggerGenerate":{"type":"object","description":"Configuration for the trigger generate button","properties":{"after":{"type":"string","description":"The 'name' of the input where the trigger generate button will be displayed (after the input).\nDo not specify both position and after."},"label":{"type":"string"},"position":{"type":"string","description":"The position of the trigger generate button. 
If position specified, the button will be displayed at the specified position.\nDo not specify both position and after.","enum":["bottom","top"]}},"required":["label"]},"lorasComponent":{"type":"object","description":"Configuration for the loras component","properties":{"modelIdInput":{"type":"string","description":"The input model id (example: a composition or a single LoRA modelId)\nIf specified, the model id will be attached to the output asset as a metadata\nIf the model-decomposer parser is specified on it, modelInput and scaleInput will be automatically populated"},"scaleInput":{"type":"string","description":"The input name of the scale (number_array)"},"label":{"type":"string","description":"The label of the component"},"modelInput":{"type":"string","description":"The input name of the model (model_array)"}},"required":["label","modelInput","scaleInput"]},"resolutionComponent":{"type":"object","description":"Configuration for the resolution component","properties":{"presets":{"type":"array","description":"The resolution presets","items":{"type":"object","properties":{"width":{"type":"number"},"label":{"type":"string"},"height":{"type":"number"}},"required":["height","label","width"]}},"widthInput":{"type":"string","description":"The input name of the width"},"label":{"type":"string","description":"The label of the component"},"heightInput":{"type":"string","description":"The input name of the height"}},"required":["heightInput","label","presets","widthInput"]}}},"shortDescription":{"type":"string"},"authorId":{"type":"string"},"ownerId":{"type":"string"},"editorInfo":{"type":"object","description":"The UI data about the workflow. 
This is managed by scenario webapp.","properties":{}},"createdAt":{"type":"string","description":"ISO string"},"tagSet":{"type":"array","description":"The tag set of the workflow.","items":{"type":"string"}},"name":{"type":"string"},"after":{"type":"object","description":"A representation of an asset after being processed by the workflow","properties":{"assetId":{"type":"string","description":"The AssetId of the image used as a thumbnail for your model (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"url":{"type":"string","description":"The url of the image used as a thumbnail for your model"}},"required":["assetId","url"]},"id":{"type":"string"},"flow":{"type":"array","description":"The flow of the workflow.","items":{"type":"object","properties":{"logicType":{"type":"string","description":"The type of the logic for the node.\nOnly available for logic nodes.","enum":["if-else"]},"dependsOn":{"type":"array","description":"The nodes that this node depends on.\nOnly available for nodes that have dependencies. Mainly used for user approval nodes.","items":{"type":"string"}},"modelId":{"type":"string","description":"The model id for the node. Mainly used for custom model tasks."},"inputs":{"type":"array","description":"The inputs of the node.","items":{"type":"object","description":"This is used to run the flow.","properties":{"parent":{"type":"boolean","description":"Whether this input represents a parent asset to assign to the produced assets.\nOnly available for \\`file\\` and \\`file_array\\` input types.\n\nFor \\`file_array\\`, the parent asset is the first item in the array."},"color":{"type":"boolean","description":"Whether the input is a color or not. 
Only available for \\`string\\` input type."},"inputs":{"type":"array","description":"The list of inputs which form an object within a container array.\nAll inputs are the same as the current object.\nThis is only available for type inputs_array inputs.","items":{"type":"object","properties":{}}},"minLength":{"type":"number","description":"The minimum allowed length for string inputs. Also applies to each item in \\`string_array\\`."},"backgroundBehavior":{"type":"string","description":"Specifies the background behavior for the input. Only available for \\`file\\` and \\`file_array\\`\ninput types with kind \\`image\\`.","enum":["opaque","transparent"]},"description":{"type":"string","description":"Help text displayed in the UI to provide additional information about the input"},"type":{"type":"string","description":"The data type of the input","enum":["boolean","file","file_array","inputs_array","model","model_array","number","number_array","string","string_array"]},"required":{"type":"object","description":"Set of rules that describes when this input is required:\n- \\`always\\`: Input is always required\n- \\`ifNotDefined\\`: Input is required when another specified input is not defined\n- \\`ifDefined\\`: Input is required when another specified input is defined\n- \\`conditionalValues\\`: Input is required when another input has a specific value\n\nBy default, the input is not required.","properties":{"always":{"type":"boolean","description":"Whether the input is always required"},"ifNotDefined":{"type":"object","description":"Makes this input required when another input is not defined:\n- Key: name of the input that must be undefined\n- Value: message to display when this input is required","properties":{}},"ifDefined":{"type":"object","description":"Makes this input required when another input is defined:\n- Key: name of the input that must be defined\n- Value: message to display when this input is 
required","properties":{}},"conditionalValues":{"type":"object","description":"Makes this input required when another input has a specific value:\n- Key: name of the input to check\n- Value: operation and allowed values that trigger the requirement","properties":{}}}},"modelTypes":{"type":"array","description":"The allowed model types for this input. Example: \\`[\"flux.1-lora\"]\\`.\nOnly available for \\`model_array\\` input type.","items":{"type":"string","enum":["custom","flux.1","flux.1-composition","flux.1-kontext-dev","flux.1-kontext-lora","flux.1-krea-dev","flux.1-krea-lora","flux.1-lora","flux.1-pro","flux.1.1-pro-ultra","flux1.1-pro","gpt-image-1","sd-1_5","sd-1_5-composition","sd-1_5-lora","sd-xl","sd-xl-composition","sd-xl-lora"]}},"ref":{"type":"object","description":"The reference to another input or output of the same workflow.\nMust have at least one of node or conditional.","properties":{"equal":{"type":"string","description":"This is the desired node output value if ref is an if/else node."},"node":{"type":"string","description":"The node id or 'workflow' if the source is a workflow input."},"conditional":{"type":"array","description":"The conditional nodes to reference.\nIf the conditional nodes are successful, the node will be successful.\nIf the conditional nodes are skipped, the node will be skipped.\nContains an array of node ids used to check the status of the nodes.","items":{"type":"string"}},"name":{"type":"string","description":"The name of the input or output to reference.\nIf the type is 'workflow', the name is the name of the workflow input and is required\nIf the type is 'node', the name is not mandatory, except if you want all outputs of the node.\nTo get all outputs of a node, you can use the name 'all'."}}},"min":{"type":"number","description":"The minimum allowed value. Only available for \\`number\\` and array input types."},"promptSpark":{"type":"boolean","description":"Whether the input is used with prompt spark. 
Only available for \\`string\\` input type."},"placeholder":{"type":"string","description":"Placeholder text for the input. Only available for 'string' input type."},"group":{"type":"string","description":"Used to visually group inputs together in the UI. Inputs with the same group value appear\nconsecutively in the UI."},"allowedValues":{"type":"array","description":"The allowed values for the input. For \\`string\\` or \\`number\\` types, creates a single-select\ndropdown.\nFor \\`string_array\\` type, creates a multi-select dropdown.","items":{}},"costImpact":{"type":"boolean","description":"Whether this input affects the model's cost calculation"},"max":{"type":"number","description":"The maximum allowed value. Only available for \\`number\\` and \\`array\\` input types."},"kind":{"type":"string","description":"The asset kind of the input. Only taken into account for \\`file\\` and \\`file_array\\` input types.","enum":["3d","audio","document","image","image-hdr","json","video"]},"maskFrom":{"type":"string","description":"The name of the file input field to use as the mask source"},"label":{"type":"string","description":"The label displayed in the UI for this input"},"hint":{"type":"string","description":"Hint text displayed in the UI as a tooltip to guide the user"},"name":{"type":"string","description":"The name that must be used to call the model through the API"},"step":{"type":"number","description":"The step increment for numeric inputs. Only available for \\`number\\` input type.","minimum":1},"prompt":{"type":"boolean","description":"Whether the input is a prompt. When true, displays as a text area with prompt spark feature.\nOnly available for \\`string\\` input type."},"maxLength":{"type":"number","description":"The maximum allowed length for \\`string\\` inputs. 
Also applies to each item in \\`string_array\\`."}},"required":["name","type"]}},"id":{"type":"string","description":"The id of the node."},"logic":{"type":"object","description":"The logic of the node.\nOnly available for logic nodes.","properties":{"default":{"type":"string","description":"The default case of the logic.\nContains the id/output of the node to execute if no case is matched.\nOnly available for if/else nodes."},"transform":{"type":"string","description":"The transform of the logic.\nOnly available for transform nodes."},"cases":{"type":"array","description":"The cases of the logic.\nOnly available for if/else nodes.","items":{"type":"object","properties":{"condition":{"type":"string"},"value":{"type":"string"}},"required":["condition","value"]}}}},"type":{"type":"string","description":"The type of the job for the node.","enum":["custom-model","generate-prompt","logic","remove-background","transform","user-approval","workflow"]},"workflowId":{"type":"string","description":"The workflow id for the node. Mainly used for workflow tasks."}},"required":["id","type"]}},"outputAssetKinds":{"type":"array","items":{"type":"string","enum":["3d","audio","document","image","image-hdr","json","video"]}},"status":{"type":"string","enum":["deleted","draft","ready"]},"updatedAt":{"type":"string","description":"ISO string"}},"required":["authorId","createdAt","description","editorInfo","flow","id","inputs","name","ownerId","privacy","status","tagSet","updatedAt"]}}},"PostJobActionByJobIdRequest":{"type":"object","properties":{"action":{"type":"string","description":"The action to execute on the job, such as canceling it. 
Today only cancel on inference jobs is supported.","enum":["cancel"]}}},"GetUserConfirmDeletionResponse":{},"PostSearchAssetsResponse":{"type":"object","required":["hits","limit","offset"],"properties":{"hits":{"type":"array","items":{"type":"object","properties":{"metadata":{"type":"object","description":"Metadata containing detailed information about the asset's properties and generation parameters","properties":{"thumbnail":{"type":"object","description":"Thumbnail image information for the asset. Ex. Canvas thumbnail","properties":{"assetId":{"type":"string","description":"ID of the asset used as thumbnail"}},"required":["assetId"]},"seed":{"type":"string","description":"Random seed used for generation, enables reproducibility"},"modelId":{"type":"string","description":"ID of the specific model used if asset is model-based"},"kind":{"type":"string","description":"Classification of the asset (e.g., \"image\", \"video\")"},"aspectRatio":{"type":"string","description":"Aspect ratio of the asset (e.g., \"1:1\", \"16:9\")"},"modelType":{"type":"string","description":"Type of model used for generation (e.g., \"sd-1_5\", \"flux.1\")"},"type":{"type":"string","description":"Specific type/category of the asset (e.g., \"canvas\", \"inference-txt\")"},"negativePrompt":{"type":"string","description":"Negative prompt used during generation to specify what to avoid"},"scheduler":{"type":"string","description":"Scheduler algorithm used during generation"},"size":{"type":"number","description":"File size of the asset in bytes"},"guidance":{"type":"number","description":"Guidance scale used for diffusion models to control prompt adherence"},"numInferenceSteps":{"type":"number","description":"Number of inference steps used during generation"},"parentJobId":{"type":"string","description":"ID of the parent job that created this asset"},"name":{"type":"string","description":"Custom name given to the asset"},"width":{"type":"number","description":"Width of the asset in 
pixels"},"negativePromptStrength":{"type":"number","description":"For Flux models: controls the influence of the negative prompt"},"text":{"type":"string","description":"Text used to generate the asset: mainly for speech to text"},"baseModelId":{"type":"string","description":"ID of the base model used for generation"},"prompt":{"type":"string","description":"Text prompt used to generate the asset"},"height":{"type":"number","description":"Height of the asset in pixels"}}},"thumbnail":{"type":"object","description":"Thumbnail preview of the asset 3D, video...","properties":{"assetId":{"type":"string","description":"ID of the asset used as thumbnail"},"url":{"type":"string","description":"Signed URL of the thumbnail"}},"required":["assetId","url"]},"transcription":{"type":"string","description":"Transcription of the asset. Can be either:\n- Automatically generated transcription for audio assets with speech"},"nsfw":{"type":"array","description":"Array of detected NSFW categories","items":{"type":"string"}},"description":{"type":"string","description":"Textual description of the asset. 
Can be either:\n- User-provided description for training images\n- Automatically generated caption for images and videos assets\n- Automatically generated description for audio assets"},"privacy":{"type":"string","description":"Privacy setting of the asset (\"public\", \"private\", or \"unlisted\")"},"mimeType":{"type":"string","description":"MIME type of the asset (e.g., \"image/png\", \"image/jpeg\")"},"authorId":{"type":"string","description":"User ID of the asset creator/author"},"ownerId":{"type":"string","description":"Project ID of the asset owner"},"url":{"type":"string","description":"Signed URL of the asset"},"tags":{"type":"array","description":"Array of user-assigned tags for categorization","items":{"type":"string"}},"createdAt":{"type":"string","description":"Creation timestamp in ISO 8601 format (e.g., \"2025-01-16T11:19:41.579Z\")"},"score":{"type":"number","description":"Score assigned to the asset based on usage and popularity"},"collectionIds":{"type":"array","description":"Array of collection IDs this asset belongs to","items":{"type":"string"}},"teamId":{"type":"string","description":"Team ID of the asset owner"},"id":{"type":"string","description":"Unique identifier for the asset (e.g., \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"updatedAt":{"type":"string","description":"Last modification timestamp in ISO 8601 format (e.g., \"2025-01-16T11:19:41.579Z\")"}},"required":["authorId","collectionIds","createdAt","id","metadata","mimeType","nsfw","ownerId","privacy","tags","teamId","updatedAt","url"]}},"offset":{"type":"number","description":"Number of documents skipped"},"hitsPerPage":{"type":"number","description":"Number of results on each page"},"totalHits":{"type":"number","description":"Exhaustive total number of matches"},"limit":{"type":"number","description":"Maximum number of documents returned"},"totalPages":{"type":"number","description":"Exhaustive total number of search result pages"},"page":{"type":"number","description":"Current search 
results page"},"estimatedTotalHits":{"type":"number","description":"Estimated total number of hits"}},"description":"Search result for asset search"},"PutModelsTagsByModelIdRequest":{"type":"object","properties":{"add":{"type":"array","description":"The list of tags to add","items":{"type":"string"}},"strict":{"type":"boolean","description":"If true, the function will throw an error if:\n- one of the tags to add already exists\n- one of the tags to delete is not found\nIf false, the endpoint will behave as if it was idempotent","default":true},"delete":{"type":"array","description":"The list of tags to delete","items":{"type":"string"}}}},"PostDescribeStyleInferencesRequest":{"type":"object","properties":{"ensureIPCleared":{"type":"boolean","description":"Whether we try to ensure IP removal for new prompt generation."},"images":{"type":"array","description":"List of images used to condition the generation.\n\nImages are set as data URLs (example: \\\"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVQYV2NgYAAAAAMAAWgmWQ0AAAAASUVORK5CYII=\\\") or the asset IDs (example: \\\"asset_GTrL3mq4SXWyMxkOHRxlpw\\\").\n\nNotes:\n- in `contextual` mode, images condition prompt generation by using their actual descriptions as context\n- in all other modes, it supersedes the `modelId` parameter when provided.","items":{"type":"string"}},"seed":{"type":"number","description":"If specified, the API will make a best effort to produce the same results, such that repeated requests with the same `seed` and parameters should return the same outputs. 
Must be used along with the same parameters including prompt, model's state, etc.."},"unwantedSequences":{"type":"array","description":"Optional list of words sequences that should not be present in the generated prompts.","items":{"type":"string"}},"modelId":{"type":"string","description":"The modelId used to condition the generation.\n\nWhen provided, the generation will take into account model's training images, examples.\n\nIn `contextual` mode, the modelId is used to retrieve additional context from the model such as its type and capabilities."},"temperature":{"type":"number","description":"The sampling temperature to use. Higher values like `0.8` will make the output more random, while lower values like `0.2` will make it more focused and deterministic.\n\nWe generally recommend altering this or `topP` but not both.","minimum":0,"maximum":2},"assetIds":{"type":"array","description":"The asset ids used to condition the generation.","items":{"type":"string"}},"topP":{"type":"number","description":"An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. 
So `0.1` means only the tokens comprising the top `10%` probability mass are considered.\n\nWe generally recommend altering this or `temperature` but not both.","minimum":0,"maximum":1}}},"PostCollectionResponse":{"type":"object","required":["collection"],"properties":{"collection":{"type":"object","properties":{"createdAt":{"type":"string","description":"The collection creation date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"thumbnail":{"type":"object","description":"The thumbnail for the collection (if any)","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"name":{"type":"string","description":"The collection name"},"assetCount":{"type":"number"},"id":{"type":"string","description":"The collection ID (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"ownerId":{"type":"string","description":"The owner ID (example: \"dcf121faaa1a0a0bbbd9ca1b73d62aea\")"},"modelCount":{"type":"number"},"itemCount":{"type":"number"},"updatedAt":{"type":"string","description":"The collection last update date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"}},"required":["assetCount","createdAt","id","itemCount","modelCount","name","ownerId","updatedAt"]}}},"PostModelsTransferByModelIdResponse":{"type":"object","required":["model"],"properties":{"model":{"type":"object","properties":{"trainingImages":{"type":"array","description":"The URLs of the first 3 training images of the model. 
To retrieve the full set of images, get it by modelId","items":{"type":"object","properties":{"createdAt":{"type":"string","description":"The training image upload date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"automaticCaptioning":{"type":"string","description":"Automatic captioning of the image"},"downloadUrl":{"type":"string","description":"The URL of the image"},"name":{"type":"string","description":"The original file name of the image (example: \"my-training-image.jpg\")"},"description":{"type":"string","description":"Description for the image"},"id":{"type":"string","description":"The training image ID (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"}},"required":["automaticCaptioning","createdAt","description","downloadUrl","id","name"]}},"inputs":{"type":"array","description":"The inputs of the model. Only used for custom models. To retrieve this list, get it by modelId with GET /models/{modelId}","items":{"type":"object","description":"Defines the input parameters for a model. Use this to understand the available input parameters\nfor a given `modelId` when calling `POST /generate/custom/{modelId}`.\nSee {@link https://docs.scenario.com/docs/video-generation} for examples.","properties":{"parent":{"type":"boolean","description":"Whether this input represents a parent asset to assign to the produced assets.\nOnly available for \\`file\\` and \\`file_array\\` input types.\n\nFor \\`file_array\\`, the parent asset is the first item in the array."},"color":{"type":"boolean","description":"Whether the input is a color or not. Only available for \\`string\\` input type."},"inputs":{"type":"array","description":"The list of inputs which form an object within a container array.\nAll inputs are the same as the current object.\nThis is only available for type inputs_array inputs.","items":{"type":"object","properties":{}}},"minLength":{"type":"number","description":"The minimum allowed length for string inputs. 
Also applies to each item in \\`string_array\\`."},"backgroundBehavior":{"type":"string","description":"Specifies the background behavior for the input. Only available for \\`file\\` and \\`file_array\\`\ninput types with kind \\`image\\`.","enum":["opaque","transparent"]},"description":{"type":"string","description":"Help text displayed in the UI to provide additional information about the input"},"type":{"type":"string","description":"The data type of the input","enum":["boolean","file","file_array","inputs_array","model","model_array","number","number_array","string","string_array"]},"required":{"type":"object","description":"Set of rules that describes when this input is required:\n- \\`always\\`: Input is always required\n- \\`ifNotDefined\\`: Input is required when another specified input is not defined\n- \\`ifDefined\\`: Input is required when another specified input is defined\n- \\`conditionalValues\\`: Input is required when another input has a specific value\n\nBy default, the input is not required.","properties":{"always":{"type":"boolean","description":"Whether the input is always required"},"ifNotDefined":{"type":"object","description":"Makes this input required when another input is not defined:\n- Key: name of the input that must be undefined\n- Value: message to display when this input is required","properties":{}},"ifDefined":{"type":"object","description":"Makes this input required when another input is defined:\n- Key: name of the input that must be defined\n- Value: message to display when this input is required","properties":{}},"conditionalValues":{"type":"object","description":"Makes this input required when another input has a specific value:\n- Key: name of the input to check\n- Value: operation and allowed values that trigger the requirement","properties":{}}}},"modelTypes":{"type":"array","description":"The allowed model types for this input. 
Example: \\`[\"flux.1-lora\"]\\`.\nOnly available for \\`model_array\\` input type.","items":{"type":"string","enum":["custom","flux.1","flux.1-composition","flux.1-kontext-dev","flux.1-kontext-lora","flux.1-krea-dev","flux.1-krea-lora","flux.1-lora","flux.1-pro","flux.1.1-pro-ultra","flux1.1-pro","gpt-image-1","sd-1_5","sd-1_5-composition","sd-1_5-lora","sd-xl","sd-xl-composition","sd-xl-lora"]}},"min":{"type":"number","description":"The minimum allowed value. Only available for \\`number\\` and array input types."},"promptSpark":{"type":"boolean","description":"Whether the input is used with prompt spark. Only available for \\`string\\` input type."},"placeholder":{"type":"string","description":"Placeholder text for the input. Only available for 'string' input type."},"group":{"type":"string","description":"Used to visually group inputs together in the UI. Inputs with the same group value appear\nconsecutively in the UI."},"allowedValues":{"type":"array","description":"The allowed values for the input. For \\`string\\` or \\`number\\` types, creates a single-select\ndropdown.\nFor \\`string_array\\` type, creates a multi-select dropdown.","items":{}},"costImpact":{"type":"boolean","description":"Whether this input affects the model's cost calculation"},"max":{"type":"number","description":"The maximum allowed value. Only available for \\`number\\` and \\`array\\` input types."},"kind":{"type":"string","description":"The asset kind of the input. 
Only taken into account for \\`file\\` and \\`file_array\\` input types.","enum":["3d","audio","document","image","image-hdr","json","video"]},"maskFrom":{"type":"string","description":"The name of the file input field to use as the mask source"},"label":{"type":"string","description":"The label displayed in the UI for this input"},"hint":{"type":"string","description":"Hint text displayed in the UI as a tooltip to guide the user"},"name":{"type":"string","description":"The name that must be used to call the model through the API"},"step":{"type":"number","description":"The step increment for numeric inputs. Only available for \\`number\\` input type.","minimum":1},"prompt":{"type":"boolean","description":"Whether the input is a prompt. When true, displays as a text area with prompt spark feature.\nOnly available for \\`string\\` input type."},"maxLength":{"type":"number","description":"The maximum allowed length for \\`string\\` inputs. Also applies to each item in \\`string_array\\`."}},"required":["name","type"]}},"exampleAssetIds":{"type":"array","description":"List of all example asset IDs setup by the model owner","items":{"type":"string"}},"privacy":{"type":"string","description":"The privacy of the model (default: private)","enum":["private","public","unlisted"],"default":"private"},"softDeletionOn":{"type":"string","description":"The date when the model will be soft deleted (only for Free plan)"},"epoch":{"type":"string","description":"The epoch of the model. 
Only available for Flux Lora Trained models.\nIf not set, uses the final model epoch (latest)"},"uiConfig":{"type":"object","description":"The UI configuration for the model","properties":{"selects":{"type":"object","description":"Configuration for the selects","additionalProperties":{"type":"object","properties":{}}},"inputProperties":{"type":"object","description":"Configuration for the input properties","additionalProperties":{"type":"object","properties":{"collapsed":{"type":"boolean"}}}},"presets":{"type":"array","description":"Configuration for the presets","items":{"type":"object","properties":{"presets":{"type":"object","properties":{}},"fields":{"type":"array","items":{"type":"string"}}},"required":["fields","presets"]}},"triggerGenerate":{"type":"object","description":"Configuration for the trigger generate button","properties":{"after":{"type":"string","description":"The 'name' of the input where the trigger generate button will be displayed (after the input).\nDo not specify both position and after."},"label":{"type":"string"},"position":{"type":"string","description":"The position of the trigger generate button. 
If position specified, the button will be displayed at the specified position.\nDo not specify both position and after.","enum":["bottom","top"]}},"required":["label"]},"lorasComponent":{"type":"object","description":"Configuration for the loras component","properties":{"modelIdInput":{"type":"string","description":"The input model id (example: a composition or a single LoRA modelId)\nIf specified, the model id will be attached to the output asset as a metadata\nIf the model-decomposer parser is specified on it, modelInput and scaleInput will be automatically populated"},"scaleInput":{"type":"string","description":"The input name of the scale (number_array)"},"label":{"type":"string","description":"The label of the component"},"modelInput":{"type":"string","description":"The input name of the model (model_array)"}},"required":["label","modelInput","scaleInput"]},"resolutionComponent":{"type":"object","description":"Configuration for the resolution component","properties":{"presets":{"type":"array","description":"The resolution presets","items":{"type":"object","properties":{"width":{"type":"number"},"label":{"type":"string"},"height":{"type":"number"}},"required":["height","label","width"]}},"widthInput":{"type":"string","description":"The input name of the width"},"label":{"type":"string","description":"The label of the component"},"heightInput":{"type":"string","description":"The input name of the height"}},"required":["heightInput","label","presets","widthInput"]}}},"source":{"type":"string","description":"The source of the model","enum":["civitai","huggingface","other","scenario"]},"ownerId":{"type":"string","description":"The owner ID (example: \"team_VFhihHKMRZyDDnZAJwLb2Q\")"},"type":{"type":"string","description":"The model type (example: 
\"flux.1-lora\")","enum":["custom","flux.1","flux.1-composition","flux.1-kontext-dev","flux.1-kontext-lora","flux.1-krea-dev","flux.1-krea-lora","flux.1-lora","flux.1-pro","flux.1.1-pro-ultra","flux1.1-pro","gpt-image-1","sd-1_5","sd-1_5-composition","sd-1_5-lora","sd-xl","sd-xl-composition","sd-xl-lora"]},"createdAt":{"type":"string","description":"The model creation date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"complianceMetadata":{"type":"object","description":"Compliance and regulatory metadata for the model","properties":{"subProcessor":{"type":"string","description":"The sub-processor used for the model","enum":["AWS","Fal","Modal","Provider","Replicate"]},"modelProvider":{"type":"string","description":"The provider of the model (examples: 'Google', 'AWS', 'BFL', 'Meshy', etc.)"},"licenseTerms":{"type":"string","description":"URL to license terms"},"dataProcessingComment":{"type":"string","description":"Data processing comment (e.g., zeroRetention, temporaryRetention, etc.)","enum":["researchOnly","serviceImprovement","temporaryRetention","zeroRetention"]},"maintainer":{"type":"string","description":"The maintainer of the model","enum":["Fal","Provider","Replicate","Scenario"]}}},"trainingStats":{"type":"object","description":"Additional information about the model's training","properties":{"trainDuration":{"type":"number","description":"The training duration in seconds"},"queueDuration":{"type":"number","description":"The training queued duration in seconds"},"endedAt":{"type":"string","description":"The training end time as an ISO date string"},"startedAt":{"type":"string","description":"The training start time as an ISO date string"}}},"promptEmbedding":{"type":"string","description":"Fine-tune the model's inferences with prompt embedding"},"trainingProgress":{"type":"object","description":"Additional information about the training progress of the model","properties":{"stage":{"type":"string","description":"The stage of the 
request","enum":["pending","queued-for-train","running-train","starting-train"]},"remainingTimeMs":{"type":"number","description":"The remaining time in milliseconds"},"progress":{"type":"number","description":"The progress of the job","minimum":0,"maximum":1},"startedAt":{"type":"number","description":"The timestamp in millisecond marking the start of the process"},"position":{"type":"number","description":"Position of the job in the queue (ie. the number of job in the queue before this one)"},"updatedAt":{"type":"number","description":"Timestamp in milliseconds of the last time the training progress was updated"}},"required":["stage","updatedAt"]},"id":{"type":"string","description":"The model ID (example: \"model_eyVcnFJcR92BxBkz7N6g5w\")"},"class":{"type":"object","description":"The class of the model","properties":{"modelId":{"type":"string","description":"The model ID of the class (example: \"stable-diffusion-v1-5\")"},"name":{"type":"string","description":"The class name (example: \"Character Design\")"},"category":{"type":"string","description":"The category slug of the class (example: \"art-style\")"},"thumbnails":{"type":"array","description":"Some example images URLs to showcase the class","items":{"type":"string"}},"prompt":{"type":"string","description":"The class prompt (example: \"a character design\")"},"conceptPrompt":{"type":"string","description":"The concept prompt of the class (example: \"a sks character design\")"},"slug":{"type":"string","description":"The class slug (example: \"art-style-character-design\")"},"status":{"type":"string","description":"The class status (only published classes are listed, but unpublished classes can still appear in existing models)","enum":["published","unpublished"]}},"required":["category","conceptPrompt","modelId","name","prompt","slug","status","thumbnails"]},"updatedAt":{"type":"string","description":"The model last update date as an ISO string (example: 
\"2023-02-03T11:19:41.579Z\")"},"thumbnail":{"type":"object","description":"A thumbnail for your model","properties":{"assetId":{"type":"string","description":"The AssetId of the image used as a thumbnail for your model (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"url":{"type":"string","description":"The url of the image used as a thumbnail for your model"}},"required":["assetId","url"]},"accessRestrictions":{"type":"number","description":"The access restrictions of the model\n0: Free plan\n25: Creator plan\n50: Pro plan\n75: Team plan\n100: Enterprise plan"},"capabilities":{"type":"array","description":"List of model capabilities (example: [\"txt2img\", \"img2img\", \"txt2img_ip_adapter\", ...])","items":{"type":"string","enum":["3d23d","audio2audio","controlnet","controlnet_img2img","controlnet_inpaint","controlnet_inpaint_ip_adapter","controlnet_ip_adapter","controlnet_reference","controlnet_texture","img23d","img2img","img2img_ip_adapter","img2img_texture","img2txt","img2video","inpaint","inpaint_ip_adapter","outpaint","reference","reference_texture","txt23d","txt2audio","txt2img","txt2img_ip_adapter","txt2img_texture","txt2txt","txt2video","video2img","video2video"]}},"parentModelId":{"type":"string","description":"The id of the parent model"},"trainingImagePairs":{"type":"array","description":"Array of training image pairs","items":{"type":"object","properties":{"sourceId":{"type":"string","description":"The source asset ID (must be a training asset)"},"targetId":{"type":"string","description":"The target asset ID (must be a training asset)"},"instruction":{"type":"string","description":"The instruction for the image pair, source to target"}}}},"trainingImagesNumber":{"type":"number","description":"The total number of training images"},"custom":{"type":"boolean","description":"Whether the model is a custom model and can be used only with POST /generate/custom/{modelId} endpoint"},"modelKeyword":{"type":"string","description":"The model keyword, this is a 
legacy parameter, please use conceptPrompt in parameters"},"shortDescription":{"type":"string","description":"The model short description (example: \"This model generates highly detailed cinematic scenes.\")"},"authorId":{"type":"string","description":"The author user ID (example: \"user_VFhihHKMRZyDDnZAJwLb2Q\")"},"userId":{"type":"string","description":"(Deprecated) The user ID (example: \"user_VFhihHKMRZyDDnZAJwLb2Q\")"},"tags":{"type":"array","description":"The associated tags (example: [\"sci-fi\", \"landscape\"])","items":{"type":"string"}},"negativePromptEmbedding":{"type":"string","description":"Fine-tune the model's inferences with negative prompt embedding"},"concepts":{"type":"array","description":"The concepts is required for the type model: composition","items":{"type":"object","properties":{"modelId":{"type":"string","description":"The model ID (example: \"model_eyVcnFJcR92BxBkz7N6g5w\")"},"modelEpoch":{"type":"string","description":"The epoch of the model (example: \"000001\")\nOnly available for Flux Lora Trained models"},"scale":{"type":"number","description":"The scale of the model (example: 1.0)\nFor Flux Kontext Prompt Editing, the scale is between 0 and 2.","minimum":-2,"maximum":2}},"required":["modelId","scale"]}},"collectionIds":{"type":"array","description":"A list of CollectionId this model belongs to","items":{"type":"string"}},"name":{"type":"string","description":"The model name (example: \"Cinematic Realism\")"},"epochs":{"type":"array","description":"The epochs of the model. 
Only available for Flux Lora Trained models.","items":{"type":"object","properties":{"assets":{"type":"array","description":"The assets of the epoch if sample prompts have been supplied during training","items":{"type":"object","properties":{"assetId":{"type":"string","description":"The AssetId of the image during training (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"url":{"type":"string","description":"The url of the asset"}},"required":["assetId","url"]}},"epoch":{"type":"string","description":"The epoch hash to identify the epoch"}},"required":["epoch"]}},"parameters":{"type":"object","description":"The parameters of the model","properties":{"priorLossWeight":{"type":"number","description":"The weight of prior preservation loss\n\nOnly available for SD15 and SDXL LoRA training","minimum":0,"maximum":1.7976931348623157e+308,"exclusiveMinimum":true},"seed":{"type":"number","description":"Used to reproduce previous results. Default: randomly generated number.\n\nOnly available for SD15 and SDXL LoRA training","minimum":0,"maximum":9007199254740991},"numUNetTrainSteps":{"type":"number","description":"The number of training steps for the UNet\n\nOnly available for SDXL LoRA training","minimum":0,"maximum":40000},"sampleSourceImages":{"type":"array","description":"The sample prompt images (AssetIds) paired with samplePrompts\nOnly available for Flux LoRA training\nMust be the same length as samplePrompts","items":{"type":"string"}},"classPrompt":{"type":"string","description":"The prompt to specify images in the same class as provided instance images\n\nOnly available for SD15 training"},"wandbKey":{"type":"string","description":"The Weights and Biases key to use for logging. 
The maximum length is 40 characters"},"scaleLr":{"type":"boolean","description":"Whether to scale the learning rate\n\nNote: Legacy parameter, will be ignored\n\nOnly available for SD15 and SDXL LoRA training","default":false},"randomCropScale":{"type":"number","description":"Scale of random crops\n\nOnly available for SD15 and SDXL LoRA training","minimum":0,"maximum":1},"lrScheduler":{"type":"string","description":"The scheduler type to use (default: \"constant\")\n\nOnly available for SD15 and SDXL LoRA training","enum":["constant","constant-with-warmup","cosine","cosine-with-restarts","linear","polynomial"],"default":"constant"},"rank":{"type":"number","description":"The dimension of the LoRA update matrices\n\nOnly available for SDXL and Flux LoRA training\n\nDefault value varies depending on the model type:\n- For SDXL: 64\n- For Flux: 16","minimum":4,"maximum":64},"validationPrompt":{"type":"string","description":"Validation prompt\n\nOnly available for SD15 and SDXL LoRA training","default":""},"conceptPrompt":{"type":"string","description":"The prompt with identifier specifying the instance (or subject) of the class (example: \"a daiton dog\")\n\nDefault value varies depending on the model type:\n- For SD1.5: \"daiton\" if no class is associated with the model\n- For SDXL: \"daiton\"\n- For Flux: \"\""},"maxTrainSteps":{"type":"number","description":"Maximum number of training steps to execute (default: varies depending on the model type)\n\nFor SDXL LoRA training, please use `numTextTrainSteps` and `numUNetTrainSteps` instead\n\nDefault value varies depending on the model type:\n- For SD1.5: round((number of training images * 225) / 3)\n- For SDXL: number of training images * 175\n- For Flux: number of training images * 100\n\nMaximum value varies depending on the model type:\n- For SD1.5 and SDXL: [0, 40000]\n- For Flux: [0, 10000]","minimum":0,"maximum":40000},"nbEpochs":{"type":"number","description":"The number of epochs to train for\n\nOnly available 
for Flux LoRA training","minimum":1,"maximum":30},"textEncoderTrainingRatio":{"type":"number","description":"Whether to train the text encoder or not\n\nExample: For 100 steps and a value of 0.2, it means that the text encoder will be trained for 20 steps and then the UNet for 80 steps\n\nNote: Legacy parameter, please use `numTextTrainSteps` and `numUNetTrainSteps`\n\nOnly available for SD15 and SDXL LoRA training","minimum":0,"maximum":0.99},"validationFrequency":{"type":"number","description":"Validation frequency. Cannot be greater than maxTrainSteps value\n\nOnly available for SD15 and SDXL LoRA training","minimum":0},"nbRepeats":{"type":"number","description":"The number of times to repeat the training\n\nOnly available for Flux LoRA training","minimum":1,"maximum":30},"samplePrompts":{"type":"array","description":"The prompts to use for each epoch\nOnly available for Flux LoRA training","items":{"type":"string"}},"randomCropRatio":{"type":"number","description":"Ratio of random crops\n\nOnly available for SD15 and SDXL LoRA training","minimum":0,"maximum":1},"learningRate":{"type":"number","description":"Initial learning rate (after the potential warmup period)\n\nDefault value varies depending on the model type:\n- For SD1.5 and SDXL: 0.000005\n- For Flux: 0.0001","minimum":0,"exclusiveMinimum":true},"optimizeFor":{"type":"string","description":"Optimize the model training task for a specific type of input images. 
The available values are:\n- \"likeness\": optimize training for likeness or portrait (targets specific transformer blocks)\n- \"all\": train all transformer blocks\n- \"none\": train no specific transformer blocks\n\nThis parameter controls which double and single transformer blocks are trained\nduring the LoRA training process.\n\nOnly available for Flux LoRA training","enum":["all","likeness","none"]},"randomCrop":{"type":"boolean","description":"Whether to random crop or center crop images before resizing to the working resolution\n\nOnly available for SD15 and SDXL LoRA training","default":false},"numTextTrainSteps":{"type":"number","description":"The number of training steps for the text encoder\n\nOnly available for SDXL LoRA training","minimum":0,"maximum":40000},"learningRateTextEncoder":{"type":"number","description":"Initial learning rate (after the potential warmup period) for the text encoder\n\nMaximum [Flux LoRA: 0.001]\nDefault [SDXL: 0.00005 | Flux LoRA: 0.00001]\nMinimum [SDXL: 0 | Flux LoRA: 0.000001]","minimum":0,"maximum":0.001,"exclusiveMinimum":true},"learningRateUnet":{"type":"number","description":"Initial learning rate (after the potential warmup period) for the UNet\n\nOnly available for SDXL LoRA training","minimum":0,"exclusiveMinimum":true},"batchSize":{"type":"number","description":"The batch size\nLess steps, and will increase the learning rate\n\nOnly available for Flux LoRA training","minimum":1,"maximum":4}}},"compliantModelIds":{"type":"array","description":"List of base model IDs compliant with the model (example: [\"flux.1-dev\", \"flux.1-schnell\"])\nThis attribute is mainly used for Flux LoRA models","items":{"type":"string"}},"status":{"type":"string","description":"The model 
status","enum":["copying","failed","new","trained","training","training-canceled"]}},"required":["capabilities","collectionIds","createdAt","custom","exampleAssetIds","id","privacy","source","status","tags","trainingImagesNumber","type","updatedAt"]}}},"DeleteModelsByCollectionIdResponse":{"type":"object","required":["collection"],"properties":{"collection":{"type":"object","properties":{"createdAt":{"type":"string","description":"The collection creation date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"thumbnail":{"type":"object","description":"The thumbnail for the collection (if any)","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"name":{"type":"string","description":"The collection name"},"assetCount":{"type":"number"},"id":{"type":"string","description":"The collection ID (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"ownerId":{"type":"string","description":"The owner ID (example: \"dcf121faaa1a0a0bbbd9ca1b73d62aea\")"},"modelCount":{"type":"number"},"itemCount":{"type":"number"},"updatedAt":{"type":"string","description":"The collection last update date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"}},"required":["assetCount","createdAt","id","itemCount","modelCount","name","ownerId","updatedAt"]}}},"PutImagesEraseBackgroundRequest":{"type":"object","properties":{"image":{"type":"string","description":"The input image as a data URL (example: \"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVQYV2NgYAAAAAMAAWgmWQ0AAAAASUVORK5CYII=\") or the asset ID (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"backgroundColor":{"type":"string","description":"The background color as an hexadecimal code (ex: \"#FFFFff\"), an html color (ex: \"red\") or \"transparent\" if \"format\" is \"png\""},"assetId":{"type":"string","description":"Deprecated: The asset ID (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\"). 
Prefer to use image with the asset ID instead."},"format":{"type":"string","description":"The output format","enum":["jpeg","png"]},"name":{"type":"string","description":"The original file name of the image (example: \"low-res-image.jpg\"). It will be ignored if assetId is provided."},"returnImage":{"type":"boolean","description":"If true, the image will be returned in the response."}}},"PostWebhooksLambdaJobIdRequest":{"type":"object","required":["jobId","status"],"properties":{"output":{"type":"object","description":"Output data (present when status is 'success')","properties":{}},"jobId":{"type":"string","description":"The job ID this webhook is for"},"error":{"type":"string","description":"Error message (present when status is 'failure')"},"status":{"type":"string","description":"Status of the Lambda execution","enum":["success","failure"]}}},"PostControlnetInferencesResponse":{"type":"object","required":["inference","job"],"properties":{"creativeUnitsCost":{"type":"number","description":"The Creative Units cost for the request billed"},"creativeUnitsDiscount":{"type":"number","description":"The Creative Units discount for the request billed"},"inference":{"type":"object","properties":{"image":{"type":"string","description":"Signed URL to display the input image"},"controlEnd":{"type":"number","description":"Specifies how long the ControlNet guidance should be applied during the inference process.\n\nOnly available for Flux.1-dev based models.\n\nThe value represents the percentage of total inference steps where the ControlNet guidance is active.\nFor example:\n- 1.0: ControlNet guidance is applied during all inference steps\n- 0.5: ControlNet guidance is only applied during the first half of inference steps\n\nDefault values:\n- 0.5 for Canny modality\n- 0.6 for all other modalities","minimum":0.1,"maximum":1},"imageId":{"type":"string","description":"Asset id of the input image"},"modality":{"type":"string","description":"The modality associated with the 
control image used for the generation: it can either be an object with a combination of maximum\n\nFor models of SD1.5 family:\n - up to 3 modalities from `canny`, `pose`, `depth`, `lines`, `seg`, `scribble`, `lineart`, `normal-map`, `illusion`\n - or one of the following presets: `character`, `landscape`, `city`, `interior`.\n\nFor models of the SDXL family:\n - up to 3 modalities from `canny`, `pose`, `depth`, `seg`, `illusion`, `scribble`\n - or one of the following presets: `character`, `landscape`.\n\nFor models of the FLUX schnell or dev families:\n- one modality from: `canny`, `tile`, `depth`, `blur`, `pose`, `gray`, `low-quality`\n\nOptionally, you can associate a value to these modalities or presets. The value must be within `]0.0, 1.0]`.\n\nExamples:\n- `canny`\n- `depth:0.5,pose:1.0`\n- `canny:0.5,depth:0.5,lines:0.3`\n- `landscape`\n- `character:0.5`\n- `illusion:1`\n\nNote: if you use a value that is not supported by the model family, this will result in an error.","enum":["blur","canny","depth","gray","illusion","lineart","lines","low-quality","normal-map","pose","scribble","seg","sketch","tile"]},"seed":{"type":"string","description":"Used to reproduce previous results. Default: randomly generated number."},"modelEpoch":{"type":"string","description":"The epoch of the model to use for the inference. Only available for Flux Lora Trained models."},"hideResults":{"type":"boolean","description":"If set, generated assets will be hidden and not returned in the list of images of the inference\nor when listing assets (default: false)","default":false},"type":{"type":"string","description":"The type of inference to use. 
Example: txt2img, img2img, etc.\n\nSelecting the right type will condition the expected parameters.\n\nNote: if model.type is `sd-xl*` or `sd-1_5*`, when using the `\"inpaint\"` inference type, Scenario determines the best available `baseModel` for a given `modelId`: one of `[\"stable-diffusion-inpainting\", \"stable-diffusion-xl-1.0-inpainting-0.1\"]` will be used.","enum":["txt2img","txt2img_ip_adapter","txt2img_texture","img2img","img2img_ip_adapter","img2img_texture","inpaint","outpaint","inpaint_ip_adapter","controlnet","controlnet_ip_adapter","reference","reference_texture","controlnet_reference","controlnet_img2img","controlnet_inpaint","controlnet_inpaint_ip_adapter","controlnet_texture"]},"negativePrompt":{"type":"string","description":"The prompt not to guide the image generation, ignored when guidance < 1 (example: \"((ugly face))\")\nFor Flux based model (not Fast-Flux): requires negativePromptStrength > 0 and active only for inference types txt2img / img2img / controlnet."},"scheduler":{"type":"string","description":"The scheduler to use to override the default configured for the model. 
See detailed documentation for more details.","enum":["DDIMScheduler","DDPMScheduler","DEISMultistepScheduler","DPMSolverMultistepScheduler","DPMSolverSinglestepScheduler","EulerAncestralDiscreteScheduler","EulerDiscreteScheduler","HeunDiscreteScheduler","KDPM2AncestralDiscreteScheduler","KDPM2DiscreteScheduler","LCMScheduler","LMSDiscreteScheduler","PNDMScheduler","TCDScheduler","UniPCMultistepScheduler"]},"intermediateImages":{"type":"boolean","description":"Enable or disable the intermediate images generation (default: false)","default":false},"concepts":{"type":"array","items":{"type":"object","properties":{"modelId":{"type":"string","description":"The model ID (example: \"model_eyVcnFJcR92BxBkz7N6g5w\")"},"modelEpoch":{"type":"string","description":"The epoch of the model (example: \"000001\")\nOnly available for Flux Lora Trained models"},"scale":{"type":"number","description":"The scale of the model (example: 1.0)\nFor Flux Kontext Prompt Editing, the scale is between 0 and 2.","minimum":-2,"maximum":2}},"required":["modelId","scale"]}},"disableModalityDetection":{"type":"boolean","description":"If false, the process uses the given image to detect the modality.\nIf true (default), the process will not try to detect the modality of the given image.\n\nFor example:\nwith `pose` modality and `false` value, the process will detect the pose of people in the given image\nwith `depth` modality and `false` value, the process will detect the depth of the given image\nwith `scribble` modality and `true`value, the process will use the given image as a scribble\n\n⚠️ For models of the FLUX schnell or dev families, this parameter is ignored. The modality detection is always disabled. ⚠️","default":true},"guidance":{"type":"number","description":"Controls how closely the generated image follows the prompt. Higher values result in stronger adherence to the prompt. 
Default and allowed values depend on the model type:\n- For Flux dev models, the default is 3.5 and allowed values are within [0, 10]\n- For Flux pro models, the default is 3 and allowed values are within [2, 5]\n- For SDXL models, the default is 6 and allowed values are within [0, 20]\n- For SD1.5 models, the default is 7.5 and allowed values are within [0, 20]","minimum":0,"maximum":20},"numInferenceSteps":{"type":"number","description":"The number of denoising steps for each image generation (within [1, 150], default: 30)","minimum":1,"maximum":150},"numSamples":{"type":"number","description":"The number of images to generate (within [1, 128], default: 4)","minimum":1,"maximum":128},"width":{"type":"number","description":"The width of the generated images, must be a 8 multiple (within [64, 2048], default: 512)\nIf model.type is `sd-xl`, `sd-xl-lora`, `sd-xl-composition` the width must be within [512, 2048]\nIf model.type is `sd-1_5`, the width must be within [64, 1024]\nIf model.type is `flux.1.1-pro-ultra`, you can use the aspectRatio parameter instead","minimum":64,"maximum":2048},"negativePromptStrength":{"type":"number","description":"Only applicable for flux-dev based models for `txt2img`, `img2img`, and `controlnet` inference types.\n\nControls the influence of the negative prompt. Default 0 means the negative prompt has no effect. Higher values increase negative prompt influence.\nMust be > 0 if negativePrompt is provided.","minimum":0,"maximum":10},"baseModelId":{"type":"string","description":"The base model to use for the inference. Only Flux LoRA models can use this parameter.\nAllowed values are available in the model's attribute: `compliantModelIds`"},"prompt":{"type":"string","description":"Full text prompt including the model placeholder. 
(example: \"an illustration of phoenix in a fantasy world, flying over a mountain, 8k, bokeh effect\")"},"controlStart":{"type":"number","description":"Specifies the starting point of the ControlNet guidance during the inference process.\n\nOnly available for Flux.1-dev based models.\n\nThe value represents the percentage of total inference steps where the ControlNet guidance starts.\nFor example:\n- 0.0: ControlNet guidance starts at the beginning of the inference steps\n- 0.5: ControlNet guidance starts at the middle of the inference steps","minimum":0,"maximum":0.9},"height":{"type":"number","description":"The height of the generated images, must be a 8 multiple (within [64, 2048], default: 512)\nIf model.type is `sd-xl`, `sd-xl-lora`, `sd-xl-composition` the height must be within [512, 2048]\nIf model.type is `sd-1_5`, the height must be within [64, 1024]\nIf model.type is `flux.1.1-pro-ultra`, you can use the aspectRatio parameter instead","minimum":64,"maximum":2048}},"required":["image","imageId","modality","prompt","type"]},"job":{"type":"object","properties":{"createdAt":{"type":"string","description":"The job creation date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"jobId":{"type":"string","description":"The job ID (example: \"job_ocZCnG1Df35XRL1QyCZSRxAG8\")"},"metadata":{"type":"object","description":"Metadata of the job with some additional information","properties":{"output":{"type":"object","description":"The output of the job","properties":{}},"input":{"type":"object","description":"The inputs for the job","properties":{}},"assetIds":{"type":"array","description":"List of produced assets for this job","items":{"type":"string"}},"flow":{"type":"array","description":"The flow of the job. Only available for workflow jobs.","items":{"type":"object","properties":{"jobId":{"type":"string","description":"If the flow is part of a WorkflowJob, this is the jobId for the node.\njobId is only available for nodes started. 
A node \"Pending\" for a running workflow job is not started."},"assets":{"type":"array","description":"List of produced assets for this node.","items":{"type":"object","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]}},"logicType":{"type":"string","description":"The type of the logic for the node.\nOnly available for logic nodes.","enum":["if-else"]},"dependsOn":{"type":"array","description":"The nodes that this node depends on.\nOnly available for nodes that have dependencies. Mainly used for user approval nodes.","items":{"type":"string"}},"modelId":{"type":"string","description":"The model id for the node. Mainly used for custom model tasks."},"inputs":{"type":"array","description":"The inputs of the node.","items":{"type":"object","description":"This is used to run the flow.","properties":{"parent":{"type":"boolean","description":"Whether this input represents a parent asset to assign to the produced assets.\nOnly available for \\`file\\` and \\`file_array\\` input types.\n\nFor \\`file_array\\`, the parent asset is the first item in the array."},"color":{"type":"boolean","description":"Whether the input is a color or not. Only available for \\`string\\` input type."},"inputs":{"type":"array","description":"The list of inputs which form an object within a container array.\nAll inputs are the same as the current object.\nThis is only available for type inputs_array inputs.","items":{"type":"object","properties":{}}},"minLength":{"type":"number","description":"The minimum allowed length for string inputs. Also applies to each item in \\`string_array\\`."},"backgroundBehavior":{"type":"string","description":"Specifies the background behavior for the input. 
Only available for \\`file\\` and \\`file_array\\`\ninput types with kind \\`image\\`.","enum":["opaque","transparent"]},"description":{"type":"string","description":"Help text displayed in the UI to provide additional information about the input"},"type":{"type":"string","description":"The data type of the input","enum":["boolean","file","file_array","inputs_array","model","model_array","number","number_array","string","string_array"]},"required":{"type":"object","description":"Set of rules that describes when this input is required:\n- \\`always\\`: Input is always required\n- \\`ifNotDefined\\`: Input is required when another specified input is not defined\n- \\`ifDefined\\`: Input is required when another specified input is defined\n- \\`conditionalValues\\`: Input is required when another input has a specific value\n\nBy default, the input is not required.","properties":{"always":{"type":"boolean","description":"Whether the input is always required"},"ifNotDefined":{"type":"object","description":"Makes this input required when another input is not defined:\n- Key: name of the input that must be undefined\n- Value: message to display when this input is required","properties":{}},"ifDefined":{"type":"object","description":"Makes this input required when another input is defined:\n- Key: name of the input that must be defined\n- Value: message to display when this input is required","properties":{}},"conditionalValues":{"type":"object","description":"Makes this input required when another input has a specific value:\n- Key: name of the input to check\n- Value: operation and allowed values that trigger the requirement","properties":{}}}},"modelTypes":{"type":"array","description":"The allowed model types for this input. 
Example: \\`[\"flux.1-lora\"]\\`.\nOnly available for \\`model_array\\` input type.","items":{"type":"string","enum":["custom","flux.1","flux.1-composition","flux.1-kontext-dev","flux.1-kontext-lora","flux.1-krea-dev","flux.1-krea-lora","flux.1-lora","flux.1-pro","flux.1.1-pro-ultra","flux1.1-pro","gpt-image-1","sd-1_5","sd-1_5-composition","sd-1_5-lora","sd-xl","sd-xl-composition","sd-xl-lora"]}},"ref":{"type":"object","description":"The reference to another input or output of the same workflow.\nMust have at least one of node or conditional.","properties":{"equal":{"type":"string","description":"This is the desired node output value if ref is an if/else node."},"node":{"type":"string","description":"The node id or 'workflow' if the source is a workflow input."},"conditional":{"type":"array","description":"The conditional nodes to reference.\nIf the conditional nodes are successful, the node will be successful.\nIf the conditional nodes are skipped, the node will be skipped.\nContains an array of node ids used to check the status of the nodes.","items":{"type":"string"}},"name":{"type":"string","description":"The name of the input or output to reference.\nIf the type is 'workflow', the name is the name of the input of the workflow is required\nIf the type is 'node', the name is not mandatory, except if you want all outputs of the node.\nTo get all outputs of a node, you can use the name 'all'."}}},"min":{"type":"number","description":"The minimum allowed value. Only available for \\`number\\` and array input types."},"promptSpark":{"type":"boolean","description":"Whether the input is used with prompt spark. Only available for \\`string\\` input type."},"placeholder":{"type":"string","description":"Placeholder text for the input. Only available for 'string' input type."},"group":{"type":"string","description":"Used to visually group inputs together in the UI. 
Inputs with the same group value appear\nconsecutively in the UI."},"allowedValues":{"type":"array","description":"The allowed values for the input. For \\`string\\` or \\`number\\` types, creates a single-select\ndropdown.\nFor \\`string_array\\` type, creates a multi-select dropdown.","items":{}},"costImpact":{"type":"boolean","description":"Whether this input affects the model's cost calculation"},"max":{"type":"number","description":"The maximum allowed value. Only available for \\`number\\` and \\`array\\` input types."},"kind":{"type":"string","description":"The asset kind of the input. Only taken into account for \\`file\\` and \\`file_array\\` input types.","enum":["3d","audio","document","image","image-hdr","json","video"]},"maskFrom":{"type":"string","description":"The name of the file input field to use as the mask source"},"label":{"type":"string","description":"The label displayed in the UI for this input"},"hint":{"type":"string","description":"Hint text displayed in the UI as a tooltip to guide the user"},"name":{"type":"string","description":"The name that must be used to call the model through the API"},"step":{"type":"number","description":"The step increment for numeric inputs. Only available for \\`number\\` input type.","minimum":1},"prompt":{"type":"boolean","description":"Whether the input is a prompt. When true, displays as a text area with prompt spark feature.\nOnly available for \\`string\\` input type."},"maxLength":{"type":"number","description":"The maximum allowed length for \\`string\\` inputs. 
Also applies to each item in \\`string_array\\`."}},"required":["name","type"]}},"id":{"type":"string","description":"The id of the node."},"logic":{"type":"object","description":"The logic of the node.\nOnly available for logic nodes.","properties":{"default":{"type":"string","description":"The default case of the logic.\nContains the id/output of the node to execute if no case is matched.\nOnly available for if/else nodes."},"transform":{"type":"string","description":"The transform of the logic.\nOnly available for transform nodes."},"cases":{"type":"array","description":"The cases of the logic.\nOnly available for if/else nodes.","items":{"type":"object","properties":{"condition":{"type":"string"},"value":{"type":"string"}},"required":["condition","value"]}}}},"type":{"type":"string","description":"The type of the job for the node.","enum":["custom-model","generate-prompt","logic","remove-background","transform","user-approval","workflow"]},"workflowId":{"type":"string","description":"The workflow id for the node. Mainly used for workflow tasks."},"status":{"type":"string","description":"The status of the node. 
Only available for WorkflowJob nodes.","enum":["failure","pending","processing","skipped","success"]}},"required":["id","status","type"]}},"workflowId":{"type":"string","description":"The workflow ID of the job if job is part of a workflow."},"workflowJobId":{"type":"string","description":"The workflow job ID of the job if job is part of a workflow job."}}},"statusHistory":{"type":"array","description":"The history of the different statuses the job went through with the ISO string date\nof when the job reached each statuses.","items":{"type":"object","properties":{"date":{"type":"string"},"status":{"type":"string","enum":["canceled","failure","finalizing","in-progress","pending","queued","success","warming-up"]}},"required":["date","status"]}},"progress":{"type":"number","description":"Progress of the job (between 0 and 1)"},"authorId":{"type":"string","description":"The author user ID (example: \"dcf121faaa1a0a0bbbd9ca1b73d62aea\")"},"jobType":{"type":"string","description":"The type of job","enum":["assets-download","canvas-export","caption","caption-llava","custom","describe-style","detection","embed","flux","flux-model-training","generate-prompt","image-generation","image-prompt-editing","inference","mesh-preview-rendering","model-download","model-import","model-training","openai-image-generation","patch-image","pixelate","reframe","remove-background","repaint","restyle","segment","skybox-3d","skybox-base-360","skybox-hdri","skybox-upscale-360","texture","translate","upload","upscale","upscale-skybox","upscale-texture","vectorize","workflow"]},"ownerId":{"type":"string","description":"The owner ID (example: \"team_U3Qmc8PCdWXwAQJ4Dvw4tV6D\")"},"billing":{"type":"object","description":"The billing of the job","properties":{"cuDiscount":{"type":"number"},"cuCost":{"type":"number"}},"required":["cuCost","cuDiscount"]},"status":{"type":"string","description":"The current status of the 
job","enum":["canceled","failure","finalizing","in-progress","pending","queued","success","warming-up"]},"updatedAt":{"type":"string","description":"The job last update date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"}},"required":["createdAt","jobId","jobType","metadata","progress","status","statusHistory","updatedAt"]}}},"UnlockAssetByAssetIdRequest":{"type":"object","properties":{"lockId":{"type":"string","description":"The value of the lock on this canvas."},"forceUnlock":{"type":"boolean","description":"If true, no need to pass a lockId."}}},"GetInferencesResponse":{"type":"object","required":["inferences"],"properties":{"nextPaginationToken":{"type":"string","description":"A token to query the next page of inferences"},"inferences":{"type":"array","items":{"type":"object","properties":{"images":{"type":"array","description":"The result images URLs (can change over time when inference is in progress)","items":{"type":"object","properties":{"seed":{"type":"string","description":"The seed used to generate this image"},"id":{"type":"string","description":"The image ID (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"url":{"type":"string","description":"The image URL"}},"required":["id","seed","url"]}},"modelId":{"type":"string","description":"The model ID on which the inference was done (example: \"model_GTrL3mq4SXWyMxkOHRxlpw\")"},"authorId":{"type":"string","description":"The author user ID (example: \"user_VFhihHKMRZyDDnZAJwLb2Q\")"},"displayPrompt":{"type":"string","description":"The human-friendly prompt (without token)"},"ownerId":{"type":"string","description":"The owner user ID (example: \"team_VFhihHKMRZyDDnZAJwLb2Q\")"},"userId":{"type":"string","description":"(Deprecated) The user ID (example: \"user_VFhihHKMRZyDDnZAJwLb2Q\")"},"createdAt":{"type":"string","description":"The inference creation date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"progress":{"type":"number","description":"The inference progress (within [0, 
1])","minimum":0,"maximum":1},"id":{"type":"string","description":"The inference ID (example: \"inf_GTrL3mq4SXWyMxkOHRxlpw\")"},"imagesNumber":{"type":"number","description":"The total number of result images"},"parameters":{"type":"object","description":"The inference parameters","properties":{"ipAdapterImageIds":{"type":"array","description":"Asset id of the input IpAdapter images","items":{"type":"string"}},"styleFidelity":{"type":"number","description":"If style_fidelity=1.0, control more important, else if style_fidelity=0.0, prompt more important, else balanced\nOnly for \"reference\" inference type","minimum":0,"maximum":1},"controlEnd":{"type":"number","description":"Specifies how long the ControlNet guidance should be applied during the inference process.\n\nOnly available for Flux.1-dev based models.\n\nThe value represents the percentage of total inference steps where the ControlNet guidance is active.\nFor example:\n- 1.0: ControlNet guidance is applied during all inference steps\n- 0.5: ControlNet guidance is only applied during the first half of inference steps\n\nDefault values:\n- 0.5 for Canny modality\n- 0.6 for all other modalities","minimum":0.1,"maximum":1},"modality":{"type":"string","description":"The modality associated with the control image used for the generation: it can either be an object with a combination of maximum\n\nFor models of SD1.5 family:\n - up to 3 modalities from `canny`, `pose`, `depth`, `lines`, `seg`, `scribble`, `lineart`, `normal-map`, `illusion`\n - or one of the following presets: `character`, `landscape`, `city`, `interior`.\n\nFor models of the SDXL family:\n - up to 3 modalities from `canny`, `pose`, `depth`, `seg`, `illusion`, `scribble`\n - or one of the following presets: `character`, `landscape`.\n\nFor models of the FLUX schnell or dev families:\n- one modality from: `canny`, `tile`, `depth`, `blur`, `pose`, `gray`, `low-quality`\n\nOptionally, you can associate a value to these modalities or presets. 
The value must be within `]0.0, 1.0]`.\n\nExamples:\n- `canny`\n- `depth:0.5,pose:1.0`\n- `canny:0.5,depth:0.5,lines:0.3`\n- `landscape`\n- `character:0.5`\n- `illusion:1`\n\nNote: if you use a value that is not supported by the model family, this will result in an error.","enum":["blur","canny","depth","gray","illusion","lineart","lines","low-quality","normal-map","pose","scribble","seg","sketch","tile"]},"seed":{"type":"string","description":"Used to reproduce previous results. Default: randomly generated number."},"strength":{"type":"number","description":"Controls the noise intensity introduced to the input image, where a value of 1.0 completely erases the original image's details. Available for img2img and inpainting. (within [0.01, 1.0], default: 0.75)","minimum":0.01,"maximum":1},"ipAdapterType":{"type":"string","description":"The type of IP Adapter model to use. Must be one of [`style`, `character`], default to `style``","enum":["character","style"],"default":"style"},"aspectRatio":{"type":"string","description":"The aspect ratio of the generated images. Only used for the model flux.1.1-pro-ultra.\nThe aspect ratio is a string formatted as \"width:height\" (example: \"16:9\").","enum":["16:9","1:1","21:9","2:3","3:2","3:4","4:3","4:5","5:4","9:16","9:21"],"default":"1:1"},"type":{"type":"string","description":"The type of inference to use. 
Example: txt2img, img2img, etc.\n\nSelecting the right type will condition the expected parameters.\n\nNote: if model.type is `sd-xl*` or `sd-1_5*`, when using the `\"inpaint\"` inference type, Scenario determines the best available `baseModel` for a given `modelId`: one of `[\"stable-diffusion-inpainting\", \"stable-diffusion-xl-1.0-inpainting-0.1\"] will be used.","enum":["controlnet","controlnet_img2img","controlnet_inpaint","controlnet_inpaint_ip_adapter","controlnet_ip_adapter","controlnet_reference","controlnet_texture","img2img","img2img_ip_adapter","img2img_texture","inpaint","inpaint_ip_adapter","outpaint","reference","reference_texture","txt2img","txt2img_ip_adapter","txt2img_texture"]},"ipAdapterImage":{"type":"string","description":"Signed URL to display the IpAdapter image"},"scheduler":{"type":"string","description":"The scheduler to use to override the default configured for the model. See detailed documentation for more details.","enum":["DDIMScheduler","DDPMScheduler","DEISMultistepScheduler","DPMSolverMultistepScheduler","DPMSolverSinglestepScheduler","EulerAncestralDiscreteScheduler","EulerDiscreteScheduler","HeunDiscreteScheduler","KDPM2AncestralDiscreteScheduler","KDPM2DiscreteScheduler","LCMScheduler","LMSDiscreteScheduler","PNDMScheduler","TCDScheduler","UniPCMultistepScheduler"]},"disableMerging":{"type":"boolean","description":"If set to true, the entire input image will likely change during inpainting. 
This results in faster inferences, but the output image will be harder to integrate if the input is just a small part of a larger image.","default":false},"disableModalityDetection":{"type":"boolean","description":"If false, the process uses the given image to detect the modality.\nIf true (default), the process will not try to detect the modality of the given image.\n\nFor example:\nwith `pose` modality and `false` value, the process will detect the pose of people in the given image\nwith `depth` modality and `false` value, the process will detect the depth of the given image\nwith `scribble` modality and `true` value, the process will use the given image as a scribble\n\n⚠️ For models of the FLUX schnell or dev families, this parameter is ignored. The modality detection is always disabled. ⚠️","default":true},"ipAdapterImages":{"type":"array","description":"Signed URL to display the IpAdapter images","items":{"type":"string"}},"baseModelId":{"type":"string","description":"The base model to use for the inference. 
Only Flux LoRA models can use this parameter.\nAllowed values are available in the model's attribute: `compliantModelIds`"},"controlStart":{"type":"number","description":"Specifies the starting point of the ControlNet guidance during the inference process.\n\nOnly available for Flux.1-dev based models.\n\nThe value represents the percentage of total inference steps where the ControlNet guidance starts.\nFor example:\n- 0.0: ControlNet guidance starts at the beginning of the inference steps\n- 0.5: ControlNet guidance starts at the middle of the inference steps","minimum":0,"maximum":0.9},"height":{"type":"number","description":"The height of the generated images, must be a 8 multiple (within [64, 2048], default: 512)\nIf model.type is `sd-xl`, `sd-xl-lora`, `sd-xl-composition` the height must be within [512, 2048]\nIf model.type is `sd-1_5`, the height must be within [64, 1024]\nIf model.type is `flux.1.1-pro-ultra`, you can use the aspectRatio parameter instead","minimum":64,"maximum":2048},"mask":{"type":"string","description":"Signed URL to display the mask image"},"controlImageId":{"type":"string","description":"Asset id of the controlnet input image"},"image":{"type":"string","description":"Signed URL to display the input image"},"imageId":{"type":"string","description":"Asset id of the input image"},"ipAdapterImageId":{"type":"string","description":"Asset id of the input IpAdapter image"},"ipAdapterScale":{"type":"number","description":"IpAdapter scale factor (within [0.0, 1.0], default: 0.9).","minimum":0,"maximum":1},"modelEpoch":{"type":"string","description":"The epoch of the model to use for the inference. 
Only available for Flux Lora Trained models."},"hideResults":{"type":"boolean","description":"If set, generated assets will be hidden and not returned in the list of images of the inference\nor when listing assets (default: false)","default":false},"ipAdapterScales":{"type":"array","description":"IpAdapter scale factors (within [0.0, 1.0], default: 0.9).","items":{"type":"number"}},"maskId":{"type":"string","description":"Asset id of the mask image"},"referenceAdain":{"type":"boolean","description":"Whether to use reference adain\nOnly for \"reference\" inference type","default":false},"negativePrompt":{"type":"string","description":"The prompt not to guide the image generation, ignored when guidance < 1 (example: \"((ugly face))\")\nFor Flux based model (not Fast-Flux): requires negativePromptStrength > 0 and active only for inference types txt2img / img2img / controlnet."},"intermediateImages":{"type":"boolean","description":"Enable or disable the intermediate images generation (default: false)","default":false},"concepts":{"type":"array","items":{"type":"object","properties":{"modelId":{"type":"string","description":"The model ID (example: \"model_eyVcnFJcR92BxBkz7N6g5w\")"},"modelEpoch":{"type":"string","description":"The epoch of the model (example: \"000001\")\nOnly available for Flux Lora Trained models"},"scale":{"type":"number","description":"The scale of the model (example: 1.0)\nFor Flux Kontext Prompt Editing, the scale is between 0 and 2.","minimum":-2,"maximum":2}},"required":["modelId","scale"]}},"controlImage":{"type":"string","description":"Signed URL to display the controlnet input image"},"guidance":{"type":"number","description":"Controls how closely the generated image follows the prompt. Higher values result in stronger adherence to the prompt. 
Default and allowed values depend on the model type:\n- For Flux dev models, the default is 3.5 and allowed values are within [0, 10]\n- For Flux pro models, the default is 3 and allowed values are within [2, 5]\n- For SDXL models, the default is 6 and allowed values are within [0, 20]\n- For SD1.5 models, the default is 7.5 and allowed values are within [0, 20]","minimum":0,"maximum":20},"numInferenceSteps":{"type":"number","description":"The number of denoising steps for each image generation (within [1, 150], default: 30)","minimum":1,"maximum":150},"numSamples":{"type":"number","description":"The number of images to generate (within [1, 128], default: 4)","minimum":1,"maximum":128},"width":{"type":"number","description":"The width of the generated images, must be a 8 multiple (within [64, 2048], default: 512)\nIf model.type is `sd-xl`, `sd-xl-lora`, `sd-xl-composition` the width must be within [512, 2048]\nIf model.type is `sd-1_5`, the width must be within [64, 1024]\nIf model.type is `flux.1.1-pro-ultra`, you can use the aspectRatio parameter instead","minimum":64,"maximum":2048},"negativePromptStrength":{"type":"number","description":"Only applicable for flux-dev based models for `txt2img`, `img2img`, and `controlnet` inference types.\n\nControls the influence of the negative prompt. Default 0 means the negative prompt has no effect. Higher values increase negative prompt influence.\nMust be > 0 if negativePrompt is provided.","minimum":0,"maximum":10},"referenceAttn":{"type":"boolean","description":"Whether to use reference query for self attention's context\nOnly for \"reference\" inference type","default":false},"prompt":{"type":"string","description":"Full text prompt including the model placeholder. 
(example: \"an illustration of phoenix in a fantasy world, flying over a mountain, 8k, bokeh effect\")"}},"required":["prompt","type"]},"queue":{"type":"object","description":"The generation queue information (for generations with the \"queued\" status)","properties":{"approximateNumberOfRequests":{"type":"number","description":"The approximate number of requests in the generation queue"},"approximatePosition":{"type":"number","description":"The approximate position in the generation queue"}},"required":["approximateNumberOfRequests","approximatePosition"]},"status":{"type":"string","description":"The inference status","enum":["canceled","failed","in-progress","model-loading","new","queued","succeeded"]}},"required":["authorId","createdAt","displayPrompt","id","images","imagesNumber","modelId","ownerId","parameters","status","userId"]}}}},"PostSearchModelsRequest":{"type":"object","properties":{"filter":{"type":"string","description":"Filter queries by an attribute's value","default":"undefined"},"image":{"type":"string","description":"Search for model with `image` as a reference\n\nMust be an existing `AssetId` or a valid data URL."},"imageSemanticRatio":{"type":"number","description":"Image embedding ratio for hybrid search, applied when `image`, `images.like`, or `images.unlike`\nare provided","minimum":0,"maximum":1},"images":{"type":"object","description":"Search for model with `images.like` and `images.unlike` as a reference\n\nMust be an array of existing `AssetId` or valid data URLs.","properties":{"like":{"type":"array","description":"Search for model images with `images.like` as a reference\n\nMust be an array of existing `AssetId` or valid data URLs.","items":{"type":"string"}},"unlike":{"type":"array","description":"Search for model images that are not similar to `images.unlike` as a reference\n\nMust be an array of existing `AssetId` or valid data URLs.","items":{"type":"string"}}}},"offset":{"type":"number","description":"Number of documents to skip. 
Must be used with `limit`. Starts from 0.","minimum":0},"public":{"type":"boolean","description":"Search for public images not necessarily belonging to the current `ownerId`","default":false},"hitsPerPage":{"type":"number","description":"Maximum number of documents returned for a page. Must be used with `page`.","minimum":1,"maximum":100},"query":{"type":"string","description":"A string used for querying search results.","default":"''"},"limit":{"type":"number","description":"Maximum number of documents returned. Must be used with `offset`.","minimum":1,"maximum":100},"sortBy":{"type":"array","description":"Sort the search results by the given attributes. Each attribute in the list must be followed by a colon (`:`) and the preferred sorting order: either ascending (`asc`) or descending (`desc`).\n\nExample: `['createdAt:desc']`","items":{"type":"string"}},"page":{"type":"number","description":"Request a specific page of results. Must be used with `hitsPerPage`.","minimum":1},"querySemanticRatio":{"type":"number","description":"Query embedding for hybrid search, if possible","minimum":0,"maximum":1}},"description":"At least one of the following fields must have a value: `query`, `filter`, `image`, or `images`.\n\n`image`, and `images` are mutually exclusive."},"PostControlnetIpAdapterInferencesRequest":{"type":"object","required":["modality","modelId","prompt"],"properties":{"ipAdapterImageIds":{"type":"array","description":"The IpAdapter images as an AssetId. 
Will be ignored if the `ipAdapterImages` parameter is provided","items":{"type":"string"}},"controlEnd":{"type":"number","description":"Specifies how long the ControlNet guidance should be applied during the inference process.\n\nOnly available for Flux.1-dev based models.\n\nThe value represents the percentage of total inference steps where the ControlNet guidance is active.\nFor example:\n- 1.0: ControlNet guidance is applied during all inference steps\n- 0.5: ControlNet guidance is only applied during the first half of inference steps\n\nDefault values:\n- 0.5 for Canny modality\n- 0.6 for all other modalities","minimum":0.1,"maximum":1},"modality":{"type":"string","description":"The modality associated with the control image used for the generation: it can either be an object with a combination of maximum\n\nFor models of SD1.5 family:\n - up to 3 modalities from `canny`, `pose`, `depth`, `lines`, `seg`, `scribble`, `lineart`, `normal-map`, `illusion`\n - or one of the following presets: `character`, `landscape`, `city`, `interior`.\n\nFor models of the SDXL family:\n - up to 3 modalities from `canny`, `pose`, `depth`, `seg`, `illusion`, `scribble`\n - or one of the following presets: `character`, `landscape`.\n\nFor models of the FLUX schnell or dev families:\n- one modality from: `canny`, `tile`, `depth`, `blur`, `pose`, `gray`, `low-quality`\n\nOptionally, you can associate a value to these modalities or presets. The value must be within `]0.0, 1.0]`.\n\nExamples:\n- `canny`\n- `depth:0.5,pose:1.0`\n- `canny:0.5,depth:0.5,lines:0.3`\n- `landscape`\n- `character:0.5`\n- `illusion:1`\n\nNote: if you use a value that is not supported by the model family, this will result in an error.","enum":["blur","canny","depth","gray","illusion","lineart","lines","low-quality","normal-map","pose","scribble","seg","sketch","tile"]},"seed":{"type":"string","description":"Used to reproduce previous results. 
Default: randomly generated number."},"modelId":{"type":"string","description":"The model id to use for the inference"},"ipAdapterType":{"type":"string","description":"The type of IP Adapter model to use. Must be one of [`style`, `character`], default to `style`","enum":["character","style"],"default":"style"},"ipAdapterImage":{"type":"string","description":"Deprecated for type txt2img-ip-adapter and img2img-ip-adapter, use `ipAdapterImages` instead.\nThe IpAdapter image as a data url. Will be ignored if the `ipAdapterImages` parameter is provided."},"scheduler":{"type":"string","description":"The scheduler to use to override the default configured for the model. See detailed documentation for more details.","enum":["DDIMScheduler","DDPMScheduler","DEISMultistepScheduler","DPMSolverMultistepScheduler","DPMSolverSinglestepScheduler","EulerAncestralDiscreteScheduler","EulerDiscreteScheduler","HeunDiscreteScheduler","KDPM2AncestralDiscreteScheduler","KDPM2DiscreteScheduler","LCMScheduler","LMSDiscreteScheduler","PNDMScheduler","TCDScheduler","UniPCMultistepScheduler"]},"disableModalityDetection":{"type":"boolean","description":"If false, the process uses the given image to detect the modality.\nIf true (default), the process will not try to detect the modality of the given image.\n\nFor example:\nwith `pose` modality and `false` value, the process will detect the pose of people in the given image\nwith `depth` modality and `false` value, the process will detect the depth of the given image\nwith `scribble` modality and `true` value, the process will use the given image as a scribble\n\n⚠️ For models of the FLUX schnell or dev families, this parameter is ignored. The modality detection is always disabled. 
⚠️","default":true},"ipAdapterImages":{"type":"array","description":"The IpAdapter images as a data url.","items":{"type":"string"}},"imageParentId":{"type":"string","description":"Specifies the parent asset Id for the image when provided as a dataurl."},"baseModelId":{"type":"string","description":"The base model to use for the inference. Only Flux LoRA models can use this parameter.\nAllowed values are available in the model's attribute: `compliantModelIds`"},"controlStart":{"type":"number","description":"Specifies the starting point of the ControlNet guidance during the inference process.\n\nOnly available for Flux.1-dev based models.\n\nThe value represents the percentage of total inference steps where the ControlNet guidance starts.\nFor example:\n- 0.0: ControlNet guidance starts at the beginning of the inference steps\n- 0.5: ControlNet guidance starts at the middle of the inference steps","minimum":0,"maximum":0.9},"height":{"type":"number","description":"The height of the generated images, must be a 8 multiple (within [64, 2048], default: 512)\nIf model.type is `sd-xl`, `sd-xl-lora`, `sd-xl-composition` the height must be within [512, 2048]\nIf model.type is `sd-1_5`, the height must be within [64, 1024]\nIf model.type is `flux.1.1-pro-ultra`, you can use the aspectRatio parameter instead","minimum":64,"maximum":2048},"imageHide":{"type":"boolean","description":"Toggles the hidden status of the image when provided as a dataurl.","default":false},"controlImageId":{"type":"string","description":"The controlnet input image as an AssetId. Will be ignored if the `controlnet` parameter is provided"},"image":{"type":"string","description":"The input image as a data URL (example: \"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVQYV2NgYAAAAAMAAWgmWQ0AAAAASUVORK5CYII=\") or the asset ID (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"imageId":{"type":"string","description":"Deprecated: The input image as an AssetId. 
Prefer to use image with the asset ID instead."},"ipAdapterImageId":{"type":"string","description":"Deprecated for type txt2img-ip-adapter and img2img-ip-adapter, use `ipAdapterImageIds` instead.\nThe IpAdapter image as an AssetId. Cannot be set if `ipAdapterImage` is provided. Will be ignored if the `ipAdapterImageIds` parameter is provided."},"ipAdapterScale":{"type":"number","description":"Deprecated for type txt2img-ip-adapter and img2img-ip-adapter, use `ipAdapterScales` instead.\nIpAdapter scale factor (within [0.0, 1.0], default: 0.9). Will be ignored if the `ipAdapterScales` parameter is provided","minimum":0,"maximum":1},"modelEpoch":{"type":"string","description":"The epoch of the model to use for the inference. Only available for Flux Lora Trained models."},"hideResults":{"type":"boolean","description":"If set, generated assets will be hidden and not returned in the list of images of the inference\nor when listing assets (default: false)","default":false},"ipAdapterScales":{"type":"array","description":"IpAdapter scale factors (within [0.0, 1.0], default: 0.9).","items":{"type":"number"}},"negativePrompt":{"type":"string","description":"The prompt not to guide the image generation, ignored when guidance < 1 (example: \"((ugly face))\")\nFor Flux based model (not Fast-Flux): requires negativePromptStrength > 0 and active only for inference types txt2img / img2img / controlnet."},"intermediateImages":{"type":"boolean","description":"Enable or disable the intermediate images generation (default: false)","default":false},"concepts":{"type":"array","items":{"type":"object","properties":{"modelId":{"type":"string","description":"The model ID (example: \"model_eyVcnFJcR92BxBkz7N6g5w\")"},"modelEpoch":{"type":"string","description":"The epoch of the model (example: \"000001\")\nOnly available for Flux Lora Trained models"},"scale":{"type":"number","description":"The scale of the model (example: 1.0)\nFor Flux Kontext Prompt Editing, the scale is between 0 and 
2.","minimum":-2,"maximum":2}},"required":["modelId","scale"]}},"controlImage":{"type":"string","description":"The controlnet input image as a data URL (example: \"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVQYV2NgYAAAAAMAAWgmWQ0AAAAASUVORK5CYII=\")"},"guidance":{"type":"number","description":"Controls how closely the generated image follows the prompt. Higher values result in stronger adherence to the prompt. Default and allowed values depend on the model type:\n- For Flux dev models, the default is 3.5 and allowed values are within [0, 10]\n- For Flux pro models, the default is 3 and allowed values are within [2, 5]\n- For SDXL models, the default is 6 and allowed values are within [0, 20]\n- For SD1.5 models, the default is 7.5 and allowed values are within [0, 20]","minimum":0,"maximum":20},"numInferenceSteps":{"type":"number","description":"The number of denoising steps for each image generation (within [1, 150], default: 30)","minimum":1,"maximum":150},"numSamples":{"type":"number","description":"The number of images to generate (within [1, 128], default: 4)","minimum":1,"maximum":128},"width":{"type":"number","description":"The width of the generated images, must be a 8 multiple (within [64, 2048], default: 512)\nIf model.type is `sd-xl`, `sd-xl-lora`, `sd-xl-composition` the width must be within [512, 2048]\nIf model.type is `sd-1_5`, the width must be within [64, 1024]\nIf model.type is `flux.1.1-pro-ultra`, you can use the aspectRatio parameter instead","minimum":64,"maximum":2048},"negativePromptStrength":{"type":"number","description":"Only applicable for flux-dev based models for `txt2img`, `img2img`, and `controlnet` inference types.\n\nControls the influence of the negative prompt. Default 0 means the negative prompt has no effect. 
Higher values increase negative prompt influence.\nMust be > 0 if negativePrompt is provided.","minimum":0,"maximum":10},"prompt":{"type":"string","description":"Full text prompt including the model placeholder. (example: \"an illustration of phoenix in a fantasy world, flying over a mountain, 8k, bokeh effect\")"}}},"PostUploadsRequest":{"type":"object","properties":{"fileName":{"type":"string","description":"Required for multipart upload. The original file name of the image (example: \"low-res-image.jpg\"). It will be ignored if assetId is provided."},"fileSize":{"type":"number","description":"Required for multipart upload. The size of the file in bytes"},"kind":{"type":"string","description":"Required for multipart upload and url. The purpose of the file once validated (example: \"model\")","enum":["3d","asset","audio","avatar","image","model","video"]},"civitaiModelUrl":{"type":"string","description":"The civitai.com url of the model (example: \"https://civitai.com/models/370194/translucent-subsurface-scattering-test?modelVersionId=413566\")."},"huggingFaceModelName":{"type":"string","description":"The huggingface.co modelName (example: \"stabilityai/stable-diffusion-xl-base-1.0\").\nNo need to setup other fields if you setup huggingFaceModelName"},"parts":{"type":"number","description":"Required for multipart upload. The number of parts the file will be uploaded in","minimum":1,"maximum":10000},"contentType":{"type":"string","description":"Required for multipart upload. The MIME type of the file (example: \"image/jpeg\")"},"url":{"type":"string","description":"The url where to download the file.\nIf you setup url you MUST setup kind as well."},"assetOptions":{"type":"object","description":"Asset extra options. Only available for kinds which produce an asset. 
(Not available for model kind)","properties":{"hide":{"type":"boolean","description":"Specify if the asset should be hidden from the user."},"collectionIds":{"type":"array","description":"The collection ids to add the asset to.","items":{"type":"string"}},"parentId":{"type":"string","description":"The parentId of the asset."}}}}},"GetModelsInferencesImagesByModelIdAndInferenceIdAndImageIdResponse":{"type":"object","required":["image"],"properties":{"image":{"type":"object","properties":{"seed":{"type":"string","description":"The seed used to generate this image"},"id":{"type":"string","description":"The image ID (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"url":{"type":"string","description":"The image URL"}},"required":["id","seed","url"]}}},"PostControlnetInferencesRequest":{"type":"object","required":["modality","modelId","prompt"],"properties":{"controlEnd":{"type":"number","description":"Specifies how long the ControlNet guidance should be applied during the inference process.\n\nOnly available for Flux.1-dev based models.\n\nThe value represents the percentage of total inference steps where the ControlNet guidance is active.\nFor example:\n- 1.0: ControlNet guidance is applied during all inference steps\n- 0.5: ControlNet guidance is only applied during the first half of inference steps\n\nDefault values:\n- 0.5 for Canny modality\n- 0.6 for all other modalities","minimum":0.1,"maximum":1},"modality":{"type":"string","description":"The modality associated with the control image used for the generation: it can either be an object with a combination of maximum\n\nFor models of SD1.5 family:\n - up to 3 modalities from `canny`, `pose`, `depth`, `lines`, `seg`, `scribble`, `lineart`, `normal-map`, `illusion`\n - or one of the following presets: `character`, `landscape`, `city`, `interior`.\n\nFor models of the SDXL family:\n - up to 3 modalities from `canny`, `pose`, `depth`, `seg`, `illusion`, `scribble`\n - or one of the following presets: `character`, 
`landscape`.\n\nFor models of the FLUX schnell or dev families:\n- one modality from: `canny`, `tile`, `depth`, `blur`, `pose`, `gray`, `low-quality`\n\nOptionally, you can associate a value to these modalities or presets. The value must be within `]0.0, 1.0]`.\n\nExamples:\n- `canny`\n- `depth:0.5,pose:1.0`\n- `canny:0.5,depth:0.5,lines:0.3`\n- `landscape`\n- `character:0.5`\n- `illusion:1`\n\nNote: if you use a value that is not supported by the model family, this will result in an error.","enum":["blur","canny","depth","gray","illusion","lineart","lines","low-quality","normal-map","pose","scribble","seg","sketch","tile"]},"seed":{"type":"string","description":"Used to reproduce previous results. Default: randomly generated number."},"modelId":{"type":"string","description":"The model id to use for the inference"},"scheduler":{"type":"string","description":"The scheduler to use to override the default configured for the model. See detailed documentation for more details.","enum":["DDIMScheduler","DDPMScheduler","DEISMultistepScheduler","DPMSolverMultistepScheduler","DPMSolverSinglestepScheduler","EulerAncestralDiscreteScheduler","EulerDiscreteScheduler","HeunDiscreteScheduler","KDPM2AncestralDiscreteScheduler","KDPM2DiscreteScheduler","LCMScheduler","LMSDiscreteScheduler","PNDMScheduler","TCDScheduler","UniPCMultistepScheduler"]},"disableModalityDetection":{"type":"boolean","description":"If false, the process uses the given image to detect the modality.\nIf true (default), the process will not try to detect the modality of the given image.\n\nFor example:\nwith `pose` modality and `false` value, the process will detect the pose of people in the given image\nwith `depth` modality and `false` value, the process will detect the depth of the given image\nwith `scribble` modality and `true` value, the process will use the given image as a scribble\n\n⚠️ For models of the FLUX schnell or dev families, this parameter is ignored. 
The modality detection is always disabled. ⚠️","default":true},"imageParentId":{"type":"string","description":"Specifies the parent asset Id for the image when provided as a dataurl."},"baseModelId":{"type":"string","description":"The base model to use for the inference. Only Flux LoRA models can use this parameter.\nAllowed values are available in the model's attribute: `compliantModelIds`"},"controlStart":{"type":"number","description":"Specifies the starting point of the ControlNet guidance during the inference process.\n\nOnly available for Flux.1-dev based models.\n\nThe value represents the percentage of total inference steps where the ControlNet guidance starts.\nFor example:\n- 0.0: ControlNet guidance starts at the beginning of the inference steps\n- 0.5: ControlNet guidance starts at the middle of the inference steps","minimum":0,"maximum":0.9},"height":{"type":"number","description":"The height of the generated images, must be a 8 multiple (within [64, 2048], default: 512)\nIf model.type is `sd-xl`, `sd-xl-lora`, `sd-xl-composition` the height must be within [512, 2048]\nIf model.type is `sd-1_5`, the height must be within [64, 1024]\nIf model.type is `flux.1.1-pro-ultra`, you can use the aspectRatio parameter instead","minimum":64,"maximum":2048},"imageHide":{"type":"boolean","description":"Toggles the hidden status of the image when provided as a dataurl.","default":false},"controlImageId":{"type":"string","description":"The controlnet input image as an AssetId. Will be ignored if the `controlnet` parameter is provided"},"image":{"type":"string","description":"The input image as a data URL (example: \"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVQYV2NgYAAAAAMAAWgmWQ0AAAAASUVORK5CYII=\") or the asset ID (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"imageId":{"type":"string","description":"Deprecated: The input image as an AssetId. 
Prefer to use image with the asset ID instead."},"modelEpoch":{"type":"string","description":"The epoch of the model to use for the inference. Only available for Flux Lora Trained models."},"hideResults":{"type":"boolean","description":"If set, generated assets will be hidden and not returned in the list of images of the inference\nor when listing assets (default: false)","default":false},"negativePrompt":{"type":"string","description":"The prompt not to guide the image generation, ignored when guidance < 1 (example: \"((ugly face))\")\nFor Flux based model (not Fast-Flux): requires negativePromptStrength > 0 and active only for inference types txt2img / img2img / controlnet."},"intermediateImages":{"type":"boolean","description":"Enable or disable the intermediate images generation (default: false)","default":false},"concepts":{"type":"array","items":{"type":"object","properties":{"modelId":{"type":"string","description":"The model ID (example: \"model_eyVcnFJcR92BxBkz7N6g5w\")"},"modelEpoch":{"type":"string","description":"The epoch of the model (example: \"000001\")\nOnly available for Flux Lora Trained models"},"scale":{"type":"number","description":"The scale of the model (example: 1.0)\nFor Flux Kontext Prompt Editing, the scale is between 0 and 2.","minimum":-2,"maximum":2}},"required":["modelId","scale"]}},"controlImage":{"type":"string","description":"The controlnet input image as a data URL (example: \"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVQYV2NgYAAAAAMAAWgmWQ0AAAAASUVORK5CYII=\")"},"guidance":{"type":"number","description":"Controls how closely the generated image follows the prompt. Higher values result in stronger adherence to the prompt. 
Default and allowed values depend on the model type:\n- For Flux dev models, the default is 3.5 and allowed values are within [0, 10]\n- For Flux pro models, the default is 3 and allowed values are within [2, 5]\n- For SDXL models, the default is 6 and allowed values are within [0, 20]\n- For SD1.5 models, the default is 7.5 and allowed values are within [0, 20]","minimum":0,"maximum":20},"numInferenceSteps":{"type":"number","description":"The number of denoising steps for each image generation (within [1, 150], default: 30)","minimum":1,"maximum":150},"numSamples":{"type":"number","description":"The number of images to generate (within [1, 128], default: 4)","minimum":1,"maximum":128},"width":{"type":"number","description":"The width of the generated images, must be a 8 multiple (within [64, 2048], default: 512)\nIf model.type is `sd-xl`, `sd-xl-lora`, `sd-xl-composition` the width must be within [512, 2048]\nIf model.type is `sd-1_5`, the width must be within [64, 1024]\nIf model.type is `flux.1.1-pro-ultra`, you can use the aspectRatio parameter instead","minimum":64,"maximum":2048},"negativePromptStrength":{"type":"number","description":"Only applicable for flux-dev based models for `txt2img`, `img2img`, and `controlnet` inference types.\n\nControls the influence of the negative prompt. Default 0 means the negative prompt has no effect. Higher values increase negative prompt influence.\nMust be > 0 if negativePrompt is provided.","minimum":0,"maximum":10},"prompt":{"type":"string","description":"Full text prompt including the model placeholder. (example: \"an illustration of phoenix in a fantasy world, flying over a mountain, 8k, bokeh effect\")"}}},"PostWebhooksModalJobIdResponse":{"type":"object","properties":{"message":{"type":"string"}}},"PostControlnetInpaintIpAdapterInferencesRequest":{"type":"object","required":["modelId","prompt"],"properties":{"ipAdapterImageIds":{"type":"array","description":"The IpAdapter images as an AssetId. 
Will be ignored if the `ipAdapterImages` parameter is provided","items":{"type":"string"}},"seed":{"type":"string","description":"Used to reproduce previous results. Default: randomly generated number."},"strength":{"type":"number","description":"Controls the noise intensity introduced to the input image, where a value of 1.0 completely erases the original image's details. Available for img2img and inpainting. (within [0.01, 1.0], default: 0.75)","minimum":0.01,"maximum":1},"modelId":{"type":"string","description":"The model id to use for the inference"},"ipAdapterType":{"type":"string","description":"The type of IP Adapter model to use. Must be one of [`style`, `character`], defaults to `style`","enum":["character","style"],"default":"style"},"ipAdapterImage":{"type":"string","description":"Deprecated for type txt2img-ip-adapter and img2img-ip-adapter, use `ipAdapterImages` instead.\nThe IpAdapter image as a data url. Will be ignored if the `ipAdapterImages` parameter is provided."},"scheduler":{"type":"string","description":"The scheduler to use to override the default configured for the model. See detailed documentation for more details.","enum":["DDIMScheduler","DDPMScheduler","DEISMultistepScheduler","DPMSolverMultistepScheduler","DPMSolverSinglestepScheduler","EulerAncestralDiscreteScheduler","EulerDiscreteScheduler","HeunDiscreteScheduler","KDPM2AncestralDiscreteScheduler","KDPM2DiscreteScheduler","LCMScheduler","LMSDiscreteScheduler","PNDMScheduler","TCDScheduler","UniPCMultistepScheduler"]},"disableMerging":{"type":"boolean","description":"If set to true, the entire input image will likely change during inpainting. 
This results in faster inferences, but the output image will be harder to integrate if the input is just a small part of a larger image.","default":false},"ipAdapterImages":{"type":"array","description":"The IpAdapter images as a data url.","items":{"type":"string"}},"imageParentId":{"type":"string","description":"Specifies the parent asset Id for the image when provided as a dataurl."},"baseModelId":{"type":"string","description":"The base model to use for the inference. Only Flux LoRA models can use this parameter.\nAllowed values are available in the model's attribute: `compliantModelIds`"},"height":{"type":"number","description":"The height of the generated images, must be a 8 multiple (within [64, 2048], default: 512)\nIf model.type is `sd-xl`, `sd-xl-lora`, `sd-xl-composition` the height must be within [512, 2048]\nIf model.type is `sd-1_5`, the height must be within [64, 1024]\nIf model.type is `flux.1.1-pro-ultra`, you can use the aspectRatio parameter instead","minimum":64,"maximum":2048},"imageHide":{"type":"boolean","description":"Toggles the hidden status of the image when provided as a dataurl.","default":false},"mask":{"type":"string","description":"The mask as a data URL, used to determine the area of change. The mask is a binary mask made out of white and black pixels. The white area is the one that will be replaced. 
(example: \"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAgAAAAIACAMAAADDpiTIAAABiVBMVEUAAADw8PDCwsLExMT5+fn19fX8/Pz////+/v79/f2hoaH6+vrc3NxnZ2dDQ0P4+PhkZGTs7OzOzs6Ojo709PRiYmLd3d1paWkoKCji4uI9PT3n5+fe3t7z8/NISEiysrLg4ODk5OSYmJh/f3/u7u5lZWVRUVHS0tKIiIg+Pj7p6emXl5dUVFQYGBjKysqtra1TU1PT09M8PDwcHBzR0dHq6uoEBAQmJiZ8fHzm5ub7+/swMDCrq6uKioqpqalHR0c3NzdOTk6BgYF7e3uwsLCAgIB3d3empqaNjY06OjrW1tZhYWG0tLQgICBxcXEICAhPT0/o6OgkJCRzc3N5eXnV1dXj4+NKSkobGxtaWlpfX1/a2trBwcF2dnYlJSV9fX3Hx8eSkpJNTU1sbGyWlpYRERGCgoIMDAzPz8+MjIy4uLiTk5PNzc3X19cxMTGDg4MpKSm8vLxGRkavr69QUFAKCgoqKiq2trbt7e329vaGhobl5eVra2tZWVk4ODgzMzNcXFyurq63t7dzhmTOAAAFeElEQVR4nO3dZXMUaRQF4EBIOgkQdFncFhZfZPF1d3d3d3f95TthC6iQTE9kuk+Y93m+MpW6t8+p7mGkZ2gIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACK8MDyR784eTw9BRkj1RU70rPQuvFqmjXpeWjVPdUMn6VnokUz86+qjemhaM3obAWoqvRYtGX2/DWgFA93K8DK9GS0Yku3AlTr06PRht+6FqB6Mj0bLVjfvQCeBpRgb00BRtLD0YKaAjgFlKCuAF4ULsAKp4CyHakrgLcGC1BXAKeAAihA4cbqCnAyPR2N211XgNH0dDRup2tA4RSgcLUFOJaejsbVFuC59HQ0bltdAW5KT0fjVtcV4GB6OppXV4C30sPRvLoCHE0PR/PqCvBEejiaV1eA9Gy0QAEKV5P/WHo2WlBTgHXp2WhBTQFuSM9GCzwFKNv3ngKU7WL3ApxOz0YLXAHKNqwAZavJf0V6NlpQU4Bd6dloXpd7BLkClKImfwUogO+Glu3VuvwVYPDV5q8AA2+lAhTtVH3+CjDoeuRf3ZgekEYt61WAi+kJaVLt98JdAwbe/b3z14ABdn4u+VfVY+k5acbNc8u/qjalJ6UJd881/47t6WHpu2PzyL/yCzID56755T/llvTM9M8H88+/Y5+XhQbE2QXlP2XVg5/cfnrPmvHLtxYcG3nhu+dXp/dhfmpvCLIw4+mdmLtn+59/xzPptZirRvL3AeLrRkP5uwxcJxrL3y8MLSVP/XHr7P/QYP5VtbbdJelm/7RYduza+ebXmzdfakSj+XvrcIn4tOGYu9uQXp2O92P5u5vAUjDnt3mbkF6exi/z9X5Mb89r0QLcl16faP7uLR9X+1XP5qXXJ5u/AqT1/KZHs35J71+8bP5OAGl7svn/nd6/eNn8t6TXJ5r/4fT27Evm/1N6e6InALeVXwKC+b+c3p2h8FOA29Lbc2+0ANXe9P7FO5MtQPVN+gCULpx/VQ2nj0Dh0vl7KTgsHb/vhoSl46+cArLS6XcsTx+Dkv2QTr/jxfRBKNnT6fQ7VqUPQsneS6dfuZ9Y1IZ0+h3b0gehZHU//9eWC+mDULLwB0Iv8b+AoLHe+TQufQyKlg6/8kpgVjr9ygkgK51+Vd2ZPgRlS8dffZ4+AoVL5+9u0mHh/Hem9y9eMv0xNxLPG0k2IL08Q0PLkwXwCkBe9s2gX9PrcyJaABeBvGwBjqfXJ1sAp4A4BShc+BZxbhaf9ki2AI+n9ydbgNfT65MtwD/p9fk5WoAP0+uTPQVsTm9PtgDp5cm+H3
QmvTxD0VNAenWmrIvlfzC9OpeE4h87ml6c/2XuF74svTZXBOIfOZVemqv29w6sv/79KL0y00y0m/+59L5ca0u7DUivywyrekQ2+vGhPj5VWJdelxkOdAtrbNvE5ceM960ByU2Z3UMzY1q56cj0x1xQgEG26WpA4wfWfjnrY/p0SxG/FbI0vTP1EcHDq7fWPOTtvhTgfGsr0Xf9uLHUV+klWIQ/F38deCO9A4uz2E+TH0ovwGIt7ovl6enpg4nRBefvi+EDYniBLw29lB6cvhlewNMBN4gfMOcmT9yxfe4XhInef5Hr0dmtk5NbJ799Ze36uvg3/pWek+btXdkl/jW/p0ejLbuufXYwtvvd9EwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAPfwHLuRXafg0SKQAAAAASUVORK5CYII=\")"},"controlImageId":{"type":"string","description":"The controlnet input image as an AssetId. Will be ignored if the `controlnet` parameter is provided"},"image":{"type":"string","description":"The input image as a data URL (example: \"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVQYV2NgYAAAAAMAAWgmWQ0AAAAASUVORK5CYII=\") or the asset ID (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"imageId":{"type":"string","description":"Deprecated: The input image as an AssetId. Prefer to use image with the asset ID instead."},"ipAdapterImageId":{"type":"string","description":"Deprecated for type txt2img-ip-adapter and img2img-ip-adapter, use `ipAdapterImageIds` instead.\nThe IpAdapter image as an AssetId. Cannot be set if `ipAdapterImage` is provided. Will be ignored if the `ipAdapterImageIds` parameter is provided."},"ipAdapterScale":{"type":"number","description":"Deprecated for type txt2img-ip-adapter and img2img-ip-adapter, use `ipAdapterScales` instead.\nIpAdapter scale factor (within [0.0, 1.0], default: 0.9). Will be ignored if the `ipAdapterScales` parameter is provided","minimum":0,"maximum":1},"modelEpoch":{"type":"string","description":"The epoch of the model to use for the inference. 
Only available for Flux Lora Trained models."},"hideResults":{"type":"boolean","description":"If set, generated assets will be hidden and not returned in the list of images of the inference\nor when listing assets (default: false)","default":false},"ipAdapterScales":{"type":"array","description":"IpAdapter scale factors (within [0.0, 1.0], default: 0.9).","items":{"type":"number"}},"maskId":{"type":"string","description":"The mask as an AssetId. Will be ignored if the `image` parameter is provided"},"negativePrompt":{"type":"string","description":"The prompt not to guide the image generation, ignored when guidance < 1 (example: \"((ugly face))\")\nFor Flux based model (not Fast-Flux): requires negativePromptStrength > 0 and active only for inference types txt2img / img2img / controlnet."},"intermediateImages":{"type":"boolean","description":"Enable or disable the intermediate images generation (default: false)","default":false},"concepts":{"type":"array","items":{"type":"object","properties":{"modelId":{"type":"string","description":"The model ID (example: \"model_eyVcnFJcR92BxBkz7N6g5w\")"},"modelEpoch":{"type":"string","description":"The epoch of the model (example: \"000001\")\nOnly available for Flux Lora Trained models"},"scale":{"type":"number","description":"The scale of the model (example: 1.0)\nFor Flux Kontext Prompt Editing, the scale is between 0 and 2.","minimum":-2,"maximum":2}},"required":["modelId","scale"]}},"controlImage":{"type":"string","description":"The controlnet input image as a data URL (example: \"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVQYV2NgYAAAAAMAAWgmWQ0AAAAASUVORK5CYII=\")"},"guidance":{"type":"number","description":"Controls how closely the generated image follows the prompt. Higher values result in stronger adherence to the prompt. 
Default and allowed values depend on the model type:\n- For Flux dev models, the default is 3.5 and allowed values are within [0, 10]\n- For Flux pro models, the default is 3 and allowed values are within [2, 5]\n- For SDXL models, the default is 6 and allowed values are within [0, 20]\n- For SD1.5 models, the default is 7.5 and allowed values are within [0, 20]","minimum":0,"maximum":20},"numInferenceSteps":{"type":"number","description":"The number of denoising steps for each image generation (within [1, 150], default: 30)","minimum":1,"maximum":150},"numSamples":{"type":"number","description":"The number of images to generate (within [1, 128], default: 4)","minimum":1,"maximum":128},"width":{"type":"number","description":"The width of the generated images, must be a 8 multiple (within [64, 2048], default: 512)\nIf model.type is `sd-xl`, `sd-xl-lora`, `sd-xl-composition` the width must be within [512, 2048]\nIf model.type is `sd-1_5`, the width must be within [64, 1024]\nIf model.type is `flux.1.1-pro-ultra`, you can use the aspectRatio parameter instead","minimum":64,"maximum":2048},"negativePromptStrength":{"type":"number","description":"Only applicable for flux-dev based models for `txt2img`, `img2img`, and `controlnet` inference types.\n\nControls the influence of the negative prompt. Default 0 means the negative prompt has no effect. Higher values increase negative prompt influence.\nMust be > 0 if negativePrompt is provided.","minimum":0,"maximum":10},"prompt":{"type":"string","description":"Full text prompt including the model placeholder. 
(example: \"an illustration of phoenix in a fantasy world, flying over a mountain, 8k, bokeh effect\")"}}},"PutModelPresetByModelIdAndPresetIdResponse":{"type":"object","required":["preset"],"properties":{"preset":{"type":"object","properties":{"createdAt":{"type":"string","description":"The preset creation date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"isDefault":{"type":"boolean","description":"Whether the preset is the default preset of the model (example: true)"},"modelId":{"type":"string","description":"The model ID (example: \"eyVcnFJcR92BxBkz7N6g5w\")"},"id":{"type":"string","description":"The preset ID (example: \"eyVcnFJcR92BxBkz7N6g5w\")"},"authorId":{"type":"string","description":"The author user ID (example: \"VFhihHKMRZyDDnZAJwLb2Q\")"},"ownerId":{"type":"string","description":"The owner ID (example: \"VFhihHKMRZyDDnZAJwLb2Q\")"},"parameters":{"type":"object","description":"The inference parameters","properties":{"ipAdapterImageIds":{"type":"array","description":"Asset id of the input IpAdapter images","items":{"type":"string"}},"styleFidelity":{"type":"number","description":"If style_fidelity=1.0, control more important, else if style_fidelity=0.0, prompt more important, else balanced\nOnly for \"reference\" inference type","minimum":0,"maximum":1},"controlEnd":{"type":"number","description":"Specifies how long the ControlNet guidance should be applied during the inference process.\n\nOnly available for Flux.1-dev based models.\n\nThe value represents the percentage of total inference steps where the ControlNet guidance is active.\nFor example:\n- 1.0: ControlNet guidance is applied during all inference steps\n- 0.5: ControlNet guidance is only applied during the first half of inference steps\n\nDefault values:\n- 0.5 for Canny modality\n- 0.6 for all other modalities","minimum":0.1,"maximum":1},"modality":{"type":"string","description":"The modality associated with the control image used for the generation: it can either be an 
object with a combination of maximum\n\nFor models of SD1.5 family:\n - up to 3 modalities from `canny`, `pose`, `depth`, `lines`, `seg`, `scribble`, `lineart`, `normal-map`, `illusion`\n - or one of the following presets: `character`, `landscape`, `city`, `interior`.\n\nFor models of the SDXL family:\n - up to 3 modalities from `canny`, `pose`, `depth`, `seg`, `illusion`, `scribble`\n - or one of the following presets: `character`, `landscape`.\n\nFor models of the FLUX schnell or dev families:\n- one modality from: `canny`, `tile`, `depth`, `blur`, `pose`, `gray`, `low-quality`\n\nOptionally, you can associate a value to these modalities or presets. The value must be within `]0.0, 1.0]`.\n\nExamples:\n- `canny`\n- `depth:0.5,pose:1.0`\n- `canny:0.5,depth:0.5,lines:0.3`\n- `landscape`\n- `character:0.5`\n- `illusion:1`\n\nNote: if you use a value that is not supported by the model family, this will result in an error.","enum":["blur","canny","depth","gray","illusion","lineart","lines","low-quality","normal-map","pose","scribble","seg","sketch","tile"]},"seed":{"type":"string","description":"Used to reproduce previous results. Default: randomly generated number."},"strength":{"type":"number","description":"Controls the noise intensity introduced to the input image, where a value of 1.0 completely erases the original image's details. Available for img2img and inpainting. (within [0.01, 1.0], default: 0.75)","minimum":0.01,"maximum":1},"ipAdapterType":{"type":"string","description":"The type of IP Adapter model to use. Must be one of [`style`, `character`], defaults to `style`","enum":["character","style"],"default":"style"},"aspectRatio":{"type":"string","description":"The aspect ratio of the generated images. 
Only used for the model flux.1.1-pro-ultra.\nThe aspect ratio is a string formatted as \"width:height\" (example: \"16:9\").","enum":["16:9","1:1","21:9","2:3","3:2","3:4","4:3","4:5","5:4","9:16","9:21"],"default":"1:1"},"type":{"type":"string","description":"The type of inference to use. Example: txt2img, img2img, etc.\n\nSelecting the right type will condition the expected parameters.\n\nNote: if model.type is `sd-xl*` or `sd-1_5*`, when using the `\"inpaint\"` inference type, Scenario determines the best available `baseModel` for a given `modelId`: one of `[\"stable-diffusion-inpainting\", \"stable-diffusion-xl-1.0-inpainting-0.1\"] will be used.","enum":["controlnet","controlnet_img2img","controlnet_inpaint","controlnet_inpaint_ip_adapter","controlnet_ip_adapter","controlnet_reference","controlnet_texture","img2img","img2img_ip_adapter","img2img_texture","inpaint","inpaint_ip_adapter","outpaint","reference","reference_texture","txt2img","txt2img_ip_adapter","txt2img_texture"]},"ipAdapterImage":{"type":"string","description":"Signed URL to display the IpAdapter image"},"scheduler":{"type":"string","description":"The scheduler to use to override the default configured for the model. See detailed documentation for more details.","enum":["DDIMScheduler","DDPMScheduler","DEISMultistepScheduler","DPMSolverMultistepScheduler","DPMSolverSinglestepScheduler","EulerAncestralDiscreteScheduler","EulerDiscreteScheduler","HeunDiscreteScheduler","KDPM2AncestralDiscreteScheduler","KDPM2DiscreteScheduler","LCMScheduler","LMSDiscreteScheduler","PNDMScheduler","TCDScheduler","UniPCMultistepScheduler"]},"disableMerging":{"type":"boolean","description":"If set to true, the entire input image will likely change during inpainting. 
This results in faster inferences, but the output image will be harder to integrate if the input is just a small part of a larger image.","default":false},"disableModalityDetection":{"type":"boolean","description":"If false, the process uses the given image to detect the modality.\nIf true (default), the process will not try to detect the modality of the given image.\n\nFor example:\nwith `pose` modality and `false` value, the process will detect the pose of people in the given image\nwith `depth` modality and `false` value, the process will detect the depth of the given image\nwith `scribble` modality and `true` value, the process will use the given image as a scribble\n\n⚠️ For models of the FLUX schnell or dev families, this parameter is ignored. The modality detection is always disabled. ⚠️","default":true},"ipAdapterImages":{"type":"array","description":"Signed URL to display the IpAdapter images","items":{"type":"string"}},"baseModelId":{"type":"string","description":"The base model to use for the inference. 
Only Flux LoRA models can use this parameter.\nAllowed values are available in the model's attribute: `compliantModelIds`"},"controlStart":{"type":"number","description":"Specifies the starting point of the ControlNet guidance during the inference process.\n\nOnly available for Flux.1-dev based models.\n\nThe value represents the percentage of total inference steps where the ControlNet guidance starts.\nFor example:\n- 0.0: ControlNet guidance starts at the beginning of the inference steps\n- 0.5: ControlNet guidance starts at the middle of the inference steps","minimum":0,"maximum":0.9},"height":{"type":"number","description":"The height of the generated images, must be a 8 multiple (within [64, 2048], default: 512)\nIf model.type is `sd-xl`, `sd-xl-lora`, `sd-xl-composition` the height must be within [512, 2048]\nIf model.type is `sd-1_5`, the height must be within [64, 1024]\nIf model.type is `flux.1.1-pro-ultra`, you can use the aspectRatio parameter instead","minimum":64,"maximum":2048},"mask":{"type":"string","description":"Signed URL to display the mask image"},"controlImageId":{"type":"string","description":"Asset id of the controlnet input image"},"image":{"type":"string","description":"Signed URL to display the input image"},"imageId":{"type":"string","description":"Asset id of the input image"},"ipAdapterImageId":{"type":"string","description":"Asset id of the input IpAdapter image"},"ipAdapterScale":{"type":"number","description":"IpAdapter scale factor (within [0.0, 1.0], default: 0.9).","minimum":0,"maximum":1},"modelEpoch":{"type":"string","description":"The epoch of the model to use for the inference. 
Only available for Flux Lora Trained models."},"hideResults":{"type":"boolean","description":"If set, generated assets will be hidden and not returned in the list of images of the inference\nor when listing assets (default: false)","default":false},"ipAdapterScales":{"type":"array","description":"IpAdapter scale factors (within [0.0, 1.0], default: 0.9).","items":{"type":"number"}},"maskId":{"type":"string","description":"Asset id of the mask image"},"referenceAdain":{"type":"boolean","description":"Whether to use reference adain\nOnly for \"reference\" inference type","default":false},"negativePrompt":{"type":"string","description":"The prompt not to guide the image generation, ignored when guidance < 1 (example: \"((ugly face))\")\nFor Flux based model (not Fast-Flux): requires negativePromptStrength > 0 and active only for inference types txt2img / img2img / controlnet."},"intermediateImages":{"type":"boolean","description":"Enable or disable the intermediate images generation (default: false)","default":false},"concepts":{"type":"array","items":{"type":"object","properties":{"modelId":{"type":"string","description":"The model ID (example: \"model_eyVcnFJcR92BxBkz7N6g5w\")"},"modelEpoch":{"type":"string","description":"The epoch of the model (example: \"000001\")\nOnly available for Flux Lora Trained models"},"scale":{"type":"number","description":"The scale of the model (example: 1.0)\nFor Flux Kontext Prompt Editing, the scale is between 0 and 2.","minimum":-2,"maximum":2}},"required":["modelId","scale"]}},"controlImage":{"type":"string","description":"Signed URL to display the controlnet input image"},"guidance":{"type":"number","description":"Controls how closely the generated image follows the prompt. Higher values result in stronger adherence to the prompt. 
Default and allowed values depend on the model type:\n- For Flux dev models, the default is 3.5 and allowed values are within [0, 10]\n- For Flux pro models, the default is 3 and allowed values are within [2, 5]\n- For SDXL models, the default is 6 and allowed values are within [0, 20]\n- For SD1.5 models, the default is 7.5 and allowed values are within [0, 20]","minimum":0,"maximum":20},"numInferenceSteps":{"type":"number","description":"The number of denoising steps for each image generation (within [1, 150], default: 30)","minimum":1,"maximum":150},"numSamples":{"type":"number","description":"The number of images to generate (within [1, 128], default: 4)","minimum":1,"maximum":128},"width":{"type":"number","description":"The width of the generated images, must be a 8 multiple (within [64, 2048], default: 512)\nIf model.type is `sd-xl`, `sd-xl-lora`, `sd-xl-composition` the width must be within [512, 2048]\nIf model.type is `sd-1_5`, the width must be within [64, 1024]\nIf model.type is `flux.1.1-pro-ultra`, you can use the aspectRatio parameter instead","minimum":64,"maximum":2048},"negativePromptStrength":{"type":"number","description":"Only applicable for flux-dev based models for `txt2img`, `img2img`, and `controlnet` inference types.\n\nControls the influence of the negative prompt. Default 0 means the negative prompt has no effect. Higher values increase negative prompt influence.\nMust be > 0 if negativePrompt is provided.","minimum":0,"maximum":10},"referenceAttn":{"type":"boolean","description":"Whether to use reference query for self attention's context\nOnly for \"reference\" inference type","default":false},"prompt":{"type":"string","description":"Full text prompt including the model placeholder. 
(example: \"an illustration of phoenix in a fantasy world, flying over a mountain, 8k, bokeh effect\")"}},"required":["prompt","type"]},"updatedAt":{"type":"string","description":"The preset last update date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"}},"required":["authorId","createdAt","id","isDefault","modelId","ownerId","parameters","updatedAt"]}}},"PostSegmentInferencesResponse":{"type":"object","required":["job"],"properties":{"creativeUnitsCost":{"type":"number","description":"The Creative Units cost for the request billed"},"creativeUnitsDiscount":{"type":"number","description":"The Creative Units discount for the request billed"},"job":{"type":"object","properties":{"createdAt":{"type":"string","description":"The job creation date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"jobId":{"type":"string","description":"The job ID (example: \"job_ocZCnG1Df35XRL1QyCZSRxAG8\")"},"metadata":{"type":"object","description":"Metadata of the job with some additional information","properties":{"output":{"type":"object","description":"The output of the job","properties":{}},"input":{"type":"object","description":"The inputs for the job","properties":{}},"assetIds":{"type":"array","description":"List of produced assets for this job","items":{"type":"string"}},"flow":{"type":"array","description":"The flow of the job. Only available for workflow jobs.","items":{"type":"object","properties":{"jobId":{"type":"string","description":"If the flow is part of a WorkflowJob, this is the jobId for the node.\njobId is only available for nodes started. 
A node \"Pending\" for a running workflow job is not started."},"assets":{"type":"array","description":"List of produced assets for this node.","items":{"type":"object","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]}},"logicType":{"type":"string","description":"The type of the logic for the node.\nOnly available for logic nodes.","enum":["if-else"]},"dependsOn":{"type":"array","description":"The nodes that this node depends on.\nOnly available for nodes that have dependencies. Mainly used for user approval nodes.","items":{"type":"string"}},"modelId":{"type":"string","description":"The model id for the node. Mainly used for custom model tasks."},"inputs":{"type":"array","description":"The inputs of the node.","items":{"type":"object","description":"This is used to run the flow.","properties":{"parent":{"type":"boolean","description":"Whether this input represents a parent asset to assign to the produced assets.\nOnly available for \\`file\\` and \\`file_array\\` input types.\n\nFor \\`file_array\\`, the parent asset is the first item in the array."},"color":{"type":"boolean","description":"Whether the input is a color or not. Only available for \\`string\\` input type."},"inputs":{"type":"array","description":"The list of inputs which form an object within a container array.\nAll inputs are the same as the current object.\nThis is only available for type inputs_array inputs.","items":{"type":"object","properties":{}}},"minLength":{"type":"number","description":"The minimum allowed length for string inputs. Also applies to each item in \\`string_array\\`."},"backgroundBehavior":{"type":"string","description":"Specifies the background behavior for the input. 
Only available for \\`file\\` and \\`file_array\\`\ninput types with kind \\`image\\`.","enum":["opaque","transparent"]},"description":{"type":"string","description":"Help text displayed in the UI to provide additional information about the input"},"type":{"type":"string","description":"The data type of the input","enum":["boolean","file","file_array","inputs_array","model","model_array","number","number_array","string","string_array"]},"required":{"type":"object","description":"Set of rules that describes when this input is required:\n- \\`always\\`: Input is always required\n- \\`ifNotDefined\\`: Input is required when another specified input is not defined\n- \\`ifDefined\\`: Input is required when another specified input is defined\n- \\`conditionalValues\\`: Input is required when another input has a specific value\n\nBy default, the input is not required.","properties":{"always":{"type":"boolean","description":"Whether the input is always required"},"ifNotDefined":{"type":"object","description":"Makes this input required when another input is not defined:\n- Key: name of the input that must be undefined\n- Value: message to display when this input is required","properties":{}},"ifDefined":{"type":"object","description":"Makes this input required when another input is defined:\n- Key: name of the input that must be defined\n- Value: message to display when this input is required","properties":{}},"conditionalValues":{"type":"object","description":"Makes this input required when another input has a specific value:\n- Key: name of the input to check\n- Value: operation and allowed values that trigger the requirement","properties":{}}}},"modelTypes":{"type":"array","description":"The allowed model types for this input. 
Example: \\`[\"flux.1-lora\"]\\`.\nOnly available for \\`model_array\\` input type.","items":{"type":"string","enum":["custom","flux.1","flux.1-composition","flux.1-kontext-dev","flux.1-kontext-lora","flux.1-krea-dev","flux.1-krea-lora","flux.1-lora","flux.1-pro","flux.1.1-pro-ultra","flux1.1-pro","gpt-image-1","sd-1_5","sd-1_5-composition","sd-1_5-lora","sd-xl","sd-xl-composition","sd-xl-lora"]}},"ref":{"type":"object","description":"The reference to another input or output of the same workflow.\nMust have at least one of node or conditional.","properties":{"equal":{"type":"string","description":"This is the desired node output value if ref is an if/else node."},"node":{"type":"string","description":"The node id or 'workflow' if the source is a workflow input."},"conditional":{"type":"array","description":"The conditional nodes to reference.\nIf the conditional nodes are successful, the node will be successful.\nIf the conditional nodes are skipped, the node will be skipped.\nContains an array of node ids used to check the status of the nodes.","items":{"type":"string"}},"name":{"type":"string","description":"The name of the input or output to reference.\nIf the type is 'workflow', the name is the name of the input of the workflow is required\nIf the type is 'node', the name is not mandatory, except if you want all outputs of the node.\nTo get all outputs of a node, you can use the name 'all'."}}},"min":{"type":"number","description":"The minimum allowed value. Only available for \\`number\\` and array input types."},"promptSpark":{"type":"boolean","description":"Whether the input is used with prompt spark. Only available for \\`string\\` input type."},"placeholder":{"type":"string","description":"Placeholder text for the input. Only available for 'string' input type."},"group":{"type":"string","description":"Used to visually group inputs together in the UI. 
Inputs with the same group value appear\nconsecutively in the UI."},"allowedValues":{"type":"array","description":"The allowed values for the input. For \`string\` or \`number\` types, creates a single-select\ndropdown.\nFor \`string_array\` type, creates a multi-select dropdown.","items":{}},"costImpact":{"type":"boolean","description":"Whether this input affects the model's cost calculation"},"max":{"type":"number","description":"The maximum allowed value. Only available for \`number\` and \`array\` input types."},"kind":{"type":"string","description":"The asset kind of the input. Only taken into account for \`file\` and \`file_array\` input types.","enum":["3d","audio","document","image","image-hdr","json","video"]},"maskFrom":{"type":"string","description":"The name of the file input field to use as the mask source"},"label":{"type":"string","description":"The label displayed in the UI for this input"},"hint":{"type":"string","description":"Hint text displayed in the UI as a tooltip to guide the user"},"name":{"type":"string","description":"The name that must be used to call the model through the API"},"step":{"type":"number","description":"The step increment for numeric inputs. Only available for \`number\` input type.","minimum":1},"prompt":{"type":"boolean","description":"Whether the input is a prompt. When true, displays as a text area with prompt spark feature.\nOnly available for \`string\` input type."},"maxLength":{"type":"number","description":"The maximum allowed length for \`string\` inputs. 
Also applies to each item in \\`string_array\\`."}},"required":["name","type"]}},"id":{"type":"string","description":"The id of the node."},"logic":{"type":"object","description":"The logic of the node.\nOnly available for logic nodes.","properties":{"default":{"type":"string","description":"The default case of the logic.\nContains the id/output of the node to execute if no case is matched.\nOnly available for if/else nodes."},"transform":{"type":"string","description":"The transform of the logic.\nOnly available for transform nodes."},"cases":{"type":"array","description":"The cases of the logic.\nOnly available for if/else nodes.","items":{"type":"object","properties":{"condition":{"type":"string"},"value":{"type":"string"}},"required":["condition","value"]}}}},"type":{"type":"string","description":"The type of the job for the node.","enum":["custom-model","generate-prompt","logic","remove-background","transform","user-approval","workflow"]},"workflowId":{"type":"string","description":"The workflow id for the node. Mainly used for workflow tasks."},"status":{"type":"string","description":"The status of the node. 
Only available for WorkflowJob nodes.","enum":["failure","pending","processing","skipped","success"]}},"required":["id","status","type"]}},"workflowId":{"type":"string","description":"The workflow ID of the job if job is part of a workflow."},"workflowJobId":{"type":"string","description":"The workflow job ID of the job if job is part of a workflow job."}}},"statusHistory":{"type":"array","description":"The history of the different statuses the job went through with the ISO string date\nof when the job reached each statuses.","items":{"type":"object","properties":{"date":{"type":"string"},"status":{"type":"string","enum":["canceled","failure","finalizing","in-progress","pending","queued","success","warming-up"]}},"required":["date","status"]}},"progress":{"type":"number","description":"Progress of the job (between 0 and 1)"},"authorId":{"type":"string","description":"The author user ID (example: \"dcf121faaa1a0a0bbbd9ca1b73d62aea\")"},"jobType":{"type":"string","description":"The type of job","enum":["assets-download","canvas-export","caption","caption-llava","custom","describe-style","detection","embed","flux","flux-model-training","generate-prompt","image-generation","image-prompt-editing","inference","mesh-preview-rendering","model-download","model-import","model-training","openai-image-generation","patch-image","pixelate","reframe","remove-background","repaint","restyle","segment","skybox-3d","skybox-base-360","skybox-hdri","skybox-upscale-360","texture","translate","upload","upscale","upscale-skybox","upscale-texture","vectorize","workflow"]},"ownerId":{"type":"string","description":"The owner ID (example: \"team_U3Qmc8PCdWXwAQJ4Dvw4tV6D\")"},"billing":{"type":"object","description":"The billing of the job","properties":{"cuDiscount":{"type":"number"},"cuCost":{"type":"number"}},"required":["cuCost","cuDiscount"]},"status":{"type":"string","description":"The current status of the 
job","enum":["canceled","failure","finalizing","in-progress","pending","queued","success","warming-up"]},"updatedAt":{"type":"string","description":"The job last update date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"}},"required":["createdAt","jobId","jobType","metadata","progress","status","statusHistory","updatedAt"]},"segments":{"type":"array","items":{"type":"object","properties":{"checkpoint":{"type":"string","enum":["fastsam_x","sam_b","sam_h"]},"nbMasks":{"type":"number"},"images":{"type":"array","items":{"type":"string"}},"bbox":{"type":"array","items":{}},"masks":{"type":"array","items":{"type":"string"}},"message":{"type":"string"},"contours":{"type":"array","items":{"type":"array","items":{"type":"array","items":{"type":"array","items":{"type":"number"}}}}}},"required":["bbox","checkpoint","message","nbMasks"]}}}},"PostImg2imgTextureInferencesRequest":{"type":"object","required":["modelId","prompt"],"properties":{"image":{"type":"string","description":"The input image as a data URL (example: \"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVQYV2NgYAAAAAMAAWgmWQ0AAAAASUVORK5CYII=\") or the asset ID (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"imageId":{"type":"string","description":"Deprecated: The input image as an AssetId. Prefer to use image with the asset ID instead."},"seed":{"type":"string","description":"Used to reproduce previous results. Default: randomly generated number."},"strength":{"type":"number","description":"Controls the noise intensity introduced to the input image, where a value of 1.0 completely erases the original image's details. Available for img2img and inpainting. (within [0.01, 1.0], default: 0.75)","minimum":0.01,"maximum":1},"modelId":{"type":"string","description":"The model id to use for the inference"},"modelEpoch":{"type":"string","description":"The epoch of the model to use for the inference. 
Only available for Flux Lora Trained models."},"hideResults":{"type":"boolean","description":"If set, generated assets will be hidden and not returned in the list of images of the inference\nor when listing assets (default: false)","default":false},"maskId":{"type":"string","description":"The mask as an AssetId. Will be ignored if the `image` parameter is provided"},"negativePrompt":{"type":"string","description":"The prompt not to guide the image generation, ignored when guidance < 1 (example: \"((ugly face))\")\nFor Flux based model (not Fast-Flux): requires negativePromptStrength > 0 and active only for inference types txt2img / img2img / controlnet."},"scheduler":{"type":"string","description":"The scheduler to use to override the default configured for the model. See detailed documentation for more details.","enum":["DDIMScheduler","DDPMScheduler","DEISMultistepScheduler","DPMSolverMultistepScheduler","DPMSolverSinglestepScheduler","EulerAncestralDiscreteScheduler","EulerDiscreteScheduler","HeunDiscreteScheduler","KDPM2AncestralDiscreteScheduler","KDPM2DiscreteScheduler","LCMScheduler","LMSDiscreteScheduler","PNDMScheduler","TCDScheduler","UniPCMultistepScheduler"]},"intermediateImages":{"type":"boolean","description":"Enable or disable the intermediate images generation (default: false)","default":false},"concepts":{"type":"array","items":{"type":"object","properties":{"modelId":{"type":"string","description":"The model ID (example: \"model_eyVcnFJcR92BxBkz7N6g5w\")"},"modelEpoch":{"type":"string","description":"The epoch of the model (example: \"000001\")\nOnly available for Flux Lora Trained models"},"scale":{"type":"number","description":"The scale of the model (example: 1.0)\nFor Flux Kontext Prompt Editing, the scale is between 0 and 2.","minimum":-2,"maximum":2}},"required":["modelId","scale"]}},"guidance":{"type":"number","description":"Controls how closely the generated image follows the prompt. 
Higher values result in stronger adherence to the prompt. Default and allowed values depend on the model type:\n- For Flux dev models, the default is 3.5 and allowed values are within [0, 10]\n- For Flux pro models, the default is 3 and allowed values are within [2, 5]\n- For SDXL models, the default is 6 and allowed values are within [0, 20]\n- For SD1.5 models, the default is 7.5 and allowed values are within [0, 20]","minimum":0,"maximum":20},"numInferenceSteps":{"type":"number","description":"The number of denoising steps for each image generation (within [1, 150], default: 30)","minimum":1,"maximum":150},"numSamples":{"type":"number","description":"The number of images to generate (within [1, 128], default: 4)","minimum":1,"maximum":128},"width":{"type":"number","description":"The width of the generated images, must be a 8 multiple (within [64, 2048], default: 512)\nIf model.type is `sd-xl`, `sd-xl-lora`, `sd-xl-composition` the width must be within [512, 2048]\nIf model.type is `sd-1_5`, the width must be within [64, 1024]\nIf model.type is `flux.1.1-pro-ultra`, you can use the aspectRatio parameter instead","minimum":64,"maximum":2048},"imageParentId":{"type":"string","description":"Specifies the parent asset Id for the image when provided as a dataurl."},"negativePromptStrength":{"type":"number","description":"Only applicable for flux-dev based models for `txt2img`, `img2img`, and `controlnet` inference types.\n\nControls the influence of the negative prompt. Default 0 means the negative prompt has no effect. Higher values increase negative prompt influence.\nMust be > 0 if negativePrompt is provided.","minimum":0,"maximum":10},"baseModelId":{"type":"string","description":"The base model to use for the inference. Only Flux LoRA models can use this parameter.\nAllowed values are available in the model's attribute: `compliantModelIds`"},"prompt":{"type":"string","description":"Full text prompt including the model placeholder. 
(example: \"an illustration of phoenix in a fantasy world, flying over a mountain, 8k, bokeh effect\")"},"height":{"type":"number","description":"The height of the generated images, must be a 8 multiple (within [64, 2048], default: 512)\nIf model.type is `sd-xl`, `sd-xl-lora`, `sd-xl-composition` the height must be within [512, 2048]\nIf model.type is `sd-1_5`, the height must be within [64, 1024]\nIf model.type is `flux.1.1-pro-ultra`, you can use the aspectRatio parameter instead","minimum":64,"maximum":2048},"imageHide":{"type":"boolean","description":"Toggles the hidden status of the image when provided as a dataurl.","default":false},"mask":{"type":"string","description":"The mask as a data URL, used to determine the area of change. The mask is a binary mask made out of white and black pixels. The white area is the one that will be replaced. (example: \"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAgAAAAIACAMAAADDpiTIAAABiVBMVEUAAADw8PDCwsLExMT5+fn19fX8/Pz////+/v79/f2hoaH6+vrc3NxnZ2dDQ0P4+PhkZGTs7OzOzs6Ojo709PRiYmLd3d1paWkoKCji4uI9PT3n5+fe3t7z8/NISEiysrLg4ODk5OSYmJh/f3/u7u5lZWVRUVHS0tKIiIg+Pj7p6emXl5dUVFQYGBjKysqtra1TU1PT09M8PDwcHBzR0dHq6uoEBAQmJiZ8fHzm5ub7+/swMDCrq6uKioqpqalHR0c3NzdOTk6BgYF7e3uwsLCAgIB3d3empqaNjY06OjrW1tZhYWG0tLQgICBxcXEICAhPT0/o6OgkJCRzc3N5eXnV1dXj4+NKSkobGxtaWlpfX1/a2trBwcF2dnYlJSV9fX3Hx8eSkpJNTU1sbGyWlpYRERGCgoIMDAzPz8+MjIy4uLiTk5PNzc3X19cxMTGDg4MpKSm8vLxGRkavr69QUFAKCgoqKiq2trbt7e329vaGhobl5eVra2tZWVk4ODgzMzNcXFyurq63t7dzhmTOAAAFeElEQVR4nO3dZXMUaRQF4EBIOgkQdFncFhZfZPF1d3d3d3f95TthC6iQTE9kuk+Y93m+MpW6t8+p7mGkZ2gIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACK8MDyR784eTw9BRkj1RU70rPQuvFqmjXpeWjVPdUMn6VnokUz86+qjemhaM3obAWoqvRYtGX2/DWgFA93K8DK9GS0Yku3AlTr06PRht+6FqB6Mj0bLVjfvQCeBpRgb00BRtLD0YKaAjgFlKCuAF4ULsAKp4CyHakrgLcGC1BXAKeAAihA4cbqCnAyPR2N211XgNH0dDRup2tA4RSgcLUFOJaejsbVFuC59HQ0bltdAW5KT0fjVtcV4GB6OppXV4C30sPRvLoCHE0PR/PqCvBEejiaV1eA9Gy0QAEKV5P/WHo2WlBTgHXp2WhBTQF
uSM9GCzwFKNv3ngKU7WL3ApxOz0YLXAHKNqwAZavJf0V6NlpQU4Bd6dloXpd7BLkClKImfwUogO+Glu3VuvwVYPDV5q8AA2+lAhTtVH3+CjDoeuRf3ZgekEYt61WAi+kJaVLt98JdAwbe/b3z14ABdn4u+VfVY+k5acbNc8u/qjalJ6UJd881/47t6WHpu2PzyL/yCzID56755T/llvTM9M8H88+/Y5+XhQbE2QXlP2XVg5/cfnrPmvHLtxYcG3nhu+dXp/dhfmpvCLIw4+mdmLtn+59/xzPptZirRvL3AeLrRkP5uwxcJxrL3y8MLSVP/XHr7P/QYP5VtbbdJelm/7RYduza+ebXmzdfakSj+XvrcIn4tOGYu9uQXp2O92P5u5vAUjDnt3mbkF6exi/z9X5Mb89r0QLcl16faP7uLR9X+1XP5qXXJ5u/AqT1/KZHs35J71+8bP5OAGl7svn/nd6/eNn8t6TXJ5r/4fT27Evm/1N6e6InALeVXwKC+b+c3p2h8FOA29Lbc2+0ANXe9P7FO5MtQPVN+gCULpx/VQ2nj0Dh0vl7KTgsHb/vhoSl46+cArLS6XcsTx+Dkv2QTr/jxfRBKNnT6fQ7VqUPQsneS6dfuZ9Y1IZ0+h3b0gehZHU//9eWC+mDULLwB0Iv8b+AoLHe+TQufQyKlg6/8kpgVjr9ygkgK51+Vd2ZPgRlS8dffZ4+AoVL5+9u0mHh/Hem9y9eMv0xNxLPG0k2IL08Q0PLkwXwCkBe9s2gX9PrcyJaABeBvGwBjqfXJ1sAp4A4BShc+BZxbhaf9ki2AI+n9ydbgNfT65MtwD/p9fk5WoAP0+uTPQVsTm9PtgDp5cm+H3QmvTxD0VNAenWmrIvlfzC9OpeE4h87ml6c/2XuF74svTZXBOIfOZVemqv29w6sv/79KL0y00y0m/+59L5ca0u7DUivywyrekQ2+vGhPj5VWJdelxkOdAtrbNvE5ceM960ByU2Z3UMzY1q56cj0x1xQgEG26WpA4wfWfjnrY/p0SxG/FbI0vTP1EcHDq7fWPOTtvhTgfGsr0Xf9uLHUV+klWIQ/F38deCO9A4uz2E+TH0ovwGIt7ovl6enpg4nRBefvi+EDYniBLw29lB6cvhlewNMBN4gfMOcmT9yxfe4XhInef5Hr0dmtk5NbJ799Ze36uvg3/pWek+btXdkl/jW/p0ejLbuufXYwtvvd9EwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAPfwHLuRXafg0SKQAAAAASUVORK5CYII=\")"}}},"GetProjectWebhookEndpointEventsResponse":{"type":"object","required":["items"],"properties":{"items":{"type":"array","description":"A list of the project webhook 
events","items":{"type":"object","properties":{"createdAt":{"type":"string"},"payload":{"type":"object","properties":{}},"endpointId":{"type":"string"},"id":{"type":"string"},"sentAt":{"type":"string"},"type":{"type":"string","enum":["*","asset.download.completed","asset.download.created","asset.download.failed","generation.cancelled","generation.completed","generation.created","generation.failed","inference.cancelled","inference.completed","inference.created","inference.failed","model.download.completed","model.download.created","model.download.failed","model.training.cancelled","model.training.completed","model.training.failed","model.training.started"]},"attempts":{"type":"array","items":{"type":"object","properties":{"responseStatusCode":{"type":"number"},"sentAt":{"type":"string"}},"required":["responseStatusCode","sentAt"]}},"status":{"type":"string","enum":["created","failed","retrying","success"]},"updatedAt":{"type":"string"}},"required":["attempts","createdAt","endpointId","id","payload","sentAt","status","type","updatedAt"]}}}},"PostVectorizeInferencesRequest":{"type":"object","required":["image"],"properties":{"mode":{"type":"string","description":"Curve fitting mode `none`, `polygon`, `spline`","enum":["none","polygon","spline"],"default":"spline"},"image":{"type":"string","description":"The asset ID (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\") to vectorize."},"layerDifference":{"type":"number","description":"Represents the color difference between gradient layers (higher value will reduce the number of layers)\n\nOnly applicable to `color` colorMode.","minimum":0,"maximum":255},"maxIterations":{"type":"number","description":"Max iterations for rendering","minimum":1,"maximum":100000},"cornerThreshold":{"type":"number","description":"Minimum momentary angle (degree) to be considered a corner (higher value will smooth corners)\n\nOnly applicable to `spline` mode.","minimum":0,"maximum":180},"colorPrecision":{"type":"number","description":"Number of 
significant bits to use in an RGB channel, min 1, max 16 (higher value will increase precision)\n\nOnly applicable to `color` colorMode.","minimum":1,"maximum":16},"spliceThreshold":{"type":"number","description":"Minimum angle displacement (degree) to splice a spline (higher value reduce accuracy)\n\nOnly applicable to `spline` mode.","minimum":0,"maximum":180},"lengthThreshold":{"type":"number","description":"Minimum length of a segment (higher value will generate more coarse output)\n\nOnly applicable to `spline` mode.","minimum":3.5,"maximum":10},"colorMode":{"type":"string","description":"Color mode `bw`, `color`. If `bw`, the image will be considered as black and white.","enum":["bw","color"],"default":"color"},"filterSpeckle":{"type":"number","description":"Discard patches smaller than X px in size (higher value will reduce the number of patches, cleaner output)","minimum":0,"maximum":128},"preset":{"type":"string","description":"If preset given, all other parameters will be ignored (mode, colorMode, filterSpeckle, ...), except for custom.","enum":["asset","bw","custom","photo","pixelart","poster"],"default":"custom"},"pathPrecision":{"type":"number","description":"Number of decimal places to use in path string","minimum":0}}},"PostModelsResponse":{"type":"object","required":["model"],"properties":{"model":{"type":"object","properties":{"trainingImages":{"type":"array","description":"The URLs of the first 3 training images of the model. 
To retrieve the full set of images, get it by modelId","items":{"type":"object","properties":{"createdAt":{"type":"string","description":"The training image upload date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"automaticCaptioning":{"type":"string","description":"Automatic captioning of the image"},"downloadUrl":{"type":"string","description":"The URL of the image"},"name":{"type":"string","description":"The original file name of the image (example: \"my-training-image.jpg\")"},"description":{"type":"string","description":"Description for the image"},"id":{"type":"string","description":"The training image ID (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"}},"required":["automaticCaptioning","createdAt","description","downloadUrl","id","name"]}},"inputs":{"type":"array","description":"The inputs of the model. Only used for custom models. To retrieve this list, get it by modelId with GET /models/{modelId}","items":{"type":"object","description":"Defines the input parameters for a model. Use this to understand the available input parameters\nfor a given `modelId` when calling `POST /generate/custom/{modelId}`.\nSee {@link https://docs.scenario.com/docs/video-generation} for examples.","properties":{"parent":{"type":"boolean","description":"Whether this input represents a parent asset to assign to the produced assets.\nOnly available for \\`file\\` and \\`file_array\\` input types.\n\nFor \\`file_array\\`, the parent asset is the first item in the array."},"color":{"type":"boolean","description":"Whether the input is a color or not. Only available for \\`string\\` input type."},"inputs":{"type":"array","description":"The list of inputs which form an object within a container array.\nAll inputs are the same as the current object.\nThis is only available for type inputs_array inputs.","items":{"type":"object","properties":{}}},"minLength":{"type":"number","description":"The minimum allowed length for string inputs. 
Also applies to each item in \\`string_array\\`."},"backgroundBehavior":{"type":"string","description":"Specifies the background behavior for the input. Only available for \\`file\\` and \\`file_array\\`\ninput types with kind \\`image\\`.","enum":["opaque","transparent"]},"description":{"type":"string","description":"Help text displayed in the UI to provide additional information about the input"},"type":{"type":"string","description":"The data type of the input","enum":["boolean","file","file_array","inputs_array","model","model_array","number","number_array","string","string_array"]},"required":{"type":"object","description":"Set of rules that describes when this input is required:\n- \\`always\\`: Input is always required\n- \\`ifNotDefined\\`: Input is required when another specified input is not defined\n- \\`ifDefined\\`: Input is required when another specified input is defined\n- \\`conditionalValues\\`: Input is required when another input has a specific value\n\nBy default, the input is not required.","properties":{"always":{"type":"boolean","description":"Whether the input is always required"},"ifNotDefined":{"type":"object","description":"Makes this input required when another input is not defined:\n- Key: name of the input that must be undefined\n- Value: message to display when this input is required","properties":{}},"ifDefined":{"type":"object","description":"Makes this input required when another input is defined:\n- Key: name of the input that must be defined\n- Value: message to display when this input is required","properties":{}},"conditionalValues":{"type":"object","description":"Makes this input required when another input has a specific value:\n- Key: name of the input to check\n- Value: operation and allowed values that trigger the requirement","properties":{}}}},"modelTypes":{"type":"array","description":"The allowed model types for this input. 
Example: \\`[\"flux.1-lora\"]\\`.\nOnly available for \\`model_array\\` input type.","items":{"type":"string","enum":["custom","flux.1","flux.1-composition","flux.1-kontext-dev","flux.1-kontext-lora","flux.1-krea-dev","flux.1-krea-lora","flux.1-lora","flux.1-pro","flux.1.1-pro-ultra","flux1.1-pro","gpt-image-1","sd-1_5","sd-1_5-composition","sd-1_5-lora","sd-xl","sd-xl-composition","sd-xl-lora"]}},"min":{"type":"number","description":"The minimum allowed value. Only available for \\`number\\` and array input types."},"promptSpark":{"type":"boolean","description":"Whether the input is used with prompt spark. Only available for \\`string\\` input type."},"placeholder":{"type":"string","description":"Placeholder text for the input. Only available for 'string' input type."},"group":{"type":"string","description":"Used to visually group inputs together in the UI. Inputs with the same group value appear\nconsecutively in the UI."},"allowedValues":{"type":"array","description":"The allowed values for the input. For \\`string\\` or \\`number\\` types, creates a single-select\ndropdown.\nFor \\`string_array\\` type, creates a multi-select dropdown.","items":{}},"costImpact":{"type":"boolean","description":"Whether this input affects the model's cost calculation"},"max":{"type":"number","description":"The maximum allowed value. Only available for \\`number\\` and \\`array\\` input types."},"kind":{"type":"string","description":"The asset kind of the input. 
Only taken into account for \\`file\\` and \\`file_array\\` input types.","enum":["3d","audio","document","image","image-hdr","json","video"]},"maskFrom":{"type":"string","description":"The name of the file input field to use as the mask source"},"label":{"type":"string","description":"The label displayed in the UI for this input"},"hint":{"type":"string","description":"Hint text displayed in the UI as a tooltip to guide the user"},"name":{"type":"string","description":"The name that must be used to call the model through the API"},"step":{"type":"number","description":"The step increment for numeric inputs. Only available for \\`number\\` input type.","minimum":1},"prompt":{"type":"boolean","description":"Whether the input is a prompt. When true, displays as a text area with prompt spark feature.\nOnly available for \\`string\\` input type."},"maxLength":{"type":"number","description":"The maximum allowed length for \\`string\\` inputs. Also applies to each item in \\`string_array\\`."}},"required":["name","type"]}},"exampleAssetIds":{"type":"array","description":"List of all example asset IDs setup by the model owner","items":{"type":"string"}},"privacy":{"type":"string","description":"The privacy of the model (default: private)","enum":["private","public","unlisted"],"default":"private"},"softDeletionOn":{"type":"string","description":"The date when the model will be soft deleted (only for Free plan)"},"epoch":{"type":"string","description":"The epoch of the model. 
Only available for Flux Lora Trained models.\nIf not set, uses the final model epoch (latest)"},"uiConfig":{"type":"object","description":"The UI configuration for the model","properties":{"selects":{"type":"object","description":"Configuration for the selects","additionalProperties":{"type":"object","properties":{}}},"inputProperties":{"type":"object","description":"Configuration for the input properties","additionalProperties":{"type":"object","properties":{"collapsed":{"type":"boolean"}}}},"presets":{"type":"array","description":"Configuration for the presets","items":{"type":"object","properties":{"presets":{"type":"object","properties":{}},"fields":{"type":"array","items":{"type":"string"}}},"required":["fields","presets"]}},"triggerGenerate":{"type":"object","description":"Configuration for the trigger generate button","properties":{"after":{"type":"string","description":"The 'name' of the input where the trigger generate button will be displayed (after the input).\nDo not specify both position and after."},"label":{"type":"string"},"position":{"type":"string","description":"The position of the trigger generate button. 
If position specified, the button will be displayed at the specified position.\nDo not specify both position and after.","enum":["bottom","top"]}},"required":["label"]},"lorasComponent":{"type":"object","description":"Configuration for the loras component","properties":{"modelIdInput":{"type":"string","description":"The input model id (example: a composition or a single LoRA modelId)\nIf specified, the model id will be attached to the output asset as a metadata\nIf the model-decomposer parser is specified on it, modelInput and scaleInput will be automatically populated"},"scaleInput":{"type":"string","description":"The input name of the scale (number_array)"},"label":{"type":"string","description":"The label of the component"},"modelInput":{"type":"string","description":"The input name of the model (model_array)"}},"required":["label","modelInput","scaleInput"]},"resolutionComponent":{"type":"object","description":"Configuration for the resolution component","properties":{"presets":{"type":"array","description":"The resolution presets","items":{"type":"object","properties":{"width":{"type":"number"},"label":{"type":"string"},"height":{"type":"number"}},"required":["height","label","width"]}},"widthInput":{"type":"string","description":"The input name of the width"},"label":{"type":"string","description":"The label of the component"},"heightInput":{"type":"string","description":"The input name of the height"}},"required":["heightInput","label","presets","widthInput"]}}},"source":{"type":"string","description":"The source of the model","enum":["civitai","huggingface","other","scenario"]},"ownerId":{"type":"string","description":"The owner ID (example: \"team_VFhihHKMRZyDDnZAJwLb2Q\")"},"type":{"type":"string","description":"The model type (example: 
\"flux.1-lora\")","enum":["custom","flux.1","flux.1-composition","flux.1-kontext-dev","flux.1-kontext-lora","flux.1-krea-dev","flux.1-krea-lora","flux.1-lora","flux.1-pro","flux.1.1-pro-ultra","flux1.1-pro","gpt-image-1","sd-1_5","sd-1_5-composition","sd-1_5-lora","sd-xl","sd-xl-composition","sd-xl-lora"]},"createdAt":{"type":"string","description":"The model creation date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"complianceMetadata":{"type":"object","description":"Compliance and regulatory metadata for the model","properties":{"subProcessor":{"type":"string","description":"The sub-processor used for the model","enum":["AWS","Fal","Modal","Provider","Replicate"]},"modelProvider":{"type":"string","description":"The provider of the model (examples: 'Google', 'AWS', 'BFL', 'Meshy', etc.)"},"licenseTerms":{"type":"string","description":"URL to license terms"},"dataProcessingComment":{"type":"string","description":"Data processing comment (e.g., zeroRetention, temporaryRetention, etc.)","enum":["researchOnly","serviceImprovement","temporaryRetention","zeroRetention"]},"maintainer":{"type":"string","description":"The maintainer of the model","enum":["Fal","Provider","Replicate","Scenario"]}}},"trainingStats":{"type":"object","description":"Additional information about the model's training","properties":{"trainDuration":{"type":"number","description":"The training duration in seconds"},"queueDuration":{"type":"number","description":"The training queued duration in seconds"},"endedAt":{"type":"string","description":"The training end time as an ISO date string"},"startedAt":{"type":"string","description":"The training start time as an ISO date string"}}},"promptEmbedding":{"type":"string","description":"Fine-tune the model's inferences with prompt embedding"},"trainingProgress":{"type":"object","description":"Additional information about the training progress of the model","properties":{"stage":{"type":"string","description":"The stage of the 
request","enum":["pending","queued-for-train","running-train","starting-train"]},"remainingTimeMs":{"type":"number","description":"The remaining time in milliseconds"},"progress":{"type":"number","description":"The progress of the job","minimum":0,"maximum":1},"startedAt":{"type":"number","description":"The timestamp in millisecond marking the start of the process"},"position":{"type":"number","description":"Position of the job in the queue (ie. the number of job in the queue before this one)"},"updatedAt":{"type":"number","description":"Timestamp in milliseconds of the last time the training progress was updated"}},"required":["stage","updatedAt"]},"id":{"type":"string","description":"The model ID (example: \"model_eyVcnFJcR92BxBkz7N6g5w\")"},"class":{"type":"object","description":"The class of the model","properties":{"modelId":{"type":"string","description":"The model ID of the class (example: \"stable-diffusion-v1-5\")"},"name":{"type":"string","description":"The class name (example: \"Character Design\")"},"category":{"type":"string","description":"The category slug of the class (example: \"art-style\")"},"thumbnails":{"type":"array","description":"Some example images URLs to showcase the class","items":{"type":"string"}},"prompt":{"type":"string","description":"The class prompt (example: \"a character design\")"},"conceptPrompt":{"type":"string","description":"The concept prompt of the class (example: \"a sks character design\")"},"slug":{"type":"string","description":"The class slug (example: \"art-style-character-design\")"},"status":{"type":"string","description":"The class status (only published classes are listed, but unpublished classes can still appear in existing models)","enum":["published","unpublished"]}},"required":["category","conceptPrompt","modelId","name","prompt","slug","status","thumbnails"]},"updatedAt":{"type":"string","description":"The model last update date as an ISO string (example: 
\"2023-02-03T11:19:41.579Z\")"},"thumbnail":{"type":"object","description":"A thumbnail for your model","properties":{"assetId":{"type":"string","description":"The AssetId of the image used as a thumbnail for your model (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"url":{"type":"string","description":"The url of the image used as a thumbnail for your model"}},"required":["assetId","url"]},"accessRestrictions":{"type":"number","description":"The access restrictions of the model\n0: Free plan\n25: Creator plan\n50: Pro plan\n75: Team plan\n100: Enterprise plan"},"capabilities":{"type":"array","description":"List of model capabilities (example: [\"txt2img\", \"img2img\", \"txt2img_ip_adapter\", ...])","items":{"type":"string","enum":["3d23d","audio2audio","controlnet","controlnet_img2img","controlnet_inpaint","controlnet_inpaint_ip_adapter","controlnet_ip_adapter","controlnet_reference","controlnet_texture","img23d","img2img","img2img_ip_adapter","img2img_texture","img2txt","img2video","inpaint","inpaint_ip_adapter","outpaint","reference","reference_texture","txt23d","txt2audio","txt2img","txt2img_ip_adapter","txt2img_texture","txt2txt","txt2video","video2img","video2video"]}},"parentModelId":{"type":"string","description":"The id of the parent model"},"trainingImagePairs":{"type":"array","description":"Array of training image pairs","items":{"type":"object","properties":{"sourceId":{"type":"string","description":"The source asset ID (must be a training asset)"},"targetId":{"type":"string","description":"The target asset ID (must be a training asset)"},"instruction":{"type":"string","description":"The instruction for the image pair, source to target"}}}},"trainingImagesNumber":{"type":"number","description":"The total number of training images"},"custom":{"type":"boolean","description":"Whether the model is a custom model and can be used only with POST /generate/custom/{modelId} endpoint"},"modelKeyword":{"type":"string","description":"The model keyword, this is a 
legacy parameter, please use conceptPrompt in parameters"},"shortDescription":{"type":"string","description":"The model short description (example: \"This model generates highly detailed cinematic scenes.\")"},"authorId":{"type":"string","description":"The author user ID (example: \"user_VFhihHKMRZyDDnZAJwLb2Q\")"},"userId":{"type":"string","description":"(Deprecated) The user ID (example: \"user_VFhihHKMRZyDDnZAJwLb2Q\")"},"tags":{"type":"array","description":"The associated tags (example: [\"sci-fi\", \"landscape\"])","items":{"type":"string"}},"negativePromptEmbedding":{"type":"string","description":"Fine-tune the model's inferences with negative prompt embedding"},"concepts":{"type":"array","description":"The concepts is required for the type model: composition","items":{"type":"object","properties":{"modelId":{"type":"string","description":"The model ID (example: \"model_eyVcnFJcR92BxBkz7N6g5w\")"},"modelEpoch":{"type":"string","description":"The epoch of the model (example: \"000001\")\nOnly available for Flux Lora Trained models"},"scale":{"type":"number","description":"The scale of the model (example: 1.0)\nFor Flux Kontext Prompt Editing, the scale is between 0 and 2.","minimum":-2,"maximum":2}},"required":["modelId","scale"]}},"collectionIds":{"type":"array","description":"A list of CollectionId this model belongs to","items":{"type":"string"}},"name":{"type":"string","description":"The model name (example: \"Cinematic Realism\")"},"epochs":{"type":"array","description":"The epochs of the model. 
Only available for Flux Lora Trained models.","items":{"type":"object","properties":{"assets":{"type":"array","description":"The assets of the epoch if sample prompts have been supplied during training","items":{"type":"object","properties":{"assetId":{"type":"string","description":"The AssetId of the image during training (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"url":{"type":"string","description":"The url of the asset"}},"required":["assetId","url"]}},"epoch":{"type":"string","description":"The epoch hash to identify the epoch"}},"required":["epoch"]}},"parameters":{"type":"object","description":"The parameters of the model","properties":{"priorLossWeight":{"type":"number","description":"The weight of prior preservation loss\n\nOnly available for SD15 and SDXL LoRA training","minimum":0,"maximum":1.7976931348623157e+308,"exclusiveMinimum":true},"seed":{"type":"number","description":"Used to reproduce previous results. Default: randomly generated number.\n\nOnly available for SD15 and SDXL LoRA training","minimum":0,"maximum":9007199254740991},"numUNetTrainSteps":{"type":"number","description":"The number of training steps for the UNet\n\nOnly available for SDXL LoRA training","minimum":0,"maximum":40000},"sampleSourceImages":{"type":"array","description":"The sample prompt images (AssetIds) paired with samplePrompts\nOnly available for Flux LoRA training\nMust be the same length as samplePrompts","items":{"type":"string"}},"classPrompt":{"type":"string","description":"The prompt to specify images in the same class as provided instance images\n\nOnly available for SD15 training"},"wandbKey":{"type":"string","description":"The Weights And Bias key to use for logging. 
The maximum length is 40 characters"},"scaleLr":{"type":"boolean","description":"Whether to scale the learning rate\n\nNote: Legacy parameter, will be ignored\n\nOnly available for SD15 and SDXL LoRA training","default":false},"randomCropScale":{"type":"number","description":"Scale of random crops\n\nOnly available for SD15 and SDXL LoRA training","minimum":0,"maximum":1},"lrScheduler":{"type":"string","description":"The scheduler type to use (default: \"constant\")\n\nOnly available for SD15 and SDXL LoRA training","enum":["constant","constant-with-warmup","cosine","cosine-with-restarts","linear","polynomial"],"default":"constant"},"rank":{"type":"number","description":"The dimension of the LoRA update matrices\n\nOnly available for SDXL and Flux LoRA training\n\nDefault value varies depending on the model type:\n- For SDXL: 64\n- For Flux: 16","minimum":4,"maximum":64},"validationPrompt":{"type":"string","description":"Validation prompt\n\nOnly available for SD15 and SDXL LoRA training","default":""},"conceptPrompt":{"type":"string","description":"The prompt with identifier specifying the instance (or subject) of the class (example: \"a daiton dog\")\n\nDefault value varies depending on the model type:\n- For SD1.5: \"daiton\" if no class is associated with the model\n- For SDXL: \"daiton\"\n- For Flux: \"\""},"maxTrainSteps":{"type":"number","description":"Maximum number of training steps to execute (default: varies depending on the model type)\n\nFor SDXL LoRA training, please use `numTextTrainSteps` and `numUNetTrainSteps` instead\n\nDefault value varies depending on the model type:\n- For SD1.5: round((number of training images * 225) / 3)\n- For SDXL: number of training images * 175\n- For Flux: number of training images * 100\n\nMaximum value varies depending on the model type:\n- For SD1.5 and SDXL: [0, 40000]\n- For Flux: [0, 10000]","minimum":0,"maximum":40000},"nbEpochs":{"type":"number","description":"The number of epochs to train for\n\nOnly available 
for Flux LoRA training","minimum":1,"maximum":30},"textEncoderTrainingRatio":{"type":"number","description":"Whether to train the text encoder or not\n\nExample: For 100 steps and a value of 0.2, it means that the text encoder will be trained for 20 steps and then the UNet for 80 steps\n\nNote: Legacy parameter, please use `numTextTrainSteps` and `numUNetTrainSteps`\n\nOnly available for SD15 and SDXL LoRA training","minimum":0,"maximum":0.99},"validationFrequency":{"type":"number","description":"Validation frequency. Cannot be greater than maxTrainSteps value\n\nOnly available for SD15 and SDXL LoRA training","minimum":0},"nbRepeats":{"type":"number","description":"The number of times to repeat the training\n\nOnly available for Flux LoRA training","minimum":1,"maximum":30},"samplePrompts":{"type":"array","description":"The prompts to use for each epoch\nOnly available for Flux LoRA training","items":{"type":"string"}},"randomCropRatio":{"type":"number","description":"Ratio of random crops\n\nOnly available for SD15 and SDXL LoRA training","minimum":0,"maximum":1},"learningRate":{"type":"number","description":"Initial learning rate (after the potential warmup period)\n\nDefault value varies depending on the model type:\n- For SD1.5 and SDXL: 0.000005\n- For Flux: 0.0001","minimum":0,"exclusiveMinimum":true},"optimizeFor":{"type":"string","description":"Optimize the model training task for a specific type of input images. 
The available values are:\n- \"likeness\": optimize training for likeness or portrait (targets specific transformer blocks)\n- \"all\": train all transformer blocks\n- \"none\": train no specific transformer blocks\n\nThis parameter controls which double and single transformer blocks are trained\nduring the LoRA training process.\n\nOnly available for Flux LoRA training","enum":["likeness"],"default":"undefined"},"randomCrop":{"type":"boolean","description":"Whether to random crop or center crop images before resizing to the working resolution\n\nOnly available for SD15 and SDXL LoRA training","default":false},"numTextTrainSteps":{"type":"number","description":"The number of training steps for the text encoder\n\nOnly available for SDXL LoRA training","minimum":0,"maximum":40000},"learningRateTextEncoder":{"type":"number","description":"Initial learning rate (after the potential warmup period) for the text encoder\n\nMaximum [Flux LoRA: 0.001]\nDefault [SDXL: 0.00005 | Flux LoRA: 0.00001]\nMinimum [SDXL: 0 | Flux LoRA: 0.000001]","minimum":0,"maximum":0.001,"exclusiveMinimum":true},"learningRateUnet":{"type":"number","description":"Initial learning rate (after the potential warmup period) for the UNet\n\nOnly available for SDXL LoRA training","minimum":0,"exclusiveMinimum":true},"batchSize":{"type":"number","description":"The batch size\nLess steps, and will increase the learning rate\n\nOnly available for Flux LoRA training","minimum":1,"maximum":4}}},"compliantModelIds":{"type":"array","description":"List of base model IDs compliant with the model (example: [\"flux.1-dev\", \"flux.1-schnell\"])\nThis attribute is mainly used for Flux LoRA models","items":{"type":"string"}},"status":{"type":"string","description":"The model 
status","enum":["copying","failed","new","trained","training","training-canceled"]}},"required":["capabilities","collectionIds","createdAt","custom","exampleAssetIds","id","privacy","source","status","tags","trainingImagesNumber","type","updatedAt"]}}},"PostJobActionByJobIdResponse":{"type":"object","required":["job"],"properties":{"job":{"type":"object","properties":{"createdAt":{"type":"string","description":"The job creation date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"jobId":{"type":"string","description":"The job ID (example: \"job_ocZCnG1Df35XRL1QyCZSRxAG8\")"},"metadata":{"type":"object","description":"Metadata of the job with some additional information","properties":{"output":{"type":"object","description":"The output of the job","properties":{}},"input":{"type":"object","description":"The inputs for the job","properties":{}},"assetIds":{"type":"array","description":"List of produced assets for this job","items":{"type":"string"}},"flow":{"type":"array","description":"The flow of the job. Only available for workflow jobs.","items":{"type":"object","properties":{"jobId":{"type":"string","description":"If the flow is part of a WorkflowJob, this is the jobId for the node.\njobId is only available for nodes started. A node \"Pending\" for a running workflow job is not started."},"assets":{"type":"array","description":"List of produced assets for this node.","items":{"type":"object","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]}},"logicType":{"type":"string","description":"The type of the logic for the node.\nOnly available for logic nodes.","enum":["if-else"]},"dependsOn":{"type":"array","description":"The nodes that this node depends on.\nOnly available for nodes that have dependencies. Mainly used for user approval nodes.","items":{"type":"string"}},"modelId":{"type":"string","description":"The model id for the node. 
Mainly used for custom model tasks."},"inputs":{"type":"array","description":"The inputs of the node.","items":{"type":"object","description":"This is used to run the flow.","properties":{"parent":{"type":"boolean","description":"Whether this input represents a parent asset to assign to the produced assets.\nOnly available for \\`file\\` and \\`file_array\\` input types.\n\nFor \\`file_array\\`, the parent asset is the first item in the array."},"color":{"type":"boolean","description":"Whether the input is a color or not. Only available for \\`string\\` input type."},"inputs":{"type":"array","description":"The list of inputs which form an object within a container array.\nAll inputs are the same as the current object.\nThis is only available for type inputs_array inputs.","items":{"type":"object","properties":{}}},"minLength":{"type":"number","description":"The minimum allowed length for string inputs. Also applies to each item in \\`string_array\\`."},"backgroundBehavior":{"type":"string","description":"Specifies the background behavior for the input. 
Only available for \\`file\\` and \\`file_array\\`\ninput types with kind \\`image\\`.","enum":["opaque","transparent"]},"description":{"type":"string","description":"Help text displayed in the UI to provide additional information about the input"},"type":{"type":"string","description":"The data type of the input","enum":["boolean","file","file_array","inputs_array","model","model_array","number","number_array","string","string_array"]},"required":{"type":"object","description":"Set of rules that describes when this input is required:\n- \\`always\\`: Input is always required\n- \\`ifNotDefined\\`: Input is required when another specified input is not defined\n- \\`ifDefined\\`: Input is required when another specified input is defined\n- \\`conditionalValues\\`: Input is required when another input has a specific value\n\nBy default, the input is not required.","properties":{"always":{"type":"boolean","description":"Whether the input is always required"},"ifNotDefined":{"type":"object","description":"Makes this input required when another input is not defined:\n- Key: name of the input that must be undefined\n- Value: message to display when this input is required","properties":{}},"ifDefined":{"type":"object","description":"Makes this input required when another input is defined:\n- Key: name of the input that must be defined\n- Value: message to display when this input is required","properties":{}},"conditionalValues":{"type":"object","description":"Makes this input required when another input has a specific value:\n- Key: name of the input to check\n- Value: operation and allowed values that trigger the requirement","properties":{}}}},"modelTypes":{"type":"array","description":"The allowed model types for this input. 
Example: \\`[\"flux.1-lora\"]\\`.\nOnly available for \\`model_array\\` input type.","items":{"type":"string","enum":["custom","flux.1","flux.1-composition","flux.1-kontext-dev","flux.1-kontext-lora","flux.1-krea-dev","flux.1-krea-lora","flux.1-lora","flux.1-pro","flux.1.1-pro-ultra","flux1.1-pro","gpt-image-1","sd-1_5","sd-1_5-composition","sd-1_5-lora","sd-xl","sd-xl-composition","sd-xl-lora"]}},"ref":{"type":"object","description":"The reference to another input or output of the same workflow.\nMust have at least one of node or conditional.","properties":{"equal":{"type":"string","description":"This is the desired node output value if ref is an if/else node."},"node":{"type":"string","description":"The node id or 'workflow' if the source is a workflow input."},"conditional":{"type":"array","description":"The conditional nodes to reference.\nIf the conditional nodes are successful, the node will be successful.\nIf the conditional nodes are skipped, the node will be skipped.\nContains an array of node ids used to check the status of the nodes.","items":{"type":"string"}},"name":{"type":"string","description":"The name of the input or output to reference.\nIf the type is 'workflow', the name is the name of the workflow input and is required\nIf the type is 'node', the name is not mandatory, except if you want all outputs of the node.\nTo get all outputs of a node, you can use the name 'all'."}}},"min":{"type":"number","description":"The minimum allowed value. Only available for \\`number\\` and array input types."},"promptSpark":{"type":"boolean","description":"Whether the input is used with prompt spark. Only available for \\`string\\` input type."},"placeholder":{"type":"string","description":"Placeholder text for the input. Only available for 'string' input type."},"group":{"type":"string","description":"Used to visually group inputs together in the UI. 
Inputs with the same group value appear\nconsecutively in the UI."},"allowedValues":{"type":"array","description":"The allowed values for the input. For \\`string\\` or \\`number\\` types, creates a single-select\ndropdown.\nFor \\`string_array\\` type, creates a multi-select dropdown.","items":{}},"costImpact":{"type":"boolean","description":"Whether this input affects the model's cost calculation"},"max":{"type":"number","description":"The maximum allowed value. Only available for \\`number\\` and \\`array\\` input types."},"kind":{"type":"string","description":"The asset kind of the input. Only taken into account for \\`file\\` and \\`file_array\\` input types.","enum":["3d","audio","document","image","image-hdr","json","video"]},"maskFrom":{"type":"string","description":"The name of the file input field to use as the mask source"},"label":{"type":"string","description":"The label displayed in the UI for this input"},"hint":{"type":"string","description":"Hint text displayed in the UI as a tooltip to guide the user"},"name":{"type":"string","description":"The name that must be used to call the model through the API"},"step":{"type":"number","description":"The step increment for numeric inputs. Only available for \\`number\\` input type.","minimum":1},"prompt":{"type":"boolean","description":"Whether the input is a prompt. When true, displays as a text area with prompt spark feature.\nOnly available for \\`string\\` input type."},"maxLength":{"type":"number","description":"The maximum allowed length for \\`string\\` inputs. 
Also applies to each item in \\`string_array\\`."}},"required":["name","type"]}},"id":{"type":"string","description":"The id of the node."},"logic":{"type":"object","description":"The logic of the node.\nOnly available for logic nodes.","properties":{"default":{"type":"string","description":"The default case of the logic.\nContains the id/output of the node to execute if no case is matched.\nOnly available for if/else nodes."},"transform":{"type":"string","description":"The transform of the logic.\nOnly available for transform nodes."},"cases":{"type":"array","description":"The cases of the logic.\nOnly available for if/else nodes.","items":{"type":"object","properties":{"condition":{"type":"string"},"value":{"type":"string"}},"required":["condition","value"]}}}},"type":{"type":"string","description":"The type of the job for the node.","enum":["custom-model","generate-prompt","logic","remove-background","transform","user-approval","workflow"]},"workflowId":{"type":"string","description":"The workflow id for the node. Mainly used for workflow tasks."},"status":{"type":"string","description":"The status of the node. 
Only available for WorkflowJob nodes.","enum":["failure","pending","processing","skipped","success"]}},"required":["id","status","type"]}},"workflowId":{"type":"string","description":"The workflow ID of the job if job is part of a workflow."},"workflowJobId":{"type":"string","description":"The workflow job ID of the job if job is part of a workflow job."}}},"statusHistory":{"type":"array","description":"The history of the different statuses the job went through with the ISO string date\nof when the job reached each status.","items":{"type":"object","properties":{"date":{"type":"string"},"status":{"type":"string","enum":["canceled","failure","finalizing","in-progress","pending","queued","success","warming-up"]}},"required":["date","status"]}},"progress":{"type":"number","description":"Progress of the job (between 0 and 1)"},"authorId":{"type":"string","description":"The author user ID (example: \"dcf121faaa1a0a0bbbd9ca1b73d62aea\")"},"jobType":{"type":"string","description":"The type of job","enum":["assets-download","canvas-export","caption","caption-llava","custom","describe-style","detection","embed","flux","flux-model-training","generate-prompt","image-generation","image-prompt-editing","inference","mesh-preview-rendering","model-download","model-import","model-training","openai-image-generation","patch-image","pixelate","reframe","remove-background","repaint","restyle","segment","skybox-3d","skybox-base-360","skybox-hdri","skybox-upscale-360","texture","translate","upload","upscale","upscale-skybox","upscale-texture","vectorize","workflow"]},"ownerId":{"type":"string","description":"The owner ID (example: \"team_U3Qmc8PCdWXwAQJ4Dvw4tV6D\")"},"billing":{"type":"object","description":"The billing of the job","properties":{"cuDiscount":{"type":"number"},"cuCost":{"type":"number"}},"required":["cuCost","cuDiscount"]},"status":{"type":"string","description":"The current status of the 
job","enum":["canceled","failure","finalizing","in-progress","pending","queued","success","warming-up"]},"updatedAt":{"type":"string","description":"The job last update date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"}},"required":["createdAt","jobId","jobType","metadata","progress","status","statusHistory","updatedAt"]}}},"GetPublicModelsResponse":{"type":"object","required":["models"],"properties":{"models":{"type":"array","items":{"type":"object","properties":{"trainingImages":{"type":"array","description":"The URLs of the first 3 training images of the model. To retrieve the full set of images, get it by modelId","items":{"type":"object","properties":{"createdAt":{"type":"string","description":"The training image upload date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"automaticCaptioning":{"type":"string","description":"Automatic captioning of the image"},"downloadUrl":{"type":"string","description":"The URL of the image"},"name":{"type":"string","description":"The original file name of the image (example: \"my-training-image.jpg\")"},"description":{"type":"string","description":"Description for the image"},"id":{"type":"string","description":"The training image ID (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"}},"required":["automaticCaptioning","createdAt","description","downloadUrl","id","name"]}},"inputs":{"type":"array","description":"The inputs of the model. Only used for custom models. To retrieve this list, get it by modelId with GET /models/{modelId}","items":{"type":"object","description":"Defines the input parameters for a model. 
Use this to understand the available input parameters\nfor a given `modelId` when calling `POST /generate/custom/{modelId}`.\nSee {@link https://docs.scenario.com/docs/video-generation} for examples.","properties":{"parent":{"type":"boolean","description":"Whether this input represents a parent asset to assign to the produced assets.\nOnly available for \\`file\\` and \\`file_array\\` input types.\n\nFor \\`file_array\\`, the parent asset is the first item in the array."},"color":{"type":"boolean","description":"Whether the input is a color or not. Only available for \\`string\\` input type."},"inputs":{"type":"array","description":"The list of inputs which form an object within a container array.\nAll inputs are the same as the current object.\nThis is only available for type inputs_array inputs.","items":{"type":"object","properties":{}}},"minLength":{"type":"number","description":"The minimum allowed length for string inputs. Also applies to each item in \\`string_array\\`."},"backgroundBehavior":{"type":"string","description":"Specifies the background behavior for the input. 
Only available for \\`file\\` and \\`file_array\\`\ninput types with kind \\`image\\`.","enum":["opaque","transparent"]},"description":{"type":"string","description":"Help text displayed in the UI to provide additional information about the input"},"type":{"type":"string","description":"The data type of the input","enum":["boolean","file","file_array","inputs_array","model","model_array","number","number_array","string","string_array"]},"required":{"type":"object","description":"Set of rules that describes when this input is required:\n- \\`always\\`: Input is always required\n- \\`ifNotDefined\\`: Input is required when another specified input is not defined\n- \\`ifDefined\\`: Input is required when another specified input is defined\n- \\`conditionalValues\\`: Input is required when another input has a specific value\n\nBy default, the input is not required.","properties":{"always":{"type":"boolean","description":"Whether the input is always required"},"ifNotDefined":{"type":"object","description":"Makes this input required when another input is not defined:\n- Key: name of the input that must be undefined\n- Value: message to display when this input is required","properties":{}},"ifDefined":{"type":"object","description":"Makes this input required when another input is defined:\n- Key: name of the input that must be defined\n- Value: message to display when this input is required","properties":{}},"conditionalValues":{"type":"object","description":"Makes this input required when another input has a specific value:\n- Key: name of the input to check\n- Value: operation and allowed values that trigger the requirement","properties":{}}}},"modelTypes":{"type":"array","description":"The allowed model types for this input. 
Example: \\`[\"flux.1-lora\"]\\`.\nOnly available for \\`model_array\\` input type.","items":{"type":"string","enum":["custom","flux.1","flux.1-composition","flux.1-kontext-dev","flux.1-kontext-lora","flux.1-krea-dev","flux.1-krea-lora","flux.1-lora","flux.1-pro","flux.1.1-pro-ultra","flux1.1-pro","gpt-image-1","sd-1_5","sd-1_5-composition","sd-1_5-lora","sd-xl","sd-xl-composition","sd-xl-lora"]}},"min":{"type":"number","description":"The minimum allowed value. Only available for \\`number\\` and array input types."},"promptSpark":{"type":"boolean","description":"Whether the input is used with prompt spark. Only available for \\`string\\` input type."},"placeholder":{"type":"string","description":"Placeholder text for the input. Only available for 'string' input type."},"group":{"type":"string","description":"Used to visually group inputs together in the UI. Inputs with the same group value appear\nconsecutively in the UI."},"allowedValues":{"type":"array","description":"The allowed values for the input. For \\`string\\` or \\`number\\` types, creates a single-select\ndropdown.\nFor \\`string_array\\` type, creates a multi-select dropdown.","items":{}},"costImpact":{"type":"boolean","description":"Whether this input affects the model's cost calculation"},"max":{"type":"number","description":"The maximum allowed value. Only available for \\`number\\` and \\`array\\` input types."},"kind":{"type":"string","description":"The asset kind of the input. 
Only taken into account for \\`file\\` and \\`file_array\\` input types.","enum":["3d","audio","document","image","image-hdr","json","video"]},"maskFrom":{"type":"string","description":"The name of the file input field to use as the mask source"},"label":{"type":"string","description":"The label displayed in the UI for this input"},"hint":{"type":"string","description":"Hint text displayed in the UI as a tooltip to guide the user"},"name":{"type":"string","description":"The name that must be used to call the model through the API"},"step":{"type":"number","description":"The step increment for numeric inputs. Only available for \\`number\\` input type.","minimum":1},"prompt":{"type":"boolean","description":"Whether the input is a prompt. When true, displays as a text area with prompt spark feature.\nOnly available for \\`string\\` input type."},"maxLength":{"type":"number","description":"The maximum allowed length for \\`string\\` inputs. Also applies to each item in \\`string_array\\`."}},"required":["name","type"]}},"exampleAssetIds":{"type":"array","description":"List of all example asset IDs setup by the model owner","items":{"type":"string"}},"privacy":{"type":"string","description":"The privacy of the model (default: private)","enum":["private","public","unlisted"],"default":"private"},"softDeletionOn":{"type":"string","description":"The date when the model will be soft deleted (only for Free plan)"},"epoch":{"type":"string","description":"The epoch of the model. 
Only available for Flux Lora Trained models.\nIf not set, uses the final model epoch (latest)"},"uiConfig":{"type":"object","description":"The UI configuration for the model","properties":{"selects":{"type":"object","description":"Configuration for the selects","additionalProperties":{"type":"object","properties":{}}},"inputProperties":{"type":"object","description":"Configuration for the input properties","additionalProperties":{"type":"object","properties":{"collapsed":{"type":"boolean"}}}},"presets":{"type":"array","description":"Configuration for the presets","items":{"type":"object","properties":{"presets":{"type":"object","properties":{}},"fields":{"type":"array","items":{"type":"string"}}},"required":["fields","presets"]}},"triggerGenerate":{"type":"object","description":"Configuration for the trigger generate button","properties":{"after":{"type":"string","description":"The 'name' of the input where the trigger generate button will be displayed (after the input).\nDo not specify both position and after."},"label":{"type":"string"},"position":{"type":"string","description":"The position of the trigger generate button. 
If position specified, the button will be displayed at the specified position.\nDo not specify both position and after.","enum":["bottom","top"]}},"required":["label"]},"lorasComponent":{"type":"object","description":"Configuration for the loras component","properties":{"modelIdInput":{"type":"string","description":"The input model id (example: a composition or a single LoRA modelId)\nIf specified, the model id will be attached to the output asset as a metadata\nIf the model-decomposer parser is specified on it, modelInput and scaleInput will be automatically populated"},"scaleInput":{"type":"string","description":"The input name of the scale (number_array)"},"label":{"type":"string","description":"The label of the component"},"modelInput":{"type":"string","description":"The input name of the model (model_array)"}},"required":["label","modelInput","scaleInput"]},"resolutionComponent":{"type":"object","description":"Configuration for the resolution component","properties":{"presets":{"type":"array","description":"The resolution presets","items":{"type":"object","properties":{"width":{"type":"number"},"label":{"type":"string"},"height":{"type":"number"}},"required":["height","label","width"]}},"widthInput":{"type":"string","description":"The input name of the width"},"label":{"type":"string","description":"The label of the component"},"heightInput":{"type":"string","description":"The input name of the height"}},"required":["heightInput","label","presets","widthInput"]}}},"source":{"type":"string","description":"The source of the model","enum":["civitai","huggingface","other","scenario"]},"ownerId":{"type":"string","description":"The owner ID (example: \"team_VFhihHKMRZyDDnZAJwLb2Q\")"},"type":{"type":"string","description":"The model type (example: 
\"flux.1-lora\")","enum":["custom","flux.1","flux.1-composition","flux.1-kontext-dev","flux.1-kontext-lora","flux.1-krea-dev","flux.1-krea-lora","flux.1-lora","flux.1-pro","flux.1.1-pro-ultra","flux1.1-pro","gpt-image-1","sd-1_5","sd-1_5-composition","sd-1_5-lora","sd-xl","sd-xl-composition","sd-xl-lora"]},"createdAt":{"type":"string","description":"The model creation date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"complianceMetadata":{"type":"object","description":"Compliance and regulatory metadata for the model","properties":{"subProcessor":{"type":"string","description":"The sub-processor used for the model","enum":["AWS","Fal","Modal","Provider","Replicate"]},"modelProvider":{"type":"string","description":"The provider of the model (examples: 'Google', 'AWS', 'BFL', 'Meshy', etc.)"},"licenseTerms":{"type":"string","description":"URL to license terms"},"dataProcessingComment":{"type":"string","description":"Data processing comment (e.g., zeroRetention, temporaryRetention, etc.)","enum":["researchOnly","serviceImprovement","temporaryRetention","zeroRetention"]},"maintainer":{"type":"string","description":"The maintainer of the model","enum":["Fal","Provider","Replicate","Scenario"]}}},"trainingStats":{"type":"object","description":"Additional information about the model's training","properties":{"trainDuration":{"type":"number","description":"The training duration in seconds"},"queueDuration":{"type":"number","description":"The training queued duration in seconds"},"endedAt":{"type":"string","description":"The training end time as an ISO date string"},"startedAt":{"type":"string","description":"The training start time as an ISO date string"}}},"promptEmbedding":{"type":"string","description":"Fine-tune the model's inferences with prompt embedding"},"trainingProgress":{"type":"object","description":"Additional information about the training progress of the model","properties":{"stage":{"type":"string","description":"The stage of the 
request","enum":["pending","queued-for-train","running-train","starting-train"]},"remainingTimeMs":{"type":"number","description":"The remaining time in milliseconds"},"progress":{"type":"number","description":"The progress of the job","minimum":0,"maximum":1},"startedAt":{"type":"number","description":"The timestamp in milliseconds marking the start of the process"},"position":{"type":"number","description":"Position of the job in the queue (i.e. the number of jobs in the queue before this one)"},"updatedAt":{"type":"number","description":"Timestamp in milliseconds of the last time the training progress was updated"}},"required":["stage","updatedAt"]},"id":{"type":"string","description":"The model ID (example: \"model_eyVcnFJcR92BxBkz7N6g5w\")"},"class":{"type":"object","description":"The class of the model","properties":{"modelId":{"type":"string","description":"The model ID of the class (example: \"stable-diffusion-v1-5\")"},"name":{"type":"string","description":"The class name (example: \"Character Design\")"},"category":{"type":"string","description":"The category slug of the class (example: \"art-style\")"},"thumbnails":{"type":"array","description":"Some example image URLs to showcase the class","items":{"type":"string"}},"prompt":{"type":"string","description":"The class prompt (example: \"a character design\")"},"conceptPrompt":{"type":"string","description":"The concept prompt of the class (example: \"a sks character design\")"},"slug":{"type":"string","description":"The class slug (example: \"art-style-character-design\")"},"status":{"type":"string","description":"The class status (only published classes are listed, but unpublished classes can still appear in existing models)","enum":["published","unpublished"]}},"required":["category","conceptPrompt","modelId","name","prompt","slug","status","thumbnails"]},"updatedAt":{"type":"string","description":"The model last update date as an ISO string (example: 
\"2023-02-03T11:19:41.579Z\")"},"thumbnail":{"type":"object","description":"A thumbnail for your model","properties":{"assetId":{"type":"string","description":"The AssetId of the image used as a thumbnail for your model (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"url":{"type":"string","description":"The url of the image used as a thumbnail for your model"}},"required":["assetId","url"]},"accessRestrictions":{"type":"number","description":"The access restrictions of the model\n0: Free plan\n25: Creator plan\n50: Pro plan\n75: Team plan\n100: Enterprise plan"},"capabilities":{"type":"array","description":"List of model capabilities (example: [\"txt2img\", \"img2img\", \"txt2img_ip_adapter\", ...])","items":{"type":"string","enum":["3d23d","audio2audio","controlnet","controlnet_img2img","controlnet_inpaint","controlnet_inpaint_ip_adapter","controlnet_ip_adapter","controlnet_reference","controlnet_texture","img23d","img2img","img2img_ip_adapter","img2img_texture","img2txt","img2video","inpaint","inpaint_ip_adapter","outpaint","reference","reference_texture","txt23d","txt2audio","txt2img","txt2img_ip_adapter","txt2img_texture","txt2txt","txt2video","video2img","video2video"]}},"parentModelId":{"type":"string","description":"The id of the parent model"},"trainingImagePairs":{"type":"array","description":"Array of training image pairs","items":{"type":"object","properties":{"sourceId":{"type":"string","description":"The source asset ID (must be a training asset)"},"targetId":{"type":"string","description":"The target asset ID (must be a training asset)"},"instruction":{"type":"string","description":"The instruction for the image pair, source to target"}}}},"trainingImagesNumber":{"type":"number","description":"The total number of training images"},"custom":{"type":"boolean","description":"Whether the model is a custom model and can be used only with POST /generate/custom/{modelId} endpoint"},"modelKeyword":{"type":"string","description":"The model keyword, this is a 
legacy parameter, please use conceptPrompt in parameters"},"shortDescription":{"type":"string","description":"The model short description (example: \"This model generates highly detailed cinematic scenes.\")"},"authorId":{"type":"string","description":"The author user ID (example: \"user_VFhihHKMRZyDDnZAJwLb2Q\")"},"userId":{"type":"string","description":"(Deprecated) The user ID (example: \"user_VFhihHKMRZyDDnZAJwLb2Q\")"},"tags":{"type":"array","description":"The associated tags (example: [\"sci-fi\", \"landscape\"])","items":{"type":"string"}},"negativePromptEmbedding":{"type":"string","description":"Fine-tune the model's inferences with negative prompt embedding"},"concepts":{"type":"array","description":"The concepts is required for the type model: composition","items":{"type":"object","properties":{"modelId":{"type":"string","description":"The model ID (example: \"model_eyVcnFJcR92BxBkz7N6g5w\")"},"modelEpoch":{"type":"string","description":"The epoch of the model (example: \"000001\")\nOnly available for Flux Lora Trained models"},"scale":{"type":"number","description":"The scale of the model (example: 1.0)\nFor Flux Kontext Prompt Editing, the scale is between 0 and 2.","minimum":-2,"maximum":2}},"required":["modelId","scale"]}},"collectionIds":{"type":"array","description":"A list of CollectionId this model belongs to","items":{"type":"string"}},"name":{"type":"string","description":"The model name (example: \"Cinematic Realism\")"},"epochs":{"type":"array","description":"The epochs of the model. 
Only available for Flux Lora Trained models.","items":{"type":"object","properties":{"assets":{"type":"array","description":"The assets of the epoch if sample prompts have been supplied during training","items":{"type":"object","properties":{"assetId":{"type":"string","description":"The AssetId of the image during training (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"url":{"type":"string","description":"The url of the asset"}},"required":["assetId","url"]}},"epoch":{"type":"string","description":"The epoch hash to identify the epoch"}},"required":["epoch"]}},"parameters":{"type":"object","description":"The parameters of the model","properties":{"priorLossWeight":{"type":"number","description":"The weight of prior preservation loss\n\nOnly available for SD15 and SDXL LoRA training","minimum":0,"maximum":1.7976931348623157e+308,"exclusiveMinimum":true},"seed":{"type":"number","description":"Used to reproduce previous results. Default: randomly generated number.\n\nOnly available for SD15 and SDXL LoRA training","minimum":0,"maximum":9007199254740991},"numUNetTrainSteps":{"type":"number","description":"The number of training steps for the UNet\n\nOnly available for SDXL LoRA training","minimum":0,"maximum":40000},"sampleSourceImages":{"type":"array","description":"The sample prompt images (AssetIds) paired with samplePrompts\nOnly available for Flux LoRA training\nMust be the same length as samplePrompts","items":{"type":"string"}},"classPrompt":{"type":"string","description":"The prompt to specify images in the same class as provided instance images\n\nOnly available for SD15 training"},"wandbKey":{"type":"string","description":"The Weights And Bias key to use for logging. 
The maximum length is 40 characters"},"scaleLr":{"type":"boolean","description":"Whether to scale the learning rate\n\nNote: Legacy parameter, will be ignored\n\nOnly available for SD15 and SDXL LoRA training","default":false},"randomCropScale":{"type":"number","description":"Scale of random crops\n\nOnly available for SD15 and SDXL LoRA training","minimum":0,"maximum":1},"lrScheduler":{"type":"string","description":"The scheduler type to use (default: \"constant\")\n\nOnly available for SD15 and SDXL LoRA training","enum":["constant","constant-with-warmup","cosine","cosine-with-restarts","linear","polynomial"],"default":"constant"},"rank":{"type":"number","description":"The dimension of the LoRA update matrices\n\nOnly available for SDXL and Flux LoRA training\n\nDefault value varies depending on the model type:\n- For SDXL: 64\n- For Flux: 16","minimum":4,"maximum":64},"validationPrompt":{"type":"string","description":"Validation prompt\n\nOnly available for SD15 and SDXL LoRA training","default":""},"conceptPrompt":{"type":"string","description":"The prompt with identifier specifying the instance (or subject) of the class (example: \"a daiton dog\")\n\nDefault value varies depending on the model type:\n- For SD1.5: \"daiton\" if no class is associated with the model\n- For SDXL: \"daiton\"\n- For Flux: \"\""},"maxTrainSteps":{"type":"number","description":"Maximum number of training steps to execute (default: varies depending on the model type)\n\nFor SDXL LoRA training, please use `numTextTrainSteps` and `numUNetTrainSteps` instead\n\nDefault value varies depending on the model type:\n- For SD1.5: round((number of training images * 225) / 3)\n- For SDXL: number of training images * 175\n- For Flux: number of training images * 100\n\nMaximum value varies depending on the model type:\n- For SD1.5 and SDXL: [0, 40000]\n- For Flux: [0, 10000]","minimum":0,"maximum":40000},"nbEpochs":{"type":"number","description":"The number of epochs to train for\n\nOnly available 
for Flux LoRA training","minimum":1,"maximum":30},"textEncoderTrainingRatio":{"type":"number","description":"Whether to train the text encoder or not\n\nExample: For 100 steps and a value of 0.2, it means that the text encoder will be trained for 20 steps and then the UNet for 80 steps\n\nNote: Legacy parameter, please use `numTextTrainSteps` and `numUNetTrainSteps`\n\nOnly available for SD15 and SDXL LoRA training","minimum":0,"maximum":0.99},"validationFrequency":{"type":"number","description":"Validation frequency. Cannot be greater than maxTrainSteps value\n\nOnly available for SD15 and SDXL LoRA training","minimum":0},"nbRepeats":{"type":"number","description":"The number of times to repeat the training\n\nOnly available for Flux LoRA training","minimum":1,"maximum":30},"samplePrompts":{"type":"array","description":"The prompts to use for each epoch\nOnly available for Flux LoRA training","items":{"type":"string"}},"randomCropRatio":{"type":"number","description":"Ratio of random crops\n\nOnly available for SD15 and SDXL LoRA training","minimum":0,"maximum":1},"learningRate":{"type":"number","description":"Initial learning rate (after the potential warmup period)\n\nDefault value varies depending on the model type:\n- For SD1.5 and SDXL: 0.000005\n- For Flux: 0.0001","minimum":0,"exclusiveMinimum":true},"optimizeFor":{"type":"string","description":"Optimize the model training task for a specific type of input images. 
The available values are:\n- \"likeness\": optimize training for likeness or portrait (targets specific transformer blocks)\n- \"all\": train all transformer blocks\n- \"none\": train no specific transformer blocks\n\nThis parameter controls which double and single transformer blocks are trained\nduring the LoRA training process.\n\nOnly available for Flux LoRA training","enum":["likeness"],"default":"undefined"},"randomCrop":{"type":"boolean","description":"Whether to random crop or center crop images before resizing to the working resolution\n\nOnly available for SD15 and SDXL LoRA training","default":false},"numTextTrainSteps":{"type":"number","description":"The number of training steps for the text encoder\n\nOnly available for SDXL LoRA training","minimum":0,"maximum":40000},"learningRateTextEncoder":{"type":"number","description":"Initial learning rate (after the potential warmup period) for the text encoder\n\nMaximum [Flux LoRA: 0.001]\nDefault [SDXL: 0.00005 | Flux LoRA: 0.00001]\nMinimum [SDXL: 0 | Flux LoRA: 0.000001]","minimum":0,"maximum":0.001,"exclusiveMinimum":true},"learningRateUnet":{"type":"number","description":"Initial learning rate (after the potential warmup period) for the UNet\n\nOnly available for SDXL LoRA training","minimum":0,"exclusiveMinimum":true},"batchSize":{"type":"number","description":"The batch size\nLess steps, and will increase the learning rate\n\nOnly available for Flux LoRA training","minimum":1,"maximum":4}}},"compliantModelIds":{"type":"array","description":"List of base model IDs compliant with the model (example: [\"flux.1-dev\", \"flux.1-schnell\"])\nThis attribute is mainly used for Flux LoRA models","items":{"type":"string"}},"status":{"type":"string","description":"The model 
status","enum":["copying","failed","new","trained","training","training-canceled"]}},"required":["capabilities","collectionIds","createdAt","custom","exampleAssetIds","id","privacy","source","status","tags","trainingImagesNumber","type","updatedAt"]}},"nextPaginationToken":{"type":"string","description":"A token to query the next page of models"}}},"PostControlnetInpaintIpAdapterInferencesResponse":{"type":"object","required":["inference","job"],"properties":{"creativeUnitsCost":{"type":"number","description":"The Creative Units cost for the request billed"},"creativeUnitsDiscount":{"type":"number","description":"The Creative Units discount for the request billed"},"inference":{"type":"object","properties":{"ipAdapterImageIds":{"type":"array","description":"Asset id of the input IpAdapter images","items":{"type":"string"}},"seed":{"type":"string","description":"Used to reproduce previous results. Default: randomly generated number."},"strength":{"type":"number","description":"Controls the noise intensity introduced to the input image, where a value of 1.0 completely erases the original image's details. Available for img2img and inpainting. (within [0.01, 1.0], default: 0.75)","minimum":0.01,"maximum":1},"ipAdapterType":{"type":"string","description":"The type of IP Adapter model to use. Must be one of [`style`, `character`], defaults to `style`","enum":["character","style"],"default":"style"},"ipAdapterImage":{"type":"string","description":"Signed URL to display the IpAdapter image"},"scheduler":{"type":"string","description":"The scheduler to use to override the default configured for the model. 
See detailed documentation for more details.","enum":["DDIMScheduler","DDPMScheduler","DEISMultistepScheduler","DPMSolverMultistepScheduler","DPMSolverSinglestepScheduler","EulerAncestralDiscreteScheduler","EulerDiscreteScheduler","HeunDiscreteScheduler","KDPM2AncestralDiscreteScheduler","KDPM2DiscreteScheduler","LCMScheduler","LMSDiscreteScheduler","PNDMScheduler","TCDScheduler","UniPCMultistepScheduler"]},"disableMerging":{"type":"boolean","description":"If set to true, the entire input image will likely change during inpainting. This results in faster inferences, but the output image will be harder to integrate if the input is just a small part of a larger image.","default":false},"ipAdapterImages":{"type":"array","description":"Signed URL to display the IpAdapter images","items":{"type":"string"}},"baseModelId":{"type":"string","description":"The base model to use for the inference. Only Flux LoRA models can use this parameter.\nAllowed values are available in the model's attribute: `compliantModelIds`"},"height":{"type":"number","description":"The height of the generated images, must be a 8 multiple (within [64, 2048], default: 512)\nIf model.type is `sd-xl`, `sd-xl-lora`, `sd-xl-composition` the height must be within [512, 2048]\nIf model.type is `sd-1_5`, the height must be within [64, 1024]\nIf model.type is `flux.1.1-pro-ultra`, you can use the aspectRatio parameter instead","minimum":64,"maximum":2048},"mask":{"type":"string","description":"Signed URL to display the mask image"},"controlImageId":{"type":"string","description":"Asset id of the controlnet input image"},"image":{"type":"string","description":"Signed URL to display the input image"},"imageId":{"type":"string","description":"Asset id of the input image"},"ipAdapterImageId":{"type":"string","description":"Asset id of the input IpAdapter image"},"ipAdapterScale":{"type":"number","description":"IpAdapter scale factor (within [0.0, 1.0], default: 
0.9).","minimum":0,"maximum":1},"modelEpoch":{"type":"string","description":"The epoch of the model to use for the inference. Only available for Flux Lora Trained models."},"hideResults":{"type":"boolean","description":"If set, generated assets will be hidden and not returned in the list of images of the inference\nor when listing assets (default: false)","default":false},"ipAdapterScales":{"type":"array","description":"IpAdapter scale factors (within [0.0, 1.0], default: 0.9).","items":{"type":"number"}},"maskId":{"type":"string","description":"Asset id of the mask image"},"negativePrompt":{"type":"string","description":"The prompt not to guide the image generation, ignored when guidance < 1 (example: \"((ugly face))\")\nFor Flux based model (not Fast-Flux): requires negativePromptStrength > 0 and active only for inference types txt2img / img2img / controlnet."},"intermediateImages":{"type":"boolean","description":"Enable or disable the intermediate images generation (default: false)","default":false},"concepts":{"type":"array","items":{"type":"object","properties":{"modelId":{"type":"string","description":"The model ID (example: \"model_eyVcnFJcR92BxBkz7N6g5w\")"},"modelEpoch":{"type":"string","description":"The epoch of the model (example: \"000001\")\nOnly available for Flux Lora Trained models"},"scale":{"type":"number","description":"The scale of the model (example: 1.0)\nFor Flux Kontext Prompt Editing, the scale is between 0 and 2.","minimum":-2,"maximum":2}},"required":["modelId","scale"]}},"controlImage":{"type":"string","description":"Signed URL to display the controlnet input image"},"guidance":{"type":"number","description":"Controls how closely the generated image follows the prompt. Higher values result in stronger adherence to the prompt. 
Default and allowed values depend on the model type:\n- For Flux dev models, the default is 3.5 and allowed values are within [0, 10]\n- For Flux pro models, the default is 3 and allowed values are within [2, 5]\n- For SDXL models, the default is 6 and allowed values are within [0, 20]\n- For SD1.5 models, the default is 7.5 and allowed values are within [0, 20]","minimum":0,"maximum":20},"numInferenceSteps":{"type":"number","description":"The number of denoising steps for each image generation (within [1, 150], default: 30)","minimum":1,"maximum":150},"numSamples":{"type":"number","description":"The number of images to generate (within [1, 128], default: 4)","minimum":1,"maximum":128},"width":{"type":"number","description":"The width of the generated images, must be a 8 multiple (within [64, 2048], default: 512)\nIf model.type is `sd-xl`, `sd-xl-lora`, `sd-xl-composition` the width must be within [512, 2048]\nIf model.type is `sd-1_5`, the width must be within [64, 1024]\nIf model.type is `flux.1.1-pro-ultra`, you can use the aspectRatio parameter instead","minimum":64,"maximum":2048},"negativePromptStrength":{"type":"number","description":"Only applicable for flux-dev based models for `txt2img`, `img2img`, and `controlnet` inference types.\n\nControls the influence of the negative prompt. Default 0 means the negative prompt has no effect. Higher values increase negative prompt influence.\nMust be > 0 if negativePrompt is provided.","minimum":0,"maximum":10},"prompt":{"type":"string","description":"Full text prompt including the model placeholder. 
(example: \"an illustration of phoenix in a fantasy world, flying over a mountain, 8k, bokeh effect\")"}},"required":["controlImage","controlImageId","image","imageId","mask","maskId","prompt"]},"job":{"type":"object","properties":{"createdAt":{"type":"string","description":"The job creation date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"jobId":{"type":"string","description":"The job ID (example: \"job_ocZCnG1Df35XRL1QyCZSRxAG8\")"},"metadata":{"type":"object","description":"Metadata of the job with some additional information","properties":{"output":{"type":"object","description":"The output of the job","properties":{}},"input":{"type":"object","description":"The inputs for the job","properties":{}},"assetIds":{"type":"array","description":"List of produced assets for this job","items":{"type":"string"}},"flow":{"type":"array","description":"The flow of the job. Only available for workflow jobs.","items":{"type":"object","properties":{"jobId":{"type":"string","description":"If the flow is part of a WorkflowJob, this is the jobId for the node.\njobId is only available for nodes started. A node \"Pending\" for a running workflow job is not started."},"assets":{"type":"array","description":"List of produced assets for this node.","items":{"type":"object","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]}},"logicType":{"type":"string","description":"The type of the logic for the node.\nOnly available for logic nodes.","enum":["if-else"]},"dependsOn":{"type":"array","description":"The nodes that this node depends on.\nOnly available for nodes that have dependencies. Mainly used for user approval nodes.","items":{"type":"string"}},"modelId":{"type":"string","description":"The model id for the node. 
Mainly used for custom model tasks."},"inputs":{"type":"array","description":"The inputs of the node.","items":{"type":"object","description":"This is used to run the flow.","properties":{"parent":{"type":"boolean","description":"Whether this input represents a parent asset to assign to the produced assets.\nOnly available for \\`file\\` and \\`file_array\\` input types.\n\nFor \\`file_array\\`, the parent asset is the first item in the array."},"color":{"type":"boolean","description":"Whether the input is a color or not. Only available for \\`string\\` input type."},"inputs":{"type":"array","description":"The list of inputs which form an object within a container array.\nAll inputs are the same as the current object.\nThis is only available for type inputs_array inputs.","items":{"type":"object","properties":{}}},"minLength":{"type":"number","description":"The minimum allowed length for string inputs. Also applies to each item in \\`string_array\\`."},"backgroundBehavior":{"type":"string","description":"Specifies the background behavior for the input. 
Only available for \\`file\\` and \\`file_array\\`\ninput types with kind \\`image\\`.","enum":["opaque","transparent"]},"description":{"type":"string","description":"Help text displayed in the UI to provide additional information about the input"},"type":{"type":"string","description":"The data type of the input","enum":["boolean","file","file_array","inputs_array","model","model_array","number","number_array","string","string_array"]},"required":{"type":"object","description":"Set of rules that describes when this input is required:\n- \\`always\\`: Input is always required\n- \\`ifNotDefined\\`: Input is required when another specified input is not defined\n- \\`ifDefined\\`: Input is required when another specified input is defined\n- \\`conditionalValues\\`: Input is required when another input has a specific value\n\nBy default, the input is not required.","properties":{"always":{"type":"boolean","description":"Whether the input is always required"},"ifNotDefined":{"type":"object","description":"Makes this input required when another input is not defined:\n- Key: name of the input that must be undefined\n- Value: message to display when this input is required","properties":{}},"ifDefined":{"type":"object","description":"Makes this input required when another input is defined:\n- Key: name of the input that must be defined\n- Value: message to display when this input is required","properties":{}},"conditionalValues":{"type":"object","description":"Makes this input required when another input has a specific value:\n- Key: name of the input to check\n- Value: operation and allowed values that trigger the requirement","properties":{}}}},"modelTypes":{"type":"array","description":"The allowed model types for this input. 
Example: \\`[\"flux.1-lora\"]\\`.\nOnly available for \\`model_array\\` input type.","items":{"type":"string","enum":["custom","flux.1","flux.1-composition","flux.1-kontext-dev","flux.1-kontext-lora","flux.1-krea-dev","flux.1-krea-lora","flux.1-lora","flux.1-pro","flux.1.1-pro-ultra","flux1.1-pro","gpt-image-1","sd-1_5","sd-1_5-composition","sd-1_5-lora","sd-xl","sd-xl-composition","sd-xl-lora"]}},"ref":{"type":"object","description":"The reference to another input or output of the same workflow.\nMust have at least one of node or conditional.","properties":{"equal":{"type":"string","description":"This is the desired node output value if ref is an if/else node."},"node":{"type":"string","description":"The node id or 'workflow' if the source is a workflow input."},"conditional":{"type":"array","description":"The conditional nodes to reference.\nIf the conditional nodes are successful, the node will be successful.\nIf the conditional nodes are skipped, the node will be skipped.\nContains an array of node ids used to check the status of the nodes.","items":{"type":"string"}},"name":{"type":"string","description":"The name of the input or output to reference.\nIf the type is 'workflow', the name is the name of the input of the workflow is required\nIf the type is 'node', the name is not mandatory, except if you want all outputs of the node.\nTo get all outputs of a node, you can use the name 'all'."}}},"min":{"type":"number","description":"The minimum allowed value. Only available for \\`number\\` and array input types."},"promptSpark":{"type":"boolean","description":"Whether the input is used with prompt spark. Only available for \\`string\\` input type."},"placeholder":{"type":"string","description":"Placeholder text for the input. Only available for 'string' input type."},"group":{"type":"string","description":"Used to visually group inputs together in the UI. 
Inputs with the same group value appear\nconsecutively in the UI."},"allowedValues":{"type":"array","description":"The allowed values for the input. For \\`string\\` or \\`number\\` types, creates a single-select\ndropdown.\nFor \\`string_array\\` type, creates a multi-select dropdown.","items":{}},"costImpact":{"type":"boolean","description":"Whether this input affects the model's cost calculation"},"max":{"type":"number","description":"The maximum allowed value. Only available for \\`number\\` and \\`array\\` input types."},"kind":{"type":"string","description":"The asset kind of the input. Only taken into account for \\`file\\` and \\`file_array\\` input types.","enum":["3d","audio","document","image","image-hdr","json","video"]},"maskFrom":{"type":"string","description":"The name of the file input field to use as the mask source"},"label":{"type":"string","description":"The label displayed in the UI for this input"},"hint":{"type":"string","description":"Hint text displayed in the UI as a tooltip to guide the user"},"name":{"type":"string","description":"The name that must be used to call the model through the API"},"step":{"type":"number","description":"The step increment for numeric inputs. Only available for \\`number\\` input type.","minimum":1},"prompt":{"type":"boolean","description":"Whether the input is a prompt. When true, displays as a text area with prompt spark feature.\nOnly available for \\`string\\` input type."},"maxLength":{"type":"number","description":"The maximum allowed length for \\`string\\` inputs. 
Also applies to each item in \\`string_array\\`."}},"required":["name","type"]}},"id":{"type":"string","description":"The id of the node."},"logic":{"type":"object","description":"The logic of the node.\nOnly available for logic nodes.","properties":{"default":{"type":"string","description":"The default case of the logic.\nContains the id/output of the node to execute if no case is matched.\nOnly available for if/else nodes."},"transform":{"type":"string","description":"The transform of the logic.\nOnly available for transform nodes."},"cases":{"type":"array","description":"The cases of the logic.\nOnly available for if/else nodes.","items":{"type":"object","properties":{"condition":{"type":"string"},"value":{"type":"string"}},"required":["condition","value"]}}}},"type":{"type":"string","description":"The type of the job for the node.","enum":["custom-model","generate-prompt","logic","remove-background","transform","user-approval","workflow"]},"workflowId":{"type":"string","description":"The workflow id for the node. Mainly used for workflow tasks."},"status":{"type":"string","description":"The status of the node. 
Only available for WorkflowJob nodes.","enum":["failure","pending","processing","skipped","success"]}},"required":["id","status","type"]}},"workflowId":{"type":"string","description":"The workflow ID of the job if job is part of a workflow."},"workflowJobId":{"type":"string","description":"The workflow job ID of the job if job is part of a workflow job."}}},"statusHistory":{"type":"array","description":"The history of the different statuses the job went through with the ISO string date\nof when the job reached each status.","items":{"type":"object","properties":{"date":{"type":"string"},"status":{"type":"string","enum":["canceled","failure","finalizing","in-progress","pending","queued","success","warming-up"]}},"required":["date","status"]}},"progress":{"type":"number","description":"Progress of the job (between 0 and 1)"},"authorId":{"type":"string","description":"The author user ID (example: \"dcf121faaa1a0a0bbbd9ca1b73d62aea\")"},"jobType":{"type":"string","description":"The type of job","enum":["assets-download","canvas-export","caption","caption-llava","custom","describe-style","detection","embed","flux","flux-model-training","generate-prompt","image-generation","image-prompt-editing","inference","mesh-preview-rendering","model-download","model-import","model-training","openai-image-generation","patch-image","pixelate","reframe","remove-background","repaint","restyle","segment","skybox-3d","skybox-base-360","skybox-hdri","skybox-upscale-360","texture","translate","upload","upscale","upscale-skybox","upscale-texture","vectorize","workflow"]},"ownerId":{"type":"string","description":"The owner ID (example: \"team_U3Qmc8PCdWXwAQJ4Dvw4tV6D\")"},"billing":{"type":"object","description":"The billing of the job","properties":{"cuDiscount":{"type":"number"},"cuCost":{"type":"number"}},"required":["cuCost","cuDiscount"]},"status":{"type":"string","description":"The current status of the 
job","enum":["canceled","failure","finalizing","in-progress","pending","queued","success","warming-up"]},"updatedAt":{"type":"string","description":"The job last update date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"}},"required":["createdAt","jobId","jobType","metadata","progress","status","statusHistory","updatedAt"]}}},"PutCollectionsByCollectionIdRequest":{"type":"object","properties":{"thumbnail":{"type":"string","description":"The AssetId of the image you want to use as a thumbnail for the collection. Set to null to unset the thumbnail."},"name":{"type":"string","description":"The new name for the Collection"}}},"PostModelsGetBulkRequest":{"type":"object","properties":{"settings":{"type":"boolean","description":"If true, will return the settings: `promptEmbedding` and `negativePromptEmbedding`.","default":false},"trainingImagesPreview":{"type":"boolean","description":"If true will return the first 3 training images; otherwise returns the full training images.\n\nIf `allTrainingImages` set to true, this parameter is ignored.","default":false},"minimal":{"type":"boolean","description":"If true will return only the base details of the model (id, name, type)\n\nif true, all other parameters are ignored","default":false},"thumbnail":{"type":"boolean","description":"If true will return the thumbnail, when no thumbnail is set, will try to fetch the first training image instead.","default":false},"allTrainingImages":{"type":"boolean","description":"If true will return all training images; otherwise returns only the first 3 training images.\n\nIf `trainingImagesPreview` set to true, this parameter is ignored.","default":false},"modelIds":{"type":"array","description":"The list of model IDs to include in the response","items":{"type":"string"}}},"description":"Options for the `POST /models/get-bulk` endpoint\n\nrelates to 
ModelToJsonOptions"},"GetModelsCategoriesByModelIdResponse":{"type":"object","required":["categories"],"properties":{"categories":{"type":"array","items":{"type":"object","properties":{"name":{"type":"string","description":"The category name (example: \"Art style\")"},"publishedClassesNumber":{"type":"number","description":"The number of published classes within this category.","minimum":0},"slug":{"type":"string","description":"The category slug (example: \"art-style\")"}},"required":["name","publishedClassesNumber","slug"]}}}},"PostWebhookEndpointsResponse":{"type":"object","required":["webhookEndpoint"],"properties":{"webhookEndpoint":{"type":"object","properties":{"createdAt":{"type":"string","description":"The date and time the webhook endpoint was created"},"nbTotalCalls":{"type":"number","description":"The number of calls to the webhook endpoint"},"enabledEvents":{"type":"array","description":"The events that trigger the webhook. ['*'] indicates that all events are enabled","items":{"type":"string","enum":["*","asset.download.completed","asset.download.created","asset.download.failed","generation.cancelled","generation.completed","generation.created","generation.failed","inference.cancelled","inference.completed","inference.created","inference.failed","model.download.completed","model.download.created","model.download.failed","model.training.cancelled","model.training.completed","model.training.failed","model.training.started"]}},"nbFailedCalls":{"type":"number","description":"The number of calls to the webhook endpoint that have failed"},"description":{"type":"string","description":"A description of the webhook endpoint"},"id":{"type":"string","description":"The ID of the webhook endpoint"},"secret":{"type":"string","description":"The endpoint's secret, used to generate webhook signatures. 
Only returned at creation"},"ownerId":{"type":"string","description":"The ID of the owner of the webhook endpoint"},"enabled":{"type":"boolean","description":"Whether the webhook is enabled"},"url":{"type":"string","description":"The URL of the webhook endpoint"},"updatedAt":{"type":"string","description":"The date and time the webhook endpoint was updated"}},"required":["createdAt","enabled","enabledEvents","id","nbFailedCalls","nbTotalCalls","ownerId","updatedAt","url"]}}},"PostDownloadAssetsResponse":{"type":"object","required":["jobId"],"properties":{"jobId":{"type":"string","description":"The job id associated with the download request"}}},"GetModelsResponse":{"type":"object","required":["models"],"properties":{"models":{"type":"array","items":{"type":"object","properties":{"trainingImages":{"type":"array","description":"The URLs of the first 3 training images of the model. To retrieve the full set of images, get it by modelId","items":{"type":"object","properties":{"createdAt":{"type":"string","description":"The training image upload date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"automaticCaptioning":{"type":"string","description":"Automatic captioning of the image"},"downloadUrl":{"type":"string","description":"The URL of the image"},"name":{"type":"string","description":"The original file name of the image (example: \"my-training-image.jpg\")"},"description":{"type":"string","description":"Description for the image"},"id":{"type":"string","description":"The training image ID (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"}},"required":["automaticCaptioning","createdAt","description","downloadUrl","id","name"]}},"inputs":{"type":"array","description":"The inputs of the model. Only used for custom models. To retrieve this list, get it by modelId with GET /models/{modelId}","items":{"type":"object","description":"Defines the input parameters for a model. 
Use this to understand the available input parameters\nfor a given `modelId` when calling `POST /generate/custom/{modelId}`.\nSee {@link https://docs.scenario.com/docs/video-generation} for examples.","properties":{"parent":{"type":"boolean","description":"Whether this input represents a parent asset to assign to the produced assets.\nOnly available for \\`file\\` and \\`file_array\\` input types.\n\nFor \\`file_array\\`, the parent asset is the first item in the array."},"color":{"type":"boolean","description":"Whether the input is a color or not. Only available for \\`string\\` input type."},"inputs":{"type":"array","description":"The list of inputs which form an object within a container array.\nAll inputs are the same as the current object.\nThis is only available for type inputs_array inputs.","items":{"type":"object","properties":{}}},"minLength":{"type":"number","description":"The minimum allowed length for string inputs. Also applies to each item in \\`string_array\\`."},"backgroundBehavior":{"type":"string","description":"Specifies the background behavior for the input. 
Only available for \\`file\\` and \\`file_array\\`\ninput types with kind \\`image\\`.","enum":["opaque","transparent"]},"description":{"type":"string","description":"Help text displayed in the UI to provide additional information about the input"},"type":{"type":"string","description":"The data type of the input","enum":["boolean","file","file_array","inputs_array","model","model_array","number","number_array","string","string_array"]},"required":{"type":"object","description":"Set of rules that describes when this input is required:\n- \\`always\\`: Input is always required\n- \\`ifNotDefined\\`: Input is required when another specified input is not defined\n- \\`ifDefined\\`: Input is required when another specified input is defined\n- \\`conditionalValues\\`: Input is required when another input has a specific value\n\nBy default, the input is not required.","properties":{"always":{"type":"boolean","description":"Whether the input is always required"},"ifNotDefined":{"type":"object","description":"Makes this input required when another input is not defined:\n- Key: name of the input that must be undefined\n- Value: message to display when this input is required","properties":{}},"ifDefined":{"type":"object","description":"Makes this input required when another input is defined:\n- Key: name of the input that must be defined\n- Value: message to display when this input is required","properties":{}},"conditionalValues":{"type":"object","description":"Makes this input required when another input has a specific value:\n- Key: name of the input to check\n- Value: operation and allowed values that trigger the requirement","properties":{}}}},"modelTypes":{"type":"array","description":"The allowed model types for this input. 
Example: \\`[\"flux.1-lora\"]\\`.\nOnly available for \\`model_array\\` input type.","items":{"type":"string","enum":["custom","flux.1","flux.1-composition","flux.1-kontext-dev","flux.1-kontext-lora","flux.1-krea-dev","flux.1-krea-lora","flux.1-lora","flux.1-pro","flux.1.1-pro-ultra","flux1.1-pro","gpt-image-1","sd-1_5","sd-1_5-composition","sd-1_5-lora","sd-xl","sd-xl-composition","sd-xl-lora"]}},"min":{"type":"number","description":"The minimum allowed value. Only available for \\`number\\` and array input types."},"promptSpark":{"type":"boolean","description":"Whether the input is used with prompt spark. Only available for \\`string\\` input type."},"placeholder":{"type":"string","description":"Placeholder text for the input. Only available for 'string' input type."},"group":{"type":"string","description":"Used to visually group inputs together in the UI. Inputs with the same group value appear\nconsecutively in the UI."},"allowedValues":{"type":"array","description":"The allowed values for the input. For \\`string\\` or \\`number\\` types, creates a single-select\ndropdown.\nFor \\`string_array\\` type, creates a multi-select dropdown.","items":{}},"costImpact":{"type":"boolean","description":"Whether this input affects the model's cost calculation"},"max":{"type":"number","description":"The maximum allowed value. Only available for \\`number\\` and \\`array\\` input types."},"kind":{"type":"string","description":"The asset kind of the input. 
Only taken into account for \\`file\\` and \\`file_array\\` input types.","enum":["3d","audio","document","image","image-hdr","json","video"]},"maskFrom":{"type":"string","description":"The name of the file input field to use as the mask source"},"label":{"type":"string","description":"The label displayed in the UI for this input"},"hint":{"type":"string","description":"Hint text displayed in the UI as a tooltip to guide the user"},"name":{"type":"string","description":"The name that must be used to call the model through the API"},"step":{"type":"number","description":"The step increment for numeric inputs. Only available for \\`number\\` input type.","minimum":1},"prompt":{"type":"boolean","description":"Whether the input is a prompt. When true, displays as a text area with prompt spark feature.\nOnly available for \\`string\\` input type."},"maxLength":{"type":"number","description":"The maximum allowed length for \\`string\\` inputs. Also applies to each item in \\`string_array\\`."}},"required":["name","type"]}},"exampleAssetIds":{"type":"array","description":"List of all example asset IDs setup by the model owner","items":{"type":"string"}},"privacy":{"type":"string","description":"The privacy of the model (default: private)","enum":["private","public","unlisted"],"default":"private"},"softDeletionOn":{"type":"string","description":"The date when the model will be soft deleted (only for Free plan)"},"epoch":{"type":"string","description":"The epoch of the model. 
Only available for Flux Lora Trained models.\nIf not set, uses the final model epoch (latest)"},"uiConfig":{"type":"object","description":"The UI configuration for the model","properties":{"selects":{"type":"object","description":"Configuration for the selects","additionalProperties":{"type":"object","properties":{}}},"inputProperties":{"type":"object","description":"Configuration for the input properties","additionalProperties":{"type":"object","properties":{"collapsed":{"type":"boolean"}}}},"presets":{"type":"array","description":"Configuration for the presets","items":{"type":"object","properties":{"presets":{"type":"object","properties":{}},"fields":{"type":"array","items":{"type":"string"}}},"required":["fields","presets"]}},"triggerGenerate":{"type":"object","description":"Configuration for the trigger generate button","properties":{"after":{"type":"string","description":"The 'name' of the input where the trigger generate button will be displayed (after the input).\nDo not specify both position and after."},"label":{"type":"string"},"position":{"type":"string","description":"The position of the trigger generate button. 
If position specified, the button will be displayed at the specified position.\nDo not specify both position and after.","enum":["bottom","top"]}},"required":["label"]},"lorasComponent":{"type":"object","description":"Configuration for the loras component","properties":{"modelIdInput":{"type":"string","description":"The input model id (example: a composition or a single LoRA modelId)\nIf specified, the model id will be attached to the output asset as a metadata\nIf the model-decomposer parser is specified on it, modelInput and scaleInput will be automatically populated"},"scaleInput":{"type":"string","description":"The input name of the scale (number_array)"},"label":{"type":"string","description":"The label of the component"},"modelInput":{"type":"string","description":"The input name of the model (model_array)"}},"required":["label","modelInput","scaleInput"]},"resolutionComponent":{"type":"object","description":"Configuration for the resolution component","properties":{"presets":{"type":"array","description":"The resolution presets","items":{"type":"object","properties":{"width":{"type":"number"},"label":{"type":"string"},"height":{"type":"number"}},"required":["height","label","width"]}},"widthInput":{"type":"string","description":"The input name of the width"},"label":{"type":"string","description":"The label of the component"},"heightInput":{"type":"string","description":"The input name of the height"}},"required":["heightInput","label","presets","widthInput"]}}},"source":{"type":"string","description":"The source of the model","enum":["civitai","huggingface","other","scenario"]},"ownerId":{"type":"string","description":"The owner ID (example: \"team_VFhihHKMRZyDDnZAJwLb2Q\")"},"type":{"type":"string","description":"The model type (example: 
\"flux.1-lora\")","enum":["custom","flux.1","flux.1-composition","flux.1-kontext-dev","flux.1-kontext-lora","flux.1-krea-dev","flux.1-krea-lora","flux.1-lora","flux.1-pro","flux.1.1-pro-ultra","flux1.1-pro","gpt-image-1","sd-1_5","sd-1_5-composition","sd-1_5-lora","sd-xl","sd-xl-composition","sd-xl-lora"]},"createdAt":{"type":"string","description":"The model creation date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"complianceMetadata":{"type":"object","description":"Compliance and regulatory metadata for the model","properties":{"subProcessor":{"type":"string","description":"The sub-processor used for the model","enum":["AWS","Fal","Modal","Provider","Replicate"]},"modelProvider":{"type":"string","description":"The provider of the model (examples: 'Google', 'AWS', 'BFL', 'Meshy', etc.)"},"licenseTerms":{"type":"string","description":"URL to license terms"},"dataProcessingComment":{"type":"string","description":"Data processing comment (e.g., zeroRetention, temporaryRetention, etc.)","enum":["researchOnly","serviceImprovement","temporaryRetention","zeroRetention"]},"maintainer":{"type":"string","description":"The maintainer of the model","enum":["Fal","Provider","Replicate","Scenario"]}}},"trainingStats":{"type":"object","description":"Additional information about the model's training","properties":{"trainDuration":{"type":"number","description":"The training duration in seconds"},"queueDuration":{"type":"number","description":"The training queued duration in seconds"},"endedAt":{"type":"string","description":"The training end time as an ISO date string"},"startedAt":{"type":"string","description":"The training start time as an ISO date string"}}},"promptEmbedding":{"type":"string","description":"Fine-tune the model's inferences with prompt embedding"},"trainingProgress":{"type":"object","description":"Additional information about the training progress of the model","properties":{"stage":{"type":"string","description":"The stage of the 
request","enum":["pending","queued-for-train","running-train","starting-train"]},"remainingTimeMs":{"type":"number","description":"The remaining time in milliseconds"},"progress":{"type":"number","description":"The progress of the job","minimum":0,"maximum":1},"startedAt":{"type":"number","description":"The timestamp in millisecond marking the start of the process"},"position":{"type":"number","description":"Position of the job in the queue (ie. the number of job in the queue before this one)"},"updatedAt":{"type":"number","description":"Timestamp in milliseconds of the last time the training progress was updated"}},"required":["stage","updatedAt"]},"id":{"type":"string","description":"The model ID (example: \"model_eyVcnFJcR92BxBkz7N6g5w\")"},"class":{"type":"object","description":"The class of the model","properties":{"modelId":{"type":"string","description":"The model ID of the class (example: \"stable-diffusion-v1-5\")"},"name":{"type":"string","description":"The class name (example: \"Character Design\")"},"category":{"type":"string","description":"The category slug of the class (example: \"art-style\")"},"thumbnails":{"type":"array","description":"Some example images URLs to showcase the class","items":{"type":"string"}},"prompt":{"type":"string","description":"The class prompt (example: \"a character design\")"},"conceptPrompt":{"type":"string","description":"The concept prompt of the class (example: \"a sks character design\")"},"slug":{"type":"string","description":"The class slug (example: \"art-style-character-design\")"},"status":{"type":"string","description":"The class status (only published classes are listed, but unpublished classes can still appear in existing models)","enum":["published","unpublished"]}},"required":["category","conceptPrompt","modelId","name","prompt","slug","status","thumbnails"]},"updatedAt":{"type":"string","description":"The model last update date as an ISO string (example: 
\"2023-02-03T11:19:41.579Z\")"},"thumbnail":{"type":"object","description":"A thumbnail for your model","properties":{"assetId":{"type":"string","description":"The AssetId of the image used as a thumbnail for your model (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"url":{"type":"string","description":"The url of the image used as a thumbnail for your model"}},"required":["assetId","url"]},"accessRestrictions":{"type":"number","description":"The access restrictions of the model\n0: Free plan\n25: Creator plan\n50: Pro plan\n75: Team plan\n100: Enterprise plan"},"capabilities":{"type":"array","description":"List of model capabilities (example: [\"txt2img\", \"img2img\", \"txt2img_ip_adapter\", ...])","items":{"type":"string","enum":["3d23d","audio2audio","controlnet","controlnet_img2img","controlnet_inpaint","controlnet_inpaint_ip_adapter","controlnet_ip_adapter","controlnet_reference","controlnet_texture","img23d","img2img","img2img_ip_adapter","img2img_texture","img2txt","img2video","inpaint","inpaint_ip_adapter","outpaint","reference","reference_texture","txt23d","txt2audio","txt2img","txt2img_ip_adapter","txt2img_texture","txt2txt","txt2video","video2img","video2video"]}},"parentModelId":{"type":"string","description":"The id of the parent model"},"trainingImagePairs":{"type":"array","description":"Array of training image pairs","items":{"type":"object","properties":{"sourceId":{"type":"string","description":"The source asset ID (must be a training asset)"},"targetId":{"type":"string","description":"The target asset ID (must be a training asset)"},"instruction":{"type":"string","description":"The instruction for the image pair, source to target"}}}},"trainingImagesNumber":{"type":"number","description":"The total number of training images"},"custom":{"type":"boolean","description":"Whether the model is a custom model and can be used only with POST /generate/custom/{modelId} endpoint"},"modelKeyword":{"type":"string","description":"The model keyword, this is a 
legacy parameter, please use conceptPrompt in parameters"},"shortDescription":{"type":"string","description":"The model short description (example: \"This model generates highly detailed cinematic scenes.\")"},"authorId":{"type":"string","description":"The author user ID (example: \"user_VFhihHKMRZyDDnZAJwLb2Q\")"},"userId":{"type":"string","description":"(Deprecated) The user ID (example: \"user_VFhihHKMRZyDDnZAJwLb2Q\")"},"tags":{"type":"array","description":"The associated tags (example: [\"sci-fi\", \"landscape\"])","items":{"type":"string"}},"negativePromptEmbedding":{"type":"string","description":"Fine-tune the model's inferences with negative prompt embedding"},"concepts":{"type":"array","description":"The concepts is required for the type model: composition","items":{"type":"object","properties":{"modelId":{"type":"string","description":"The model ID (example: \"model_eyVcnFJcR92BxBkz7N6g5w\")"},"modelEpoch":{"type":"string","description":"The epoch of the model (example: \"000001\")\nOnly available for Flux Lora Trained models"},"scale":{"type":"number","description":"The scale of the model (example: 1.0)\nFor Flux Kontext Prompt Editing, the scale is between 0 and 2.","minimum":-2,"maximum":2}},"required":["modelId","scale"]}},"collectionIds":{"type":"array","description":"A list of CollectionId this model belongs to","items":{"type":"string"}},"name":{"type":"string","description":"The model name (example: \"Cinematic Realism\")"},"epochs":{"type":"array","description":"The epochs of the model. 
Only available for Flux Lora Trained models.","items":{"type":"object","properties":{"assets":{"type":"array","description":"The assets of the epoch if sample prompts has been supplied during training","items":{"type":"object","properties":{"assetId":{"type":"string","description":"The AssetId of the image during training (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"url":{"type":"string","description":"The url of the asset"}},"required":["assetId","url"]}},"epoch":{"type":"string","description":"The epoch hash to identify the epoch"}},"required":["epoch"]}},"parameters":{"type":"object","description":"The parameters of the model","properties":{"priorLossWeight":{"type":"number","description":"The weight of prior preservation loss\n\nOnly available for SD15 and SDXL LoRA training","minimum":0,"maximum":1.7976931348623157e+308,"exclusiveMinimum":true},"seed":{"type":"number","description":"Used to reproduce previous results. Default: randomly generated number.\n\nOnly available for SD15 and SDXL LoRA training","minimum":0,"maximum":9007199254740991},"numUNetTrainSteps":{"type":"number","description":"The number of training steps for the UNet\n\nOnly available for SDXL LoRA training","minimum":0,"maximum":40000},"sampleSourceImages":{"type":"array","description":"The sample prompt images (AssetIds) paired with samplePrompts\nOnly available for Flux LoRA training\nMust be the same length as samplePrompts","items":{"type":"string"}},"classPrompt":{"type":"string","description":"The prompt to specify images in the same class as provided instance images\n\nOnly available for SD15 training"},"wandbKey":{"type":"string","description":"The Weights And Bias key to use for logging. 
The maximum length is 40 characters"},"scaleLr":{"type":"boolean","description":"Whether to scale the learning rate\n\nNote: Legacy parameter, will be ignored\n\nOnly available for SD15 and SDXL LoRA training","default":false},"randomCropScale":{"type":"number","description":"Scale of random crops\n\nOnly available for SD15 and SDXL LoRA training","minimum":0,"maximum":1},"lrScheduler":{"type":"string","description":"The scheduler type to use (default: \"constant\")\n\nOnly available for SD15 and SDXL LoRA training","enum":["constant","constant-with-warmup","cosine","cosine-with-restarts","linear","polynomial"],"default":"constant"},"rank":{"type":"number","description":"The dimension of the LoRA update matrices\n\nOnly available for SDXL and Flux LoRA training\n\nDefault value varies depending on the model type:\n- For SDXL: 64\n- For Flux: 16","minimum":4,"maximum":64},"validationPrompt":{"type":"string","description":"Validation prompt\n\nOnly available for SD15 and SDXL LoRA training","default":""},"conceptPrompt":{"type":"string","description":"The prompt with identifier specifying the instance (or subject) of the class (example: \"a daiton dog\")\n\nDefault value varies depending on the model type:\n- For SD1.5: \"daiton\" if no class is associated with the model\n- For SDXL: \"daiton\"\n- For Flux: \"\""},"maxTrainSteps":{"type":"number","description":"Maximum number of training steps to execute (default: varies depending on the model type)\n\nFor SDXL LoRA training, please use `numTextTrainSteps` and `numUNetTrainSteps` instead\n\nDefault value varies depending on the model type:\n- For SD1.5: round((number of training images * 225) / 3)\n- For SDXL: number of training images * 175\n- For Flux: number of training images * 100\n\nMaximum value varies depending on the model type:\n- For SD1.5 and SDXL: [0, 40000]\n- For Flux: [0, 10000]","minimum":0,"maximum":40000},"nbEpochs":{"type":"number","description":"The number of epochs to train for\n\nOnly available 
for Flux LoRA training","minimum":1,"maximum":30},"textEncoderTrainingRatio":{"type":"number","description":"Whether to train the text encoder or not\n\nExample: For 100 steps and a value of 0.2, it means that the text encoder will be trained for 20 steps and then the UNet for 80 steps\n\nNote: Legacy parameter, please use `numTextTrainSteps` and `numUNetTrainSteps`\n\nOnly available for SD15 and SDXL LoRA training","minimum":0,"maximum":0.99},"validationFrequency":{"type":"number","description":"Validation frequency. Cannot be greater than maxTrainSteps value\n\nOnly available for SD15 and SDXL LoRA training","minimum":0},"nbRepeats":{"type":"number","description":"The number of times to repeat the training\n\nOnly available for Flux LoRA training","minimum":1,"maximum":30},"samplePrompts":{"type":"array","description":"The prompts to use for each epoch\nOnly available for Flux LoRA training","items":{"type":"string"}},"randomCropRatio":{"type":"number","description":"Ratio of random crops\n\nOnly available for SD15 and SDXL LoRA training","minimum":0,"maximum":1},"learningRate":{"type":"number","description":"Initial learning rate (after the potential warmup period)\n\nDefault value varies depending on the model type:\n- For SD1.5 and SDXL: 0.000005\n- For Flux: 0.0001","minimum":0,"exclusiveMinimum":true},"optimizeFor":{"type":"string","description":"Optimize the model training task for a specific type of input images. 
The available values are:\n- \"likeness\": optimize training for likeness or portrait (targets specific transformer blocks)\n- \"all\": train all transformer blocks\n- \"none\": train no specific transformer blocks\n\nThis parameter controls which double and single transformer blocks are trained\nduring the LoRA training process.\n\nOnly available for Flux LoRA training","enum":["all","likeness","none"]},"randomCrop":{"type":"boolean","description":"Whether to random crop or center crop images before resizing to the working resolution\n\nOnly available for SD15 and SDXL LoRA training","default":false},"numTextTrainSteps":{"type":"number","description":"The number of training steps for the text encoder\n\nOnly available for SDXL LoRA training","minimum":0,"maximum":40000},"learningRateTextEncoder":{"type":"number","description":"Initial learning rate (after the potential warmup period) for the text encoder\n\nMaximum [Flux LoRA: 0.001]\nDefault [SDXL: 0.00005 | Flux LoRA: 0.00001]\nMinimum [SDXL: 0 | Flux LoRA: 0.000001]","minimum":0,"maximum":0.001,"exclusiveMinimum":true},"learningRateUnet":{"type":"number","description":"Initial learning rate (after the potential warmup period) for the UNet\n\nOnly available for SDXL LoRA training","minimum":0,"exclusiveMinimum":true},"batchSize":{"type":"number","description":"The batch size\nLess steps, and will increase the learning rate\n\nOnly available for Flux LoRA training","minimum":1,"maximum":4}}},"compliantModelIds":{"type":"array","description":"List of base model IDs compliant with the model (example: [\"flux.1-dev\", \"flux.1-schnell\"])\nThis attribute is mainly used for Flux LoRA models","items":{"type":"string"}},"status":{"type":"string","description":"The model 
status","enum":["copying","failed","new","trained","training","training-canceled"]}},"required":["capabilities","collectionIds","createdAt","custom","exampleAssetIds","id","privacy","source","status","tags","trainingImagesNumber","type","updatedAt"]}},"nextPaginationToken":{"type":"string","description":"A token to query the next page of models"}}},"PostControlnetInpaintInferencesResponse":{"type":"object","required":["inference","job"],"properties":{"creativeUnitsCost":{"type":"number","description":"The Creative Units cost for the request billed"},"creativeUnitsDiscount":{"type":"number","description":"The Creative Units discount for the request billed"},"inference":{"type":"object","properties":{"controlEnd":{"type":"number","description":"Specifies how long the ControlNet guidance should be applied during the inference process.\n\nOnly available for Flux.1-dev based models.\n\nThe value represents the percentage of total inference steps where the ControlNet guidance is active.\nFor example:\n- 1.0: ControlNet guidance is applied during all inference steps\n- 0.5: ControlNet guidance is only applied during the first half of inference steps\n\nDefault values:\n- 0.5 for Canny modality\n- 0.6 for all other modalities","minimum":0.1,"maximum":1},"modality":{"type":"string","description":"The modality associated with the control image used for the generation: it can either be an object with a combination of maximum\n\nFor models of SD1.5 family:\n - up to 3 modalities from `canny`, `pose`, `depth`, `lines`, `seg`, `scribble`, `lineart`, `normal-map`, `illusion`\n - or one of the following presets: `character`, `landscape`, `city`, `interior`.\n\nFor models of the SDXL family:\n - up to 3 modalities from `canny`, `pose`, `depth`, `seg`, `illusion`, `scribble`\n - or one of the following presets: `character`, `landscape`.\n\nFor models of the FLUX schnell or dev families:\n- one modality from: `canny`, `tile`, `depth`, `blur`, `pose`, `gray`, 
`low-quality`\n\nOptionally, you can associate a value to these modalities or presets. The value must be within `]0.0, 1.0]`.\n\nExamples:\n- `canny`\n- `depth:0.5,pose:1.0`\n- `canny:0.5,depth:0.5,lines:0.3`\n- `landscape`\n- `character:0.5`\n- `illusion:1`\n\nNote: if you use a value that is not supported by the model family, this will result in an error.","enum":["blur","canny","depth","gray","illusion","lineart","lines","low-quality","normal-map","pose","scribble","seg","sketch","tile"]},"seed":{"type":"string","description":"Used to reproduce previous results. Default: randomly generated number."},"strength":{"type":"number","description":"Controls the noise intensity introduced to the input image, where a value of 1.0 completely erases the original image's details. Available for img2img and inpainting. (within [0.01, 1.0], default: 0.75)","minimum":0.01,"maximum":1},"scheduler":{"type":"string","description":"The scheduler to use to override the default configured for the model. See detailed documentation for more details.","enum":["DDIMScheduler","DDPMScheduler","DEISMultistepScheduler","DPMSolverMultistepScheduler","DPMSolverSinglestepScheduler","EulerAncestralDiscreteScheduler","EulerDiscreteScheduler","HeunDiscreteScheduler","KDPM2AncestralDiscreteScheduler","KDPM2DiscreteScheduler","LCMScheduler","LMSDiscreteScheduler","PNDMScheduler","TCDScheduler","UniPCMultistepScheduler"]},"disableMerging":{"type":"boolean","description":"If set to true, the entire input image will likely change during inpainting. 
This results in faster inferences, but the output image will be harder to integrate if the input is just a small part of a larger image.","default":false},"disableModalityDetection":{"type":"boolean","description":"If false, the process uses the given image to detect the modality.\nIf true (default), the process will not try to detect the modality of the given image.\n\nFor example:\nwith `pose` modality and `false` value, the process will detect the pose of people in the given image\nwith `depth` modality and `false` value, the process will detect the depth of the given image\nwith `scribble` modality and `true`value, the process will use the given image as a scribble\n\n⚠️ For models of the FLUX schnell or dev families, this parameter is ignored. The modality detection is always disabled. ⚠️","default":true},"baseModelId":{"type":"string","description":"The base model to use for the inference. Only Flux LoRA models can use this parameter.\nAllowed values are available in the model's attribute: `compliantModelIds`"},"controlStart":{"type":"number","description":"Specifies the starting point of the ControlNet guidance during the inference process.\n\nOnly available for Flux.1-dev based models.\n\nThe value represents the percentage of total inference steps where the ControlNet guidance starts.\nFor example:\n- 0.0: ControlNet guidance starts at the beginning of the inference steps\n- 0.5: ControlNet guidance starts at the middle of the inference steps","minimum":0,"maximum":0.9},"height":{"type":"number","description":"The height of the generated images, must be a 8 multiple (within [64, 2048], default: 512)\nIf model.type is `sd-xl`, `sd-xl-lora`, `sd-xl-composition` the height must be within [512, 2048]\nIf model.type is `sd-1_5`, the height must be within [64, 1024]\nIf model.type is `flux.1.1-pro-ultra`, you can use the aspectRatio parameter instead","minimum":64,"maximum":2048},"mask":{"type":"string","description":"Signed URL to display the mask 
image"},"controlImageId":{"type":"string","description":"Asset id of the controlnet input image"},"image":{"type":"string","description":"Signed URL to display the input image"},"imageId":{"type":"string","description":"Asset id of the input image"},"modelEpoch":{"type":"string","description":"The epoch of the model to use for the inference. Only available for Flux Lora Trained models."},"hideResults":{"type":"boolean","description":"If set, generated assets will be hidden and not returned in the list of images of the inference\nor when listing assets (default: false)","default":false},"maskId":{"type":"string","description":"Asset id of the mask image"},"negativePrompt":{"type":"string","description":"The prompt not to guide the image generation, ignored when guidance < 1 (example: \"((ugly face))\")\nFor Flux based model (not Fast-Flux): requires negativePromptStrength > 0 and active only for inference types txt2img / img2img / controlnet."},"intermediateImages":{"type":"boolean","description":"Enable or disable the intermediate images generation (default: false)","default":false},"concepts":{"type":"array","items":{"type":"object","properties":{"modelId":{"type":"string","description":"The model ID (example: \"model_eyVcnFJcR92BxBkz7N6g5w\")"},"modelEpoch":{"type":"string","description":"The epoch of the model (example: \"000001\")\nOnly available for Flux Lora Trained models"},"scale":{"type":"number","description":"The scale of the model (example: 1.0)\nFor Flux Kontext Prompt Editing, the scale is between 0 and 2.","minimum":-2,"maximum":2}},"required":["modelId","scale"]}},"controlImage":{"type":"string","description":"Signed URL to display the controlnet input image"},"guidance":{"type":"number","description":"Controls how closely the generated image follows the prompt. Higher values result in stronger adherence to the prompt. 
Default and allowed values depend on the model type:\n- For Flux dev models, the default is 3.5 and allowed values are within [0, 10]\n- For Flux pro models, the default is 3 and allowed values are within [2, 5]\n- For SDXL models, the default is 6 and allowed values are within [0, 20]\n- For SD1.5 models, the default is 7.5 and allowed values are within [0, 20]","minimum":0,"maximum":20},"numInferenceSteps":{"type":"number","description":"The number of denoising steps for each image generation (within [1, 150], default: 30)","minimum":1,"maximum":150},"numSamples":{"type":"number","description":"The number of images to generate (within [1, 128], default: 4)","minimum":1,"maximum":128},"width":{"type":"number","description":"The width of the generated images, must be a 8 multiple (within [64, 2048], default: 512)\nIf model.type is `sd-xl`, `sd-xl-lora`, `sd-xl-composition` the width must be within [512, 2048]\nIf model.type is `sd-1_5`, the width must be within [64, 1024]\nIf model.type is `flux.1.1-pro-ultra`, you can use the aspectRatio parameter instead","minimum":64,"maximum":2048},"negativePromptStrength":{"type":"number","description":"Only applicable for flux-dev based models for `txt2img`, `img2img`, and `controlnet` inference types.\n\nControls the influence of the negative prompt. Default 0 means the negative prompt has no effect. Higher values increase negative prompt influence.\nMust be > 0 if negativePrompt is provided.","minimum":0,"maximum":10},"prompt":{"type":"string","description":"Full text prompt including the model placeholder. 
(example: \"an illustration of phoenix in a fantasy world, flying over a mountain, 8k, bokeh effect\")"}},"required":["controlImage","controlImageId","image","imageId","mask","maskId","modality","prompt"]},"job":{"type":"object","properties":{"createdAt":{"type":"string","description":"The job creation date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"jobId":{"type":"string","description":"The job ID (example: \"job_ocZCnG1Df35XRL1QyCZSRxAG8\")"},"metadata":{"type":"object","description":"Metadata of the job with some additional information","properties":{"output":{"type":"object","description":"The output of the job","properties":{}},"input":{"type":"object","description":"The inputs for the job","properties":{}},"assetIds":{"type":"array","description":"List of produced assets for this job","items":{"type":"string"}},"flow":{"type":"array","description":"The flow of the job. Only available for workflow jobs.","items":{"type":"object","properties":{"jobId":{"type":"string","description":"If the flow is part of a WorkflowJob, this is the jobId for the node.\njobId is only available for nodes started. A node \"Pending\" for a running workflow job is not started."},"assets":{"type":"array","description":"List of produced assets for this node.","items":{"type":"object","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]}},"logicType":{"type":"string","description":"The type of the logic for the node.\nOnly available for logic nodes.","enum":["if-else"]},"dependsOn":{"type":"array","description":"The nodes that this node depends on.\nOnly available for nodes that have dependencies. Mainly used for user approval nodes.","items":{"type":"string"}},"modelId":{"type":"string","description":"The model id for the node. 
Mainly used for custom model tasks."},"inputs":{"type":"array","description":"The inputs of the node.","items":{"type":"object","description":"This is used to run the flow.","properties":{"parent":{"type":"boolean","description":"Whether this input represents a parent asset to assign to the produced assets.\nOnly available for \\`file\\` and \\`file_array\\` input types.\n\nFor \\`file_array\\`, the parent asset is the first item in the array."},"color":{"type":"boolean","description":"Whether the input is a color or not. Only available for \\`string\\` input type."},"inputs":{"type":"array","description":"The list of inputs which form an object within a container array.\nAll inputs are the same as the current object.\nThis is only available for type inputs_array inputs.","items":{"type":"object","properties":{}}},"minLength":{"type":"number","description":"The minimum allowed length for string inputs. Also applies to each item in \\`string_array\\`."},"backgroundBehavior":{"type":"string","description":"Specifies the background behavior for the input. 
Only available for \\`file\\` and \\`file_array\\`\ninput types with kind \\`image\\`.","enum":["opaque","transparent"]},"description":{"type":"string","description":"Help text displayed in the UI to provide additional information about the input"},"type":{"type":"string","description":"The data type of the input","enum":["boolean","file","file_array","inputs_array","model","model_array","number","number_array","string","string_array"]},"required":{"type":"object","description":"Set of rules that describes when this input is required:\n- \\`always\\`: Input is always required\n- \\`ifNotDefined\\`: Input is required when another specified input is not defined\n- \\`ifDefined\\`: Input is required when another specified input is defined\n- \\`conditionalValues\\`: Input is required when another input has a specific value\n\nBy default, the input is not required.","properties":{"always":{"type":"boolean","description":"Whether the input is always required"},"ifNotDefined":{"type":"object","description":"Makes this input required when another input is not defined:\n- Key: name of the input that must be undefined\n- Value: message to display when this input is required","properties":{}},"ifDefined":{"type":"object","description":"Makes this input required when another input is defined:\n- Key: name of the input that must be defined\n- Value: message to display when this input is required","properties":{}},"conditionalValues":{"type":"object","description":"Makes this input required when another input has a specific value:\n- Key: name of the input to check\n- Value: operation and allowed values that trigger the requirement","properties":{}}}},"modelTypes":{"type":"array","description":"The allowed model types for this input. 
Example: \\`[\"flux.1-lora\"]\\`.\nOnly available for \\`model_array\\` input type.","items":{"type":"string","enum":["custom","flux.1","flux.1-composition","flux.1-kontext-dev","flux.1-kontext-lora","flux.1-krea-dev","flux.1-krea-lora","flux.1-lora","flux.1-pro","flux.1.1-pro-ultra","flux1.1-pro","gpt-image-1","sd-1_5","sd-1_5-composition","sd-1_5-lora","sd-xl","sd-xl-composition","sd-xl-lora"]}},"ref":{"type":"object","description":"The reference to another input or output of the same workflow.\nMust have at least one of node or conditional.","properties":{"equal":{"type":"string","description":"This is the desired node output value if ref is an if/else node."},"node":{"type":"string","description":"The node id or 'workflow' if the source is a workflow input."},"conditional":{"type":"array","description":"The conditional nodes to reference.\nIf the conditional nodes are successful, the node will be successful.\nIf the conditional nodes are skipped, the node will be skipped.\nContains an array of node ids used to check the status of the nodes.","items":{"type":"string"}},"name":{"type":"string","description":"The name of the input or output to reference.\nIf the type is 'workflow', the name is the name of the input of the workflow is required\nIf the type is 'node', the name is not mandatory, except if you want all outputs of the node.\nTo get all outputs of a node, you can use the name 'all'."}}},"min":{"type":"number","description":"The minimum allowed value. Only available for \\`number\\` and array input types."},"promptSpark":{"type":"boolean","description":"Whether the input is used with prompt spark. Only available for \\`string\\` input type."},"placeholder":{"type":"string","description":"Placeholder text for the input. Only available for 'string' input type."},"group":{"type":"string","description":"Used to visually group inputs together in the UI. 
Inputs with the same group value appear\nconsecutively in the UI."},"allowedValues":{"type":"array","description":"The allowed values for the input. For \\`string\\` or \\`number\\` types, creates a single-select\ndropdown.\nFor \\`string_array\\` type, creates a multi-select dropdown.","items":{}},"costImpact":{"type":"boolean","description":"Whether this input affects the model's cost calculation"},"max":{"type":"number","description":"The maximum allowed value. Only available for \\`number\\` and \\`array\\` input types."},"kind":{"type":"string","description":"The asset kind of the input. Only taken into account for \\`file\\` and \\`file_array\\` input types.","enum":["3d","audio","document","image","image-hdr","json","video"]},"maskFrom":{"type":"string","description":"The name of the file input field to use as the mask source"},"label":{"type":"string","description":"The label displayed in the UI for this input"},"hint":{"type":"string","description":"Hint text displayed in the UI as a tooltip to guide the user"},"name":{"type":"string","description":"The name that must be used to call the model through the API"},"step":{"type":"number","description":"The step increment for numeric inputs. Only available for \\`number\\` input type.","minimum":1},"prompt":{"type":"boolean","description":"Whether the input is a prompt. When true, displays as a text area with prompt spark feature.\nOnly available for \\`string\\` input type."},"maxLength":{"type":"number","description":"The maximum allowed length for \\`string\\` inputs. 
Also applies to each item in \\`string_array\\`."}},"required":["name","type"]}},"id":{"type":"string","description":"The id of the node."},"logic":{"type":"object","description":"The logic of the node.\nOnly available for logic nodes.","properties":{"default":{"type":"string","description":"The default case of the logic.\nContains the id/output of the node to execute if no case is matched.\nOnly available for if/else nodes."},"transform":{"type":"string","description":"The transform of the logic.\nOnly available for transform nodes."},"cases":{"type":"array","description":"The cases of the logic.\nOnly available for if/else nodes.","items":{"type":"object","properties":{"condition":{"type":"string"},"value":{"type":"string"}},"required":["condition","value"]}}}},"type":{"type":"string","description":"The type of the job for the node.","enum":["custom-model","generate-prompt","logic","remove-background","transform","user-approval","workflow"]},"workflowId":{"type":"string","description":"The workflow id for the node. Mainly used for workflow tasks."},"status":{"type":"string","description":"The status of the node. 
Only available for WorkflowJob nodes.","enum":["failure","pending","processing","skipped","success"]}},"required":["id","status","type"]}},"workflowId":{"type":"string","description":"The workflow ID of the job if job is part of a workflow."},"workflowJobId":{"type":"string","description":"The workflow job ID of the job if job is part of a workflow job."}}},"statusHistory":{"type":"array","description":"The history of the different statuses the job went through with the ISO string date\nof when the job reached each statuses.","items":{"type":"object","properties":{"date":{"type":"string"},"status":{"type":"string","enum":["canceled","failure","finalizing","in-progress","pending","queued","success","warming-up"]}},"required":["date","status"]}},"progress":{"type":"number","description":"Progress of the job (between 0 and 1)"},"authorId":{"type":"string","description":"The author user ID (example: \"dcf121faaa1a0a0bbbd9ca1b73d62aea\")"},"jobType":{"type":"string","description":"The type of job","enum":["assets-download","canvas-export","caption","caption-llava","custom","describe-style","detection","embed","flux","flux-model-training","generate-prompt","image-generation","image-prompt-editing","inference","mesh-preview-rendering","model-download","model-import","model-training","openai-image-generation","patch-image","pixelate","reframe","remove-background","repaint","restyle","segment","skybox-3d","skybox-base-360","skybox-hdri","skybox-upscale-360","texture","translate","upload","upscale","upscale-skybox","upscale-texture","vectorize","workflow"]},"ownerId":{"type":"string","description":"The owner ID (example: \"team_U3Qmc8PCdWXwAQJ4Dvw4tV6D\")"},"billing":{"type":"object","description":"The billing of the job","properties":{"cuDiscount":{"type":"number"},"cuCost":{"type":"number"}},"required":["cuCost","cuDiscount"]},"status":{"type":"string","description":"The current status of the 
job","enum":["canceled","failure","finalizing","in-progress","pending","queued","success","warming-up"]},"updatedAt":{"type":"string","description":"The job last update date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"}},"required":["createdAt","jobId","jobType","metadata","progress","status","statusHistory","updatedAt"]}}},"PutModelsDescriptionByModelIdRequest":{"type":"object","required":["description"],"properties":{"description":{"type":"string","description":"The markdown description of the model (ex: `# My model`). Set to `null` to delete the description."}}},"PostReframeInferencesRequest":{"type":"object","required":["image","targetHeight","targetWidth"],"properties":{"image":{"type":"string","description":"The image to reframe. Must reference an existing AssetId or be a data URL."},"inputLocation":{"type":"string","description":"Location of the input image in the output.","enum":["bottom","left","middle","right","top"],"default":"middle"},"seed":{"type":"number","description":"Used to reproduce previous results. Default: randomly generated number.","minimum":0,"maximum":2147483647},"horizontalExpansionRatio":{"type":"number","description":"(deprecated) Horizontal expansion ratio.","minimum":1,"maximum":2},"negativePrompt":{"type":"string","description":"(deprecated) A negative full text prompt that discourages the repaint from generating certain characteristics. 
It is recommended to test without using a negative prompt."},"resizeOption":{"type":"number","description":"Size proportion of the input image in the output.","minimum":0.1,"maximum":1},"verticalExpansionRatio":{"type":"number","description":"(deprecated) Vertical expansion ratio.","minimum":1,"maximum":2},"targetHeight":{"type":"number","description":"The target height of the output image.","minimum":0,"maximum":2048},"concepts":{"type":"array","description":"Flux LoRa or Composition to style the image.","items":{"type":"object","properties":{"modelId":{"type":"string","description":"The model ID (example: \"model_eyVcnFJcR92BxBkz7N6g5w\")"},"modelEpoch":{"type":"string","description":"The epoch of the model (example: \"000001\")\nOnly available for Flux Lora Trained models"},"scale":{"type":"number","description":"The scale of the model (example: 1.0)\nFor Flux Kontext Prompt Editing, the scale is between 0 and 2.","minimum":-2,"maximum":2}},"required":["modelId","scale"]}},"numInferenceSteps":{"type":"number","description":"The number of denoising steps for each image generation.","minimum":5,"maximum":50},"promptFidelity":{"type":"number","description":"Increase the fidelity to the prompt during the restyle.","minimum":1,"maximum":15},"overlapPercentage":{"type":"number","description":"Overlap percentage for the output image.","minimum":0,"maximum":0.5},"prompt":{"type":"string","description":"A full text prompt to guide the repaint process."},"targetWidth":{"type":"number","description":"The target width of the output image.","minimum":0,"maximum":2048}}},"GetAssetBulkResponse":{"type":"object","required":["assets"],"properties":{"assets":{"type":"array","items":{"type":"object","properties":{"preview":{"type":"object","description":"The asset's preview.\n\nContains the assetId and the url of the preview.","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"metadata":{"type":"object","description":"Metadata of the 
asset with some additional information","properties":{"maxIterations":{"type":"number"},"modality":{"type":"string","description":"Modality to detect","enum":["canny","depth","grayscale","lineart_anime","mlsd","normal","pose","scribble","segmentation","sketch"],"default":"canny"},"tileStyle":{"type":"boolean","description":"If set to true, during the upscaling process, the model will match tiles of the source image with tiles of the style image(s). This will result in a more coherent restyle. Works best with style images that have a similar composition.","default":false},"modelId":{"type":"string","description":"The modelId used to generate this asset"},"sourceProjectId":{"type":"string"},"bbox":{"type":"array","description":"A bounding box around the object of interest, in the format [x1, y1, x2, y2].","items":{}},"keypointThreshold":{"type":"number","description":"The threshold applied to detected keypoints, within [0, 1]","minimum":0,"maximum":1},"type":{"type":"string","description":"The type of the asset. 
Ex: 'inference-txt2img' will represent an asset generated from a text to image model","enum":["3d-texture","3d-texture-albedo","3d-texture-metallic","3d-texture-mtl","3d-texture-normal","3d-texture-roughness","3d23d","3d23d-texture","audio2audio","background-removal","canvas","canvas-drawing","canvas-export","detection","generative-fill","image-prompt-editing","img23d","img2img","img2video","inference-controlnet","inference-controlnet-img2img","inference-controlnet-inpaint","inference-controlnet-inpaint-ip-adapter","inference-controlnet-ip-adapter","inference-controlnet-reference","inference-controlnet-texture","inference-img2img","inference-img2img-ip-adapter","inference-img2img-texture","inference-inpaint","inference-inpaint-ip-adapter","inference-reference","inference-reference-texture","inference-txt2img","inference-txt2img-ip-adapter","inference-txt2img-texture","patch","pixelization","reframe","restyle","segment","segmentation-image","segmentation-mask","skybox-3d","skybox-base-360","skybox-hdri","texture","texture-albedo","texture-ao","texture-edge","texture-height","texture-metallic","texture-normal","texture-smoothness","txt23d","txt2audio","txt2img","txt2video","unknown","uploaded","uploaded-3d","uploaded-audio","uploaded-avatar","uploaded-video","upscale","upscale-skybox","upscale-texture","upscale-video","vectorization","video2img","video2video"]},"text":{"type":"string","description":"A textual description / keywords describing the object of interest.","maxLength":100},"height":{"type":"number"},"nbMasks":{"type":"number"},"creativityDecay":{"type":"number","description":"Amount of decay in creativity over the upscale process. 
The lowest the value, the less the creativity will be preserved over the upscale process.","minimum":0,"maximum":100},"layerDifference":{"type":"number"},"scalingFactor":{"type":"number","description":"Scaling factor (when `targetWidth` not specified)","minimum":1,"maximum":16},"inputFidelity":{"type":"string","description":"When set to `high`, allows to better preserve details from the input images in the output.\nThis is especially useful when using images that contain elements like faces or logos that\nrequire accurate preservation in the generated image.\n\nYou can provide multiple input images that will all be preserved with high fidelity, but keep\nin mind that the first image will be preserved with richer textures and finer details, so if\nyou include elements such as faces, consider placing them in the first image.\n\nOnly available for the `gpt-image-1` model.","enum":["high","low"],"default":"low"},"modelType":{"type":"string","description":"The type of the generator used","enum":["custom","flux.1","flux.1-composition","flux.1-kontext-dev","flux.1-kontext-lora","flux.1-krea-dev","flux.1-krea-lora","flux.1-lora","flux.1-pro","flux.1.1-pro-ultra","flux1.1-pro","gpt-image-1","sd-1_5","sd-1_5-composition","sd-1_5-lora","sd-xl","sd-xl-composition","sd-xl-lora"]},"numOutputs":{"type":"number","description":"The number of outputs to generate.","minimum":1,"maximum":8},"angular":{"type":"number","description":"How angular is the surface? 
0 is like a sphere, 1 is like a mechanical object","minimum":0,"maximum":1},"targetHeight":{"type":"number","description":"The target height of the output image.","minimum":0,"maximum":2048},"concepts":{"type":"array","description":"Flux Kontext LoRA to style the image.\nFor Flux Kontext Prompt Editing.","items":{"type":"object","properties":{"modelId":{"type":"string","description":"The model ID (example: \"model_eyVcnFJcR92BxBkz7N6g5w\")"},"modelEpoch":{"type":"string","description":"The epoch of the model (example: \"000001\")\nOnly available for Flux Lora Trained models"},"scale":{"type":"number","description":"The scale of the model (example: 1.0)\nFor Flux Kontext Prompt Editing, the scale is between 0 and 2.","minimum":-2,"maximum":2}},"required":["modelId","scale"]}},"size":{"type":"number"},"spliceThreshold":{"type":"number"},"dilate":{"type":"number","description":"The number of pixels to dilate the result masks.","minimum":0,"maximum":30},"style":{"type":"string","enum":["3d-cartoon","3d-rendered","anime","cartoon","cinematic","claymation","cloud-skydome","comic","cyberpunk","enchanted","fantasy","ink","manga","manga-color","minimalist","neon-tron","oil-painting","pastel","photo","photography","psychedelic","retro-fantasy","scifi-concept-art","space","standard","whimsical"]},"imageFidelity":{"type":"number","description":"Strengthen the similarity to the original image during the upscale. 
Default: optimized for your preset and style.","minimum":0,"maximum":100},"inputLocation":{"type":"string","description":"Location of the input image in the output.","enum":["bottom","left","middle","right","top"],"default":"middle"},"seed":{"type":"string","description":"The seed used to generate this asset"},"preset":{"type":"string"},"trainingImage":{"type":"boolean"},"resultMask":{"type":"boolean","description":"Boolean to enable returning the masks (binary image) in the response.","default":true},"lockExpiresAt":{"type":"string","description":"The ISO timestamp when the lock on the canvas will expire"},"baseModelId":{"type":"string","description":"The baseModelId that may be changed at inference time"},"pathPrecision":{"type":"number"},"rootParentId":{"type":"string"},"sharpen":{"type":"boolean","description":"Sharpen tiles.","default":false},"cornerThreshold":{"type":"number"},"styleImages":{"type":"array","description":"List of style images. Most of the time, only one image is enough. It must be existing AssetIds.","items":{"type":"string"}},"texture":{"type":"string","description":"The asset to convert in texture maps. Must reference an existing AssetId."},"filterSpeckle":{"type":"number"},"minThreshold":{"type":"number","description":"Minimum threshold for Grayscale conversion"},"maxThreshold":{"type":"number","description":"Maximum threshold for Grayscale conversion"},"styleImagesFidelity":{"type":"number","description":"Condition the influence of the style image(s). The higher the value, the more the style images will influence the upscaled image.","minimum":0,"maximum":100},"depthImage":{"type":"string","description":"The control image processed by depth estimator. 
Must reference an existing AssetId."},"parentId":{"type":"string"},"parentJobId":{"type":"string"},"width":{"type":"number","description":"The width of the rendered image.","minimum":1024,"maximum":2048},"strength":{"type":"number","description":"The strength\n\nOnly available for the `flux-kontext` LoRA model."},"inferenceId":{"type":"string","description":"The id of the Inference describing how this image was generated"},"lengthThreshold":{"type":"number"},"originalAssetId":{"type":"string"},"backgroundOpacity":{"type":"number","description":"Int to set between 0 and 255 for the opacity of the background in the result images.","minimum":0,"maximum":255},"aspectRatio":{"type":"string","description":"The optional aspect ratio given for the generation, only applicable for some models"},"detailsLevel":{"type":"number","description":"Amount of details to remove or add","minimum":-50,"maximum":50},"fractality":{"type":"number","description":"Determine the scale at which the upscale process works.\n- With a small value, the upscale works at the largest scale, resulting in fewer added details and more coherent images. Ideal for portraits, for example.\n- With a large value, the upscale works at the smallest scale, resulting in more added details and more hallucinations. Ideal for landscapes, for example.\n\n(info): A small value is slower and more expensive to run.","minimum":0,"maximum":100},"horizontalExpansionRatio":{"type":"number","description":"(deprecated) Horizontal expansion ratio.","minimum":1,"maximum":2},"points":{"type":"array","description":"List of points (label, x, y) in the image where label = 0 for background and 1 for object.","items":{"type":"array","items":{}}},"mode":{"type":"string"},"colorPrecision":{"type":"number"},"promptFidelity":{"type":"number","description":"Increase the fidelity to the prompt during upscale. 
Default: optimized for your preset and style.","minimum":0,"maximum":100},"betterQuality":{"type":"boolean","description":"Remove small dark spots (i.e. “pepper”) and connect small bright cracks.","default":false},"removeBackground":{"type":"boolean","description":"Remove background for Grayscale detector","default":true},"creativity":{"type":"number","description":"Allow the generation of \"hallucinations\" during the upscale process, which adds additional details and deviates from the original image. Default: optimized for your preset and style.","minimum":0,"maximum":100},"structureFidelity":{"type":"number","description":"Strength for the input image structure preservation","minimum":0,"maximum":100},"imageType":{"type":"string","description":"Preserve the seamless properties of skybox or texture images. Input has to be of same type (seamless).","enum":["seamfull","skybox","texture"],"default":"seamfull"},"image":{"type":"string","description":"The input image to process. Must reference an existing AssetId or be a data URL."},"thumbnail":{"type":"object","description":"The thumbnail of the canvas","properties":{"assetId":{"type":"string","description":"The AssetId of the image used as a thumbnail for the canvas (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"url":{"type":"string","description":"The url of the image used as a thumbnail for the canvas"}},"required":["assetId","url"]},"structureImage":{"type":"string","description":"The control image for structure. A canny detector will be applied to this image. 
Must reference an existing AssetId."},"invert":{"type":"boolean","description":"To invert the relief","default":false},"kind":{"type":"string","enum":["3d","audio","document","image","image-hdr","json","video"]},"colorMode":{"type":"string"},"highThreshold":{"type":"number","description":"High threshold for Canny detector"},"verticalExpansionRatio":{"type":"number","description":"(deprecated) Vertical expansion ratio.","minimum":1,"maximum":2},"halfMode":{"type":"boolean"},"name":{"type":"string"},"negativePromptStrength":{"type":"number","description":"Controls the influence of the negative prompt. Default 0 means the negative prompt has no effect. Higher values increase negative prompt influence.\nMust be > 0 if negativePrompt is provided.","minimum":0,"maximum":10},"refinementSteps":{"type":"number","description":"Additional refinement steps before scaling.\n\nIf scalingFactor == 1, the refinement process will be applied (1 + refinementSteps) times.\nIf scalingFactor > 1, the refinement process will be applied refinementSteps times.","minimum":0,"maximum":4},"referenceImages":{"type":"array","description":"The reference images used for the asset generation or editing","items":{"type":"string"}},"styleFidelity":{"type":"number","description":"The higher the value the more it will look like the style image(s)","minimum":0,"maximum":100},"resultContours":{"type":"boolean","description":"Boolean to output the contours.","default":false},"controlEnd":{"type":"number","description":"End step for control."},"polished":{"type":"number","description":"How polished is the surface? 0 is like a rough surface, 1 is like a mirror","minimum":0,"maximum":1},"scheduler":{"type":"string","description":"The scheduler used to generate this asset"},"overrideEmbeddings":{"type":"boolean","description":"Override the embeddings of the model. Only your prompt and negativePrompt will be used. 
Use with caution."},"progressPercent":{"type":"number"},"factor":{"type":"number","description":"Contrast factor for Grayscale detector"},"mask":{"type":"string","description":"The mask used for the asset generation or editing"},"lowThreshold":{"type":"number","description":"Low threshold for Canny detector"},"depthFidelity":{"type":"number","description":"The depth fidelity if a depth image provided","minimum":0,"maximum":100},"raised":{"type":"number","description":"How raised is the surface? 0 is flat like water, 1 is like a very rough rock","minimum":0,"maximum":1},"copiedAt":{"type":"string","description":"The date when the asset was copied to a project"},"contours":{"type":"array","items":{"type":"array","items":{"type":"array","items":{"type":"array","items":{"type":"number"}}}}},"negativePrompt":{"type":"string","description":"The negative prompt used to generate this asset"},"defaultParameters":{"type":"boolean","description":"If true, use the default parameters","default":false},"resizeOption":{"type":"number","description":"Size proportion of the input image in the output.","minimum":0.1,"maximum":1},"outputIndex":{"type":"number"},"saveFlipbook":{"type":"boolean","description":"Save a flipbook of the texture. Deactivated when the input texture is larger than 2048x2048px","default":true},"guidance":{"type":"number","description":"The guidance used to generate this asset"},"numInferenceSteps":{"type":"number","description":"The number of denoising steps for each image generation.","minimum":5,"maximum":50},"hdr":{"type":"number"},"clustering":{"type":"boolean","description":"Activate clustering.","default":false},"shiny":{"type":"number","description":"How shiny is the surface? 
0 is like a matte surface, 1 is like a diamond","minimum":0,"maximum":1},"colorCorrection":{"type":"boolean","description":"Ensure upscaled tiles have the same color histogram as the original tiles.","default":true},"sketch":{"type":"boolean","description":"Activate sketch detection instead of canny.","default":false},"overlapPercentage":{"type":"number","description":"Overlap percentage for the output image.","minimum":0,"maximum":0.5},"prompt":{"type":"string","description":"The prompt that guided the asset generation or editing"},"resultImage":{"type":"boolean","description":"Boolean to enable outputting the cut-out object.","default":false},"cannyStructureImage":{"type":"string","description":"The control image already processed by canny detector. Must reference an existing AssetId."},"geometryEnforcement":{"type":"number","description":"Apply extra control to the Skybox 360 geometry.\nThe higher the value, the more the 360 geometry will influence the generated skybox image.\n\nUse with caution. 
Default is adapted to the other parameters.","minimum":0,"maximum":100},"targetWidth":{"type":"number","description":"Target width for the upscaled image, take priority over scaling factor","minimum":1024,"maximum":16000}},"required":["kind","type"]},"automaticCaptioning":{"type":"string","description":"Automatic captioning of the asset"},"description":{"type":"string","description":"The description, it will contain in priority:\n- the manual description\n- the advanced captioning when the asset is used in training flow\n- the automatic captioning"},"privacy":{"type":"string","description":"The privacy of the asset","enum":["private","public","unlisted"]},"mimeType":{"type":"string","description":"The mime type of the asset (example: \"image/png\")"},"source":{"type":"string","description":"source of the asset","enum":["3d23d","3d23d:texture","3d:texture","3d:texture:albedo","3d:texture:metallic","3d:texture:mtl","3d:texture:normal","3d:texture:roughness","audio2audio","background-removal","canvas","canvas-drawing","canvas-export","detection","generative-fill","image-prompt-editing","img23d","img2img","img2video","inference-control-net","inference-control-net-img","inference-control-net-inpainting","inference-control-net-inpainting-ip-adapter","inference-control-net-ip-adapter","inference-control-net-reference","inference-control-net-texture","inference-img","inference-img-ip-adapter","inference-img-texture","inference-in-paint","inference-in-paint-ip-adapter","inference-reference","inference-reference-texture","inference-txt","inference-txt-ip-adapter","inference-txt-texture","patch","pixelization","reframe","restyle","segment","segmentation-image","segmentation-mask","skybox-3d","skybox-base-360","skybox-hdri","texture","texture:albedo","texture:ao","texture:edge","texture:height","texture:metallic","texture:normal","texture:smoothness","txt23d","txt2audio","txt2img","txt2video","unknown","uploaded","uploaded-3d","uploaded-audio","uploaded-avatar","uploaded-video"
,"upscale","upscale-skybox","upscale-texture","upscale-video","vectorization","video2img","video2video"]},"ownerId":{"type":"string","description":"The owner (project) ID (example: \"proj_23tlk332lkht3kl2\" or \"team_dlkhgs23tlk3hlkth32lkht3kl2\" for old teams)"},"createdAt":{"type":"string","description":"The asset creation date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"firstFrame":{"type":"object","description":"The video asset's first frame.\n\nContains the assetId and the url of the first frame.","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"editCapabilities":{"type":"array","description":"List of edit capabilities","items":{"type":"string","enum":["DETECTION","GENERATIVE_FILL","PIXELATE","PROMPT_EDITING","REFINE","REFRAME","REMOVE_BACKGROUND","SEGMENTATION","UPSCALE","UPSCALE_360","VECTORIZATION"]}},"embedding":{"type":"array","description":"The embedding of the asset when requested.\n\nOnly available when an asset can be embedded (ie: not Detection maps)","items":{"type":"number"}},"id":{"type":"string","description":"The asset ID (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"updatedAt":{"type":"string","description":"The asset last update date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"thumbnail":{"type":"object","description":"The asset's thumbnail.\n\nContains the assetId and the url of the thumbnail.","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"nsfw":{"type":"array","description":"The NSFW labels","items":{"type":"string"}},"kind":{"type":"string","description":"The kind of asset","enum":["3d","audio","document","image","image-hdr","json","video"]},"lastFrame":{"type":"object","description":"The video asset's last frame.\n\nContains the assetId and the url of the last 
frame.","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"originalFileUrl":{"type":"string","description":"The original file url.\n\nContains the url of the original file, without any conversion. Only available for some specific video, audio and threeD assets.\nIs only specified if the given asset data has been replaced with a new file during the creation of the asset."},"authorId":{"type":"string","description":"The author user ID (example: \"dcf121faaa1a0a0bbbd9ca1b73d62aea\")"},"url":{"type":"string","description":"Signed URL to get the asset content"},"isHidden":{"type":"boolean","description":"Whether the asset is hidden."},"tags":{"type":"array","description":"The associated tags (example: [\"sci-fi\", \"landscape\"])","items":{"type":"string"}},"outputIndex":{"type":"number","description":"The output index of the asset within a job\nThis index is a positive integer that starts at 0\nIt is used to differentiate between multiple outputs of the same job\nIf the job has only one output, this index is 0"},"collectionIds":{"type":"array","description":"A list of CollectionIds this asset belongs to","items":{"type":"string"}},"properties":{"type":"object","description":"The properties of the asset, content may depend on the kind of asset returned","properties":{"transcription":{"type":"object","description":"Transcription of the audio","properties":{"text":{"type":"string"}},"required":["text"]},"hasNormals":{"type":"boolean","description":"Whether the mesh has normal vectors"},"format":{"type":"string","description":"Format of the mesh file (e.g. 
'glb', etc.)"},"description":{"type":"string","description":"Description of the audio"},"bitrate":{"type":"number","description":"Bitrate of the media in bits per second"},"boneCount":{"type":"number","description":"Number of bones if skeleton exists"},"classification":{"type":"string","description":"Classification of the audio","enum":["effect","interview","music","other","sound","speech","text","unknown"]},"sampleRate":{"type":"number","description":"Sample rate of the media in Hz"},"faceCount":{"type":"number","description":"Number of faces/triangles in the mesh"},"vertexCount":{"type":"number","description":"Number of vertices in the mesh"},"animationFrameCount":{"type":"number","description":"Number of animation frames if animations exist"},"duration":{"type":"number","description":"Duration of the media in seconds"},"frameRate":{"type":"number","description":"Frame rate of the video in frames per second"},"nbFrames":{"type":"number","description":"Number of frames in the video"},"channels":{"type":"number","description":"Number of channels of the audio"},"size":{"type":"number"},"hasSkeleton":{"type":"boolean","description":"Whether the mesh has bones/skeleton"},"width":{"type":"number"},"hasAnimations":{"type":"boolean","description":"Whether the mesh has animations"},"codecName":{"type":"string","description":"Codec name of the media"},"hasUVs":{"type":"boolean","description":"Whether the mesh has UV coordinates"},"dimensions":{"type":"array","description":"Bounding box dimensions [width, height, depth]","items":{}},"height":{"type":"number"}},"required":["size"]},"status":{"type":"string","description":"The actual status","enum":["error","pending","success"]}},"required":["authorId","collectionIds","createdAt","editCapabilities","id","kind","metadata","mimeType","ownerId","privacy","properties","source","status","tags","updatedAt","url"]}}}},"PutWebhookEndpointsByIdRequest":{"type":"object","properties":{"enabledEvents":{"type":"array","description":"The 
events that trigger the webhook. ['*'] indicates that all events are enabled","items":{"type":"string","enum":["*","asset.download.completed","asset.download.created","asset.download.failed","generation.cancelled","generation.completed","generation.created","generation.failed","inference.cancelled","inference.completed","inference.created","inference.failed","model.download.completed","model.download.created","model.download.failed","model.training.cancelled","model.training.completed","model.training.failed","model.training.started"]}},"description":{"type":"string","description":"A description of the webhook endpoint"},"enabled":{"type":"boolean","description":"Whether the webhook is enabled"},"url":{"type":"string","description":"The URL of the webhook endpoint"}}},"PostModelsInferencesActionByModelIdAndInferenceIdResponse":{"type":"object","required":["inference"],"properties":{"inference":{"type":"object","properties":{"images":{"type":"array","description":"The result images URLs (can change over time when inference is in progress)","items":{"type":"object","properties":{"seed":{"type":"string","description":"The seed used to generate this image"},"id":{"type":"string","description":"The image ID (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"url":{"type":"string","description":"The image URL"}},"required":["id","seed","url"]}},"modelId":{"type":"string","description":"The model ID on which the inference was done (example: \"model_GTrL3mq4SXWyMxkOHRxlpw\")"},"authorId":{"type":"string","description":"The author user ID (example: \"user_VFhihHKMRZyDDnZAJwLb2Q\")"},"displayPrompt":{"type":"string","description":"The human-friendly prompt (without token)"},"ownerId":{"type":"string","description":"The owner user ID (example: \"team_VFhihHKMRZyDDnZAJwLb2Q\")"},"userId":{"type":"string","description":"(Deprecated) The user ID (example: \"user_VFhihHKMRZyDDnZAJwLb2Q\")"},"createdAt":{"type":"string","description":"The inference creation date as an ISO string 
(example: \"2023-02-03T11:19:41.579Z\")"},"progress":{"type":"number","description":"The inference progress (within [0, 1])","minimum":0,"maximum":1},"id":{"type":"string","description":"The inference ID (example: \"inf_GTrL3mq4SXWyMxkOHRxlpw\")"},"imagesNumber":{"type":"number","description":"The total number of result images"},"parameters":{"type":"object","description":"The inference parameters","properties":{"ipAdapterImageIds":{"type":"array","description":"Asset id of the input IpAdapter images","items":{"type":"string"}},"styleFidelity":{"type":"number","description":"If style_fidelity=1.0, control more important, else if style_fidelity=0.0, prompt more important, else balanced\nOnly for \"reference\" inference type","minimum":0,"maximum":1},"controlEnd":{"type":"number","description":"Specifies how long the ControlNet guidance should be applied during the inference process.\n\nOnly available for Flux.1-dev based models.\n\nThe value represents the percentage of total inference steps where the ControlNet guidance is active.\nFor example:\n- 1.0: ControlNet guidance is applied during all inference steps\n- 0.5: ControlNet guidance is only applied during the first half of inference steps\n\nDefault values:\n- 0.5 for Canny modality\n- 0.6 for all other modalities","minimum":0.1,"maximum":1},"modality":{"type":"string","description":"The modality associated with the control image used for the generation: it can either be an object with a combination of maximum\n\nFor models of SD1.5 family:\n - up to 3 modalities from `canny`, `pose`, `depth`, `lines`, `seg`, `scribble`, `lineart`, `normal-map`, `illusion`\n - or one of the following presets: `character`, `landscape`, `city`, `interior`.\n\nFor models of the SDXL family:\n - up to 3 modalities from `canny`, `pose`, `depth`, `seg`, `illusion`, `scribble`\n - or one of the following presets: `character`, `landscape`.\n\nFor models of the FLUX schnell or dev families:\n- one modality from: `canny`, `tile`, 
`depth`, `blur`, `pose`, `gray`, `low-quality`\n\nOptionally, you can associate a value to these modalities or presets. The value must be within `]0.0, 1.0]`.\n\nExamples:\n- `canny`\n- `depth:0.5,pose:1.0`\n- `canny:0.5,depth:0.5,lines:0.3`\n- `landscape`\n- `character:0.5`\n- `illusion:1`\n\nNote: if you use a value that is not supported by the model family, this will result in an error.","enum":["blur","canny","depth","gray","illusion","lineart","lines","low-quality","normal-map","pose","scribble","seg","sketch","tile"]},"seed":{"type":"string","description":"Used to reproduce previous results. Default: randomly generated number."},"strength":{"type":"number","description":"Controls the noise intensity introduced to the input image, where a value of 1.0 completely erases the original image's details. Available for img2img and inpainting. (within [0.01, 1.0], default: 0.75)","minimum":0.01,"maximum":1},"ipAdapterType":{"type":"string","description":"The type of IP Adapter model to use. Must be one of [`style`, `character`], defaults to `style`","enum":["character","style"],"default":"style"},"aspectRatio":{"type":"string","description":"The aspect ratio of the generated images. Only used for the model flux.1.1-pro-ultra.\nThe aspect ratio is a string formatted as \"width:height\" (example: \"16:9\").","enum":["16:9","1:1","21:9","2:3","3:2","3:4","4:3","4:5","5:4","9:16","9:21"],"default":"1:1"},"type":{"type":"string","description":"The type of inference to use. 
Example: txt2img, img2img, etc.\n\nSelecting the right type will condition the expected parameters.\n\nNote: if model.type is `sd-xl*` or `sd-1_5*`, when using the `\"inpaint\"` inference type, Scenario determines the best available `baseModel` for a given `modelId`: one of `[\"stable-diffusion-inpainting\", \"stable-diffusion-xl-1.0-inpainting-0.1\"] will be used.","enum":["controlnet","controlnet_img2img","controlnet_inpaint","controlnet_inpaint_ip_adapter","controlnet_ip_adapter","controlnet_reference","controlnet_texture","img2img","img2img_ip_adapter","img2img_texture","inpaint","inpaint_ip_adapter","outpaint","reference","reference_texture","txt2img","txt2img_ip_adapter","txt2img_texture"]},"ipAdapterImage":{"type":"string","description":"Signed URL to display the IpAdapter image"},"scheduler":{"type":"string","description":"The scheduler to use to override the default configured for the model. See detailed documentation for more details.","enum":["DDIMScheduler","DDPMScheduler","DEISMultistepScheduler","DPMSolverMultistepScheduler","DPMSolverSinglestepScheduler","EulerAncestralDiscreteScheduler","EulerDiscreteScheduler","HeunDiscreteScheduler","KDPM2AncestralDiscreteScheduler","KDPM2DiscreteScheduler","LCMScheduler","LMSDiscreteScheduler","PNDMScheduler","TCDScheduler","UniPCMultistepScheduler"]},"disableMerging":{"type":"boolean","description":"If set to true, the entire input image will likely change during inpainting. 
This results in faster inferences, but the output image will be harder to integrate if the input is just a small part of a larger image.","default":false},"disableModalityDetection":{"type":"boolean","description":"If false, the process uses the given image to detect the modality.\nIf true (default), the process will not try to detect the modality of the given image.\n\nFor example:\nwith `pose` modality and `false` value, the process will detect the pose of people in the given image\nwith `depth` modality and `false` value, the process will detect the depth of the given image\nwith `scribble` modality and `true` value, the process will use the given image as a scribble\n\n⚠️ For models of the FLUX schnell or dev families, this parameter is ignored. The modality detection is always disabled. ⚠️","default":true},"ipAdapterImages":{"type":"array","description":"Signed URL to display the IpAdapter images","items":{"type":"string"}},"baseModelId":{"type":"string","description":"The base model to use for the inference. 
Only Flux LoRA models can use this parameter.\nAllowed values are available in the model's attribute: `compliantModelIds`"},"controlStart":{"type":"number","description":"Specifies the starting point of the ControlNet guidance during the inference process.\n\nOnly available for Flux.1-dev based models.\n\nThe value represents the percentage of total inference steps where the ControlNet guidance starts.\nFor example:\n- 0.0: ControlNet guidance starts at the beginning of the inference steps\n- 0.5: ControlNet guidance starts at the middle of the inference steps","minimum":0,"maximum":0.9},"height":{"type":"number","description":"The height of the generated images, must be a 8 multiple (within [64, 2048], default: 512)\nIf model.type is `sd-xl`, `sd-xl-lora`, `sd-xl-composition` the height must be within [512, 2048]\nIf model.type is `sd-1_5`, the height must be within [64, 1024]\nIf model.type is `flux.1.1-pro-ultra`, you can use the aspectRatio parameter instead","minimum":64,"maximum":2048},"mask":{"type":"string","description":"Signed URL to display the mask image"},"controlImageId":{"type":"string","description":"Asset id of the controlnet input image"},"image":{"type":"string","description":"Signed URL to display the input image"},"imageId":{"type":"string","description":"Asset id of the input image"},"ipAdapterImageId":{"type":"string","description":"Asset id of the input IpAdapter image"},"ipAdapterScale":{"type":"number","description":"IpAdapter scale factor (within [0.0, 1.0], default: 0.9).","minimum":0,"maximum":1},"modelEpoch":{"type":"string","description":"The epoch of the model to use for the inference. 
Only available for Flux Lora Trained models."},"hideResults":{"type":"boolean","description":"If set, generated assets will be hidden and not returned in the list of images of the inference\nor when listing assets (default: false)","default":false},"ipAdapterScales":{"type":"array","description":"IpAdapter scale factors (within [0.0, 1.0], default: 0.9).","items":{"type":"number"}},"maskId":{"type":"string","description":"Asset id of the mask image"},"referenceAdain":{"type":"boolean","description":"Whether to use reference adain\nOnly for \"reference\" inference type","default":false},"negativePrompt":{"type":"string","description":"The prompt not to guide the image generation, ignored when guidance < 1 (example: \"((ugly face))\")\nFor Flux based model (not Fast-Flux): requires negativePromptStrength > 0 and active only for inference types txt2img / img2img / controlnet."},"intermediateImages":{"type":"boolean","description":"Enable or disable the intermediate images generation (default: false)","default":false},"concepts":{"type":"array","items":{"type":"object","properties":{"modelId":{"type":"string","description":"The model ID (example: \"model_eyVcnFJcR92BxBkz7N6g5w\")"},"modelEpoch":{"type":"string","description":"The epoch of the model (example: \"000001\")\nOnly available for Flux Lora Trained models"},"scale":{"type":"number","description":"The scale of the model (example: 1.0)\nFor Flux Kontext Prompt Editing, the scale is between 0 and 2.","minimum":-2,"maximum":2}},"required":["modelId","scale"]}},"controlImage":{"type":"string","description":"Signed URL to display the controlnet input image"},"guidance":{"type":"number","description":"Controls how closely the generated image follows the prompt. Higher values result in stronger adherence to the prompt. 
Default and allowed values depend on the model type:\n- For Flux dev models, the default is 3.5 and allowed values are within [0, 10]\n- For Flux pro models, the default is 3 and allowed values are within [2, 5]\n- For SDXL models, the default is 6 and allowed values are within [0, 20]\n- For SD1.5 models, the default is 7.5 and allowed values are within [0, 20]","minimum":0,"maximum":20},"numInferenceSteps":{"type":"number","description":"The number of denoising steps for each image generation (within [1, 150], default: 30)","minimum":1,"maximum":150},"numSamples":{"type":"number","description":"The number of images to generate (within [1, 128], default: 4)","minimum":1,"maximum":128},"width":{"type":"number","description":"The width of the generated images, must be a 8 multiple (within [64, 2048], default: 512)\nIf model.type is `sd-xl`, `sd-xl-lora`, `sd-xl-composition` the width must be within [512, 2048]\nIf model.type is `sd-1_5`, the width must be within [64, 1024]\nIf model.type is `flux.1.1-pro-ultra`, you can use the aspectRatio parameter instead","minimum":64,"maximum":2048},"negativePromptStrength":{"type":"number","description":"Only applicable for flux-dev based models for `txt2img`, `img2img`, and `controlnet` inference types.\n\nControls the influence of the negative prompt. Default 0 means the negative prompt has no effect. Higher values increase negative prompt influence.\nMust be > 0 if negativePrompt is provided.","minimum":0,"maximum":10},"referenceAttn":{"type":"boolean","description":"Whether to use reference query for self attention's context\nOnly for \"reference\" inference type","default":false},"prompt":{"type":"string","description":"Full text prompt including the model placeholder. 
(example: \"an illustration of phoenix in a fantasy world, flying over a mountain, 8k, bokeh effect\")"}},"required":["prompt","type"]},"queue":{"type":"object","description":"The generation queue information (for generations with the \"queued\" status)","properties":{"approximateNumberOfRequests":{"type":"number","description":"The approximate number of requests in the generation queue"},"approximatePosition":{"type":"number","description":"The approximate position in the generation queue"}},"required":["approximateNumberOfRequests","approximatePosition"]},"status":{"type":"string","description":"The inference status","enum":["canceled","failed","in-progress","model-loading","new","queued","succeeded"]}},"required":["authorId","createdAt","displayPrompt","id","images","imagesNumber","modelId","ownerId","parameters","status","userId"]}}},"GetWorkflowsTagsResponse":{"type":"object","required":["tags"],"properties":{"tags":{"type":"array","description":"Array of unique tags from workflows in the project","items":{"type":"string"}}}},"GetModelsInferencesByModelIdAndInferenceIdResponse":{"type":"object","required":["inference"],"properties":{"inference":{"type":"object","properties":{"images":{"type":"array","description":"The result images URLs (can change over time when inference is in progress)","items":{"type":"object","properties":{"seed":{"type":"string","description":"The seed used to generate this image"},"id":{"type":"string","description":"The image ID (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"url":{"type":"string","description":"The image URL"}},"required":["id","seed","url"]}},"modelId":{"type":"string","description":"The model ID on which the inference was done (example: \"model_GTrL3mq4SXWyMxkOHRxlpw\")"},"authorId":{"type":"string","description":"The author user ID (example: \"user_VFhihHKMRZyDDnZAJwLb2Q\")"},"displayPrompt":{"type":"string","description":"The human-friendly prompt (without token)"},"ownerId":{"type":"string","description":"The 
owner user ID (example: \"team_VFhihHKMRZyDDnZAJwLb2Q\")"},"userId":{"type":"string","description":"(Deprecated) The user ID (example: \"user_VFhihHKMRZyDDnZAJwLb2Q\")"},"createdAt":{"type":"string","description":"The inference creation date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"progress":{"type":"number","description":"The inference progress (within [0, 1])","minimum":0,"maximum":1},"id":{"type":"string","description":"The inference ID (example: \"inf_GTrL3mq4SXWyMxkOHRxlpw\")"},"imagesNumber":{"type":"number","description":"The total number of result images"},"parameters":{"type":"object","description":"The inference parameters","properties":{"ipAdapterImageIds":{"type":"array","description":"Asset id of the input IpAdapter images","items":{"type":"string"}},"styleFidelity":{"type":"number","description":"If style_fidelity=1.0, control more important, else if style_fidelity=0.0, prompt more important, else balanced\nOnly for \"reference\" inference type","minimum":0,"maximum":1},"controlEnd":{"type":"number","description":"Specifies how long the ControlNet guidance should be applied during the inference process.\n\nOnly available for Flux.1-dev based models.\n\nThe value represents the percentage of total inference steps where the ControlNet guidance is active.\nFor example:\n- 1.0: ControlNet guidance is applied during all inference steps\n- 0.5: ControlNet guidance is only applied during the first half of inference steps\n\nDefault values:\n- 0.5 for Canny modality\n- 0.6 for all other modalities","minimum":0.1,"maximum":1},"modality":{"type":"string","description":"The modality associated with the control image used for the generation: it can either be an object with a combination of maximum\n\nFor models of SD1.5 family:\n - up to 3 modalities from `canny`, `pose`, `depth`, `lines`, `seg`, `scribble`, `lineart`, `normal-map`, `illusion`\n - or one of the following presets: `character`, `landscape`, `city`, `interior`.\n\nFor models of 
the SDXL family:\n - up to 3 modalities from `canny`, `pose`, `depth`, `seg`, `illusion`, `scribble`\n - or one of the following presets: `character`, `landscape`.\n\nFor models of the FLUX schnell or dev families:\n- one modality from: `canny`, `tile`, `depth`, `blur`, `pose`, `gray`, `low-quality`\n\nOptionally, you can associate a value to these modalities or presets. The value must be within `]0.0, 1.0]`.\n\nExamples:\n- `canny`\n- `depth:0.5,pose:1.0`\n- `canny:0.5,depth:0.5,lines:0.3`\n- `landscape`\n- `character:0.5`\n- `illusion:1`\n\nNote: if you use a value that is not supported by the model family, this will result in an error.","enum":["blur","canny","depth","gray","illusion","lineart","lines","low-quality","normal-map","pose","scribble","seg","sketch","tile"]},"seed":{"type":"string","description":"Used to reproduce previous results. Default: randomly generated number."},"strength":{"type":"number","description":"Controls the noise intensity introduced to the input image, where a value of 1.0 completely erases the original image's details. Available for img2img and inpainting. (within [0.01, 1.0], default: 0.75)","minimum":0.01,"maximum":1},"ipAdapterType":{"type":"string","description":"The type of IP Adapter model to use. Must be one of [`style`, `character`], defaults to `style`","enum":["character","style"],"default":"style"},"aspectRatio":{"type":"string","description":"The aspect ratio of the generated images. Only used for the model flux.1.1-pro-ultra.\nThe aspect ratio is a string formatted as \"width:height\" (example: \"16:9\").","enum":["16:9","1:1","21:9","2:3","3:2","3:4","4:3","4:5","5:4","9:16","9:21"],"default":"1:1"},"type":{"type":"string","description":"The type of inference to use. 
Example: txt2img, img2img, etc.\n\nSelecting the right type will condition the expected parameters.\n\nNote: if model.type is `sd-xl*` or `sd-1_5*`, when using the `\"inpaint\"` inference type, Scenario determines the best available `baseModel` for a given `modelId`: one of `[\"stable-diffusion-inpainting\", \"stable-diffusion-xl-1.0-inpainting-0.1\"] will be used.","enum":["controlnet","controlnet_img2img","controlnet_inpaint","controlnet_inpaint_ip_adapter","controlnet_ip_adapter","controlnet_reference","controlnet_texture","img2img","img2img_ip_adapter","img2img_texture","inpaint","inpaint_ip_adapter","outpaint","reference","reference_texture","txt2img","txt2img_ip_adapter","txt2img_texture"]},"ipAdapterImage":{"type":"string","description":"Signed URL to display the IpAdapter image"},"scheduler":{"type":"string","description":"The scheduler to use to override the default configured for the model. See detailed documentation for more details.","enum":["DDIMScheduler","DDPMScheduler","DEISMultistepScheduler","DPMSolverMultistepScheduler","DPMSolverSinglestepScheduler","EulerAncestralDiscreteScheduler","EulerDiscreteScheduler","HeunDiscreteScheduler","KDPM2AncestralDiscreteScheduler","KDPM2DiscreteScheduler","LCMScheduler","LMSDiscreteScheduler","PNDMScheduler","TCDScheduler","UniPCMultistepScheduler"]},"disableMerging":{"type":"boolean","description":"If set to true, the entire input image will likely change during inpainting. 
This results in faster inferences, but the output image will be harder to integrate if the input is just a small part of a larger image.","default":false},"disableModalityDetection":{"type":"boolean","description":"If false, the process uses the given image to detect the modality.\nIf true (default), the process will not try to detect the modality of the given image.\n\nFor example:\nwith `pose` modality and `false` value, the process will detect the pose of people in the given image\nwith `depth` modality and `false` value, the process will detect the depth of the given image\nwith `scribble` modality and `true` value, the process will use the given image as a scribble\n\n⚠️ For models of the FLUX schnell or dev families, this parameter is ignored. The modality detection is always disabled. ⚠️","default":true},"ipAdapterImages":{"type":"array","description":"Signed URL to display the IpAdapter images","items":{"type":"string"}},"baseModelId":{"type":"string","description":"The base model to use for the inference. 
Only Flux LoRA models can use this parameter.\nAllowed values are available in the model's attribute: `compliantModelIds`"},"controlStart":{"type":"number","description":"Specifies the starting point of the ControlNet guidance during the inference process.\n\nOnly available for Flux.1-dev based models.\n\nThe value represents the percentage of total inference steps where the ControlNet guidance starts.\nFor example:\n- 0.0: ControlNet guidance starts at the beginning of the inference steps\n- 0.5: ControlNet guidance starts at the middle of the inference steps","minimum":0,"maximum":0.9},"height":{"type":"number","description":"The height of the generated images, must be a 8 multiple (within [64, 2048], default: 512)\nIf model.type is `sd-xl`, `sd-xl-lora`, `sd-xl-composition` the height must be within [512, 2048]\nIf model.type is `sd-1_5`, the height must be within [64, 1024]\nIf model.type is `flux.1.1-pro-ultra`, you can use the aspectRatio parameter instead","minimum":64,"maximum":2048},"mask":{"type":"string","description":"Signed URL to display the mask image"},"controlImageId":{"type":"string","description":"Asset id of the controlnet input image"},"image":{"type":"string","description":"Signed URL to display the input image"},"imageId":{"type":"string","description":"Asset id of the input image"},"ipAdapterImageId":{"type":"string","description":"Asset id of the input IpAdapter image"},"ipAdapterScale":{"type":"number","description":"IpAdapter scale factor (within [0.0, 1.0], default: 0.9).","minimum":0,"maximum":1},"modelEpoch":{"type":"string","description":"The epoch of the model to use for the inference. 
Only available for Flux Lora Trained models."},"hideResults":{"type":"boolean","description":"If set, generated assets will be hidden and not returned in the list of images of the inference\nor when listing assets (default: false)","default":false},"ipAdapterScales":{"type":"array","description":"IpAdapter scale factors (within [0.0, 1.0], default: 0.9).","items":{"type":"number"}},"maskId":{"type":"string","description":"Asset id of the mask image"},"referenceAdain":{"type":"boolean","description":"Whether to use reference adain\nOnly for \"reference\" inference type","default":false},"negativePrompt":{"type":"string","description":"The prompt not to guide the image generation, ignored when guidance < 1 (example: \"((ugly face))\")\nFor Flux based model (not Fast-Flux): requires negativePromptStrength > 0 and active only for inference types txt2img / img2img / controlnet."},"intermediateImages":{"type":"boolean","description":"Enable or disable the intermediate images generation (default: false)","default":false},"concepts":{"type":"array","items":{"type":"object","properties":{"modelId":{"type":"string","description":"The model ID (example: \"model_eyVcnFJcR92BxBkz7N6g5w\")"},"modelEpoch":{"type":"string","description":"The epoch of the model (example: \"000001\")\nOnly available for Flux Lora Trained models"},"scale":{"type":"number","description":"The scale of the model (example: 1.0)\nFor Flux Kontext Prompt Editing, the scale is between 0 and 2.","minimum":-2,"maximum":2}},"required":["modelId","scale"]}},"controlImage":{"type":"string","description":"Signed URL to display the controlnet input image"},"guidance":{"type":"number","description":"Controls how closely the generated image follows the prompt. Higher values result in stronger adherence to the prompt. 
Default and allowed values depend on the model type:\n- For Flux dev models, the default is 3.5 and allowed values are within [0, 10]\n- For Flux pro models, the default is 3 and allowed values are within [2, 5]\n- For SDXL models, the default is 6 and allowed values are within [0, 20]\n- For SD1.5 models, the default is 7.5 and allowed values are within [0, 20]","minimum":0,"maximum":20},"numInferenceSteps":{"type":"number","description":"The number of denoising steps for each image generation (within [1, 150], default: 30)","minimum":1,"maximum":150},"numSamples":{"type":"number","description":"The number of images to generate (within [1, 128], default: 4)","minimum":1,"maximum":128},"width":{"type":"number","description":"The width of the generated images, must be a 8 multiple (within [64, 2048], default: 512)\nIf model.type is `sd-xl`, `sd-xl-lora`, `sd-xl-composition` the width must be within [512, 2048]\nIf model.type is `sd-1_5`, the width must be within [64, 1024]\nIf model.type is `flux.1.1-pro-ultra`, you can use the aspectRatio parameter instead","minimum":64,"maximum":2048},"negativePromptStrength":{"type":"number","description":"Only applicable for flux-dev based models for `txt2img`, `img2img`, and `controlnet` inference types.\n\nControls the influence of the negative prompt. Default 0 means the negative prompt has no effect. Higher values increase negative prompt influence.\nMust be > 0 if negativePrompt is provided.","minimum":0,"maximum":10},"referenceAttn":{"type":"boolean","description":"Whether to use reference query for self attention's context\nOnly for \"reference\" inference type","default":false},"prompt":{"type":"string","description":"Full text prompt including the model placeholder. 
(example: \"an illustration of phoenix in a fantasy world, flying over a mountain, 8k, bokeh effect\")"}},"required":["prompt","type"]},"queue":{"type":"object","description":"The generation queue information (for generations with the \"queued\" status)","properties":{"approximateNumberOfRequests":{"type":"number","description":"The approximate number of requests in the generation queue"},"approximatePosition":{"type":"number","description":"The approximate position in the generation queue"}},"required":["approximateNumberOfRequests","approximatePosition"]},"status":{"type":"string","description":"The inference status","enum":["canceled","failed","in-progress","model-loading","new","queued","succeeded"]}},"required":["authorId","createdAt","displayPrompt","id","images","imagesNumber","modelId","ownerId","parameters","status","userId"]}}},"PostControlnetTextureInferencesResponse":{"type":"object","required":["inference","job"],"properties":{"creativeUnitsCost":{"type":"number","description":"The Creative Units cost for the request billed"},"creativeUnitsDiscount":{"type":"number","description":"The Creative Units discount for the request billed"},"inference":{"type":"object","properties":{"image":{"type":"string","description":"Signed URL to display the input image"},"controlEnd":{"type":"number","description":"Specifies how long the ControlNet guidance should be applied during the inference process.\n\nOnly available for Flux.1-dev based models.\n\nThe value represents the percentage of total inference steps where the ControlNet guidance is active.\nFor example:\n- 1.0: ControlNet guidance is applied during all inference steps\n- 0.5: ControlNet guidance is only applied during the first half of inference steps\n\nDefault values:\n- 0.5 for Canny modality\n- 0.6 for all other modalities","minimum":0.1,"maximum":1},"imageId":{"type":"string","description":"Asset id of the input image"},"modality":{"type":"string","description":"The modality associated with the 
control image used for the generation: it can either be an object with a combination of maximum\n\nFor models of SD1.5 family:\n - up to 3 modalities from `canny`, `pose`, `depth`, `lines`, `seg`, `scribble`, `lineart`, `normal-map`, `illusion`\n - or one of the following presets: `character`, `landscape`, `city`, `interior`.\n\nFor models of the SDXL family:\n - up to 3 modalities from `canny`, `pose`, `depth`, `seg`, `illusion`, `scribble`\n - or one of the following presets: `character`, `landscape`.\n\nFor models of the FLUX schnell or dev families:\n- one modality from: `canny`, `tile`, `depth`, `blur`, `pose`, `gray`, `low-quality`\n\nOptionally, you can associate a value to these modalities or presets. The value must be within `]0.0, 1.0]`.\n\nExamples:\n- `canny`\n- `depth:0.5,pose:1.0`\n- `canny:0.5,depth:0.5,lines:0.3`\n- `landscape`\n- `character:0.5`\n- `illusion:1`\n\nNote: if you use a value that is not supported by the model family, this will result in an error.","enum":["blur","canny","depth","gray","illusion","lineart","lines","low-quality","normal-map","pose","scribble","seg","sketch","tile"]},"seed":{"type":"string","description":"Used to reproduce previous results. Default: randomly generated number."},"modelEpoch":{"type":"string","description":"The epoch of the model to use for the inference. Only available for Flux Lora Trained models."},"hideResults":{"type":"boolean","description":"If set, generated assets will be hidden and not returned in the list of images of the inference\nor when listing assets (default: false)","default":false},"negativePrompt":{"type":"string","description":"The prompt not to guide the image generation, ignored when guidance < 1 (example: \"((ugly face))\")\nFor Flux based model (not Fast-Flux): requires negativePromptStrength > 0 and active only for inference types txt2img / img2img / controlnet."},"scheduler":{"type":"string","description":"The scheduler to use to override the default configured for the model. 
See detailed documentation for more details.","enum":["DDIMScheduler","DDPMScheduler","DEISMultistepScheduler","DPMSolverMultistepScheduler","DPMSolverSinglestepScheduler","EulerAncestralDiscreteScheduler","EulerDiscreteScheduler","HeunDiscreteScheduler","KDPM2AncestralDiscreteScheduler","KDPM2DiscreteScheduler","LCMScheduler","LMSDiscreteScheduler","PNDMScheduler","TCDScheduler","UniPCMultistepScheduler"]},"intermediateImages":{"type":"boolean","description":"Enable or disable the intermediate images generation (default: false)","default":false},"concepts":{"type":"array","items":{"type":"object","properties":{"modelId":{"type":"string","description":"The model ID (example: \"model_eyVcnFJcR92BxBkz7N6g5w\")"},"modelEpoch":{"type":"string","description":"The epoch of the model (example: \"000001\")\nOnly available for Flux Lora Trained models"},"scale":{"type":"number","description":"The scale of the model (example: 1.0)\nFor Flux Kontext Prompt Editing, the scale is between 0 and 2.","minimum":-2,"maximum":2}},"required":["modelId","scale"]}},"disableModalityDetection":{"type":"boolean","description":"If false, the process uses the given image to detect the modality.\nIf true (default), the process will not try to detect the modality of the given image.\n\nFor example:\nwith `pose` modality and `false` value, the process will detect the pose of people in the given image\nwith `depth` modality and `false` value, the process will detect the depth of the given image\nwith `scribble` modality and `true` value, the process will use the given image as a scribble\n\n⚠️ For models of the FLUX schnell or dev families, this parameter is ignored. The modality detection is always disabled. ⚠️","default":true},"guidance":{"type":"number","description":"Controls how closely the generated image follows the prompt. Higher values result in stronger adherence to the prompt. 
Default and allowed values depend on the model type:\n- For Flux dev models, the default is 3.5 and allowed values are within [0, 10]\n- For Flux pro models, the default is 3 and allowed values are within [2, 5]\n- For SDXL models, the default is 6 and allowed values are within [0, 20]\n- For SD1.5 models, the default is 7.5 and allowed values are within [0, 20]","minimum":0,"maximum":20},"numInferenceSteps":{"type":"number","description":"The number of denoising steps for each image generation (within [1, 150], default: 30)","minimum":1,"maximum":150},"numSamples":{"type":"number","description":"The number of images to generate (within [1, 128], default: 4)","minimum":1,"maximum":128},"width":{"type":"number","description":"The width of the generated images, must be a 8 multiple (within [64, 2048], default: 512)\nIf model.type is `sd-xl`, `sd-xl-lora`, `sd-xl-composition` the width must be within [512, 2048]\nIf model.type is `sd-1_5`, the width must be within [64, 1024]\nIf model.type is `flux.1.1-pro-ultra`, you can use the aspectRatio parameter instead","minimum":64,"maximum":2048},"negativePromptStrength":{"type":"number","description":"Only applicable for flux-dev based models for `txt2img`, `img2img`, and `controlnet` inference types.\n\nControls the influence of the negative prompt. Default 0 means the negative prompt has no effect. Higher values increase negative prompt influence.\nMust be > 0 if negativePrompt is provided.","minimum":0,"maximum":10},"baseModelId":{"type":"string","description":"The base model to use for the inference. Only Flux LoRA models can use this parameter.\nAllowed values are available in the model's attribute: `compliantModelIds`"},"prompt":{"type":"string","description":"Full text prompt including the model placeholder. 
(example: \"an illustration of phoenix in a fantasy world, flying over a mountain, 8k, bokeh effect\")"},"controlStart":{"type":"number","description":"Specifies the starting point of the ControlNet guidance during the inference process.\n\nOnly available for Flux.1-dev based models.\n\nThe value represents the percentage of total inference steps where the ControlNet guidance starts.\nFor example:\n- 0.0: ControlNet guidance starts at the beginning of the inference steps\n- 0.5: ControlNet guidance starts at the middle of the inference steps","minimum":0,"maximum":0.9},"height":{"type":"number","description":"The height of the generated images, must be a 8 multiple (within [64, 2048], default: 512)\nIf model.type is `sd-xl`, `sd-xl-lora`, `sd-xl-composition` the height must be within [512, 2048]\nIf model.type is `sd-1_5`, the height must be within [64, 1024]\nIf model.type is `flux.1.1-pro-ultra`, you can use the aspectRatio parameter instead","minimum":64,"maximum":2048}},"required":["image","imageId","modality","prompt"]},"job":{"type":"object","properties":{"createdAt":{"type":"string","description":"The job creation date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"jobId":{"type":"string","description":"The job ID (example: \"job_ocZCnG1Df35XRL1QyCZSRxAG8\")"},"metadata":{"type":"object","description":"Metadata of the job with some additional information","properties":{"output":{"type":"object","description":"The output of the job","properties":{}},"input":{"type":"object","description":"The inputs for the job","properties":{}},"assetIds":{"type":"array","description":"List of produced assets for this job","items":{"type":"string"}},"flow":{"type":"array","description":"The flow of the job. Only available for workflow jobs.","items":{"type":"object","properties":{"jobId":{"type":"string","description":"If the flow is part of a WorkflowJob, this is the jobId for the node.\njobId is only available for nodes started. 
A node \"Pending\" for a running workflow job is not started."},"assets":{"type":"array","description":"List of produced assets for this node.","items":{"type":"object","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]}},"logicType":{"type":"string","description":"The type of the logic for the node.\nOnly available for logic nodes.","enum":["if-else"]},"dependsOn":{"type":"array","description":"The nodes that this node depends on.\nOnly available for nodes that have dependencies. Mainly used for user approval nodes.","items":{"type":"string"}},"modelId":{"type":"string","description":"The model id for the node. Mainly used for custom model tasks."},"inputs":{"type":"array","description":"The inputs of the node.","items":{"type":"object","description":"This is used to run the flow.","properties":{"parent":{"type":"boolean","description":"Whether this input represents a parent asset to assign to the produced assets.\nOnly available for \\`file\\` and \\`file_array\\` input types.\n\nFor \\`file_array\\`, the parent asset is the first item in the array."},"color":{"type":"boolean","description":"Whether the input is a color or not. Only available for \\`string\\` input type."},"inputs":{"type":"array","description":"The list of inputs which form an object within a container array.\nAll inputs are the same as the current object.\nThis is only available for type inputs_array inputs.","items":{"type":"object","properties":{}}},"minLength":{"type":"number","description":"The minimum allowed length for string inputs. Also applies to each item in \\`string_array\\`."},"backgroundBehavior":{"type":"string","description":"Specifies the background behavior for the input. 
Only available for \\`file\\` and \\`file_array\\`\ninput types with kind \\`image\\`.","enum":["opaque","transparent"]},"description":{"type":"string","description":"Help text displayed in the UI to provide additional information about the input"},"type":{"type":"string","description":"The data type of the input","enum":["boolean","file","file_array","inputs_array","model","model_array","number","number_array","string","string_array"]},"required":{"type":"object","description":"Set of rules that describes when this input is required:\n- \\`always\\`: Input is always required\n- \\`ifNotDefined\\`: Input is required when another specified input is not defined\n- \\`ifDefined\\`: Input is required when another specified input is defined\n- \\`conditionalValues\\`: Input is required when another input has a specific value\n\nBy default, the input is not required.","properties":{"always":{"type":"boolean","description":"Whether the input is always required"},"ifNotDefined":{"type":"object","description":"Makes this input required when another input is not defined:\n- Key: name of the input that must be undefined\n- Value: message to display when this input is required","properties":{}},"ifDefined":{"type":"object","description":"Makes this input required when another input is defined:\n- Key: name of the input that must be defined\n- Value: message to display when this input is required","properties":{}},"conditionalValues":{"type":"object","description":"Makes this input required when another input has a specific value:\n- Key: name of the input to check\n- Value: operation and allowed values that trigger the requirement","properties":{}}}},"modelTypes":{"type":"array","description":"The allowed model types for this input. 
Example: \\`[\"flux.1-lora\"]\\`.\nOnly available for \\`model_array\\` input type.","items":{"type":"string","enum":["custom","flux.1","flux.1-composition","flux.1-kontext-dev","flux.1-kontext-lora","flux.1-krea-dev","flux.1-krea-lora","flux.1-lora","flux.1-pro","flux.1.1-pro-ultra","flux1.1-pro","gpt-image-1","sd-1_5","sd-1_5-composition","sd-1_5-lora","sd-xl","sd-xl-composition","sd-xl-lora"]}},"ref":{"type":"object","description":"The reference to another input or output of the same workflow.\nMust have at least one of node or conditional.","properties":{"equal":{"type":"string","description":"This is the desired node output value if ref is an if/else node."},"node":{"type":"string","description":"The node id or 'workflow' if the source is a workflow input."},"conditional":{"type":"array","description":"The conditional nodes to reference.\nIf the conditional nodes are successful, the node will be successful.\nIf the conditional nodes are skipped, the node will be skipped.\nContains an array of node ids used to check the status of the nodes.","items":{"type":"string"}},"name":{"type":"string","description":"The name of the input or output to reference.\nIf the type is 'workflow', the name is the name of the input of the workflow is required\nIf the type is 'node', the name is not mandatory, except if you want all outputs of the node.\nTo get all outputs of a node, you can use the name 'all'."}}},"min":{"type":"number","description":"The minimum allowed value. Only available for \\`number\\` and array input types."},"promptSpark":{"type":"boolean","description":"Whether the input is used with prompt spark. Only available for \\`string\\` input type."},"placeholder":{"type":"string","description":"Placeholder text for the input. Only available for 'string' input type."},"group":{"type":"string","description":"Used to visually group inputs together in the UI. 
Inputs with the same group value appear\nconsecutively in the UI."},"allowedValues":{"type":"array","description":"The allowed values for the input. For \\`string\\` or \\`number\\` types, creates a single-select\ndropdown.\nFor \\`string_array\\` type, creates a multi-select dropdown.","items":{}},"costImpact":{"type":"boolean","description":"Whether this input affects the model's cost calculation"},"max":{"type":"number","description":"The maximum allowed value. Only available for \\`number\\` and \\`array\\` input types."},"kind":{"type":"string","description":"The asset kind of the input. Only taken into account for \\`file\\` and \\`file_array\\` input types.","enum":["3d","audio","document","image","image-hdr","json","video"]},"maskFrom":{"type":"string","description":"The name of the file input field to use as the mask source"},"label":{"type":"string","description":"The label displayed in the UI for this input"},"hint":{"type":"string","description":"Hint text displayed in the UI as a tooltip to guide the user"},"name":{"type":"string","description":"The name that must be used to call the model through the API"},"step":{"type":"number","description":"The step increment for numeric inputs. Only available for \\`number\\` input type.","minimum":1},"prompt":{"type":"boolean","description":"Whether the input is a prompt. When true, displays as a text area with prompt spark feature.\nOnly available for \\`string\\` input type."},"maxLength":{"type":"number","description":"The maximum allowed length for \\`string\\` inputs. 
Also applies to each item in \\`string_array\\`."}},"required":["name","type"]}},"id":{"type":"string","description":"The id of the node."},"logic":{"type":"object","description":"The logic of the node.\nOnly available for logic nodes.","properties":{"default":{"type":"string","description":"The default case of the logic.\nContains the id/output of the node to execute if no case is matched.\nOnly available for if/else nodes."},"transform":{"type":"string","description":"The transform of the logic.\nOnly available for transform nodes."},"cases":{"type":"array","description":"The cases of the logic.\nOnly available for if/else nodes.","items":{"type":"object","properties":{"condition":{"type":"string"},"value":{"type":"string"}},"required":["condition","value"]}}}},"type":{"type":"string","description":"The type of the job for the node.","enum":["custom-model","generate-prompt","logic","remove-background","transform","user-approval","workflow"]},"workflowId":{"type":"string","description":"The workflow id for the node. Mainly used for workflow tasks."},"status":{"type":"string","description":"The status of the node. 
Only available for WorkflowJob nodes.","enum":["failure","pending","processing","skipped","success"]}},"required":["id","status","type"]}},"workflowId":{"type":"string","description":"The workflow ID of the job if job is part of a workflow."},"workflowJobId":{"type":"string","description":"The workflow job ID of the job if job is part of a workflow job."}}},"statusHistory":{"type":"array","description":"The history of the different statuses the job went through with the ISO string date\nof when the job reached each statuses.","items":{"type":"object","properties":{"date":{"type":"string"},"status":{"type":"string","enum":["canceled","failure","finalizing","in-progress","pending","queued","success","warming-up"]}},"required":["date","status"]}},"progress":{"type":"number","description":"Progress of the job (between 0 and 1)"},"authorId":{"type":"string","description":"The author user ID (example: \"dcf121faaa1a0a0bbbd9ca1b73d62aea\")"},"jobType":{"type":"string","description":"The type of job","enum":["assets-download","canvas-export","caption","caption-llava","custom","describe-style","detection","embed","flux","flux-model-training","generate-prompt","image-generation","image-prompt-editing","inference","mesh-preview-rendering","model-download","model-import","model-training","openai-image-generation","patch-image","pixelate","reframe","remove-background","repaint","restyle","segment","skybox-3d","skybox-base-360","skybox-hdri","skybox-upscale-360","texture","translate","upload","upscale","upscale-skybox","upscale-texture","vectorize","workflow"]},"ownerId":{"type":"string","description":"The owner ID (example: \"team_U3Qmc8PCdWXwAQJ4Dvw4tV6D\")"},"billing":{"type":"object","description":"The billing of the job","properties":{"cuDiscount":{"type":"number"},"cuCost":{"type":"number"}},"required":["cuCost","cuDiscount"]},"status":{"type":"string","description":"The current status of the 
job","enum":["canceled","failure","finalizing","in-progress","pending","queued","success","warming-up"]},"updatedAt":{"type":"string","description":"The job last update date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"}},"required":["createdAt","jobId","jobType","metadata","progress","status","statusHistory","updatedAt"]}}},"LockAssetByAssetIdResponse":{"type":"object","required":["asset"],"properties":{"lockId":{"type":"string","description":"The value of the lock to use when updating/unlocking the canvas."},"asset":{"type":"object","properties":{"preview":{"type":"object","description":"The asset's preview.\n\nContains the assetId and the url of the preview.","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"metadata":{"type":"object","description":"Metadata of the asset with some additional information","properties":{"maxIterations":{"type":"number"},"modality":{"type":"string","description":"Modality to detect","enum":["canny","depth","grayscale","lineart_anime","mlsd","normal","pose","scribble","segmentation","sketch"],"default":"canny"},"tileStyle":{"type":"boolean","description":"If set to true, during the upscaling process, the model will match tiles of the source image with tiles of the style image(s). This will result in a more coherent restyle. Works best with style images that have a similar composition.","default":false},"modelId":{"type":"string","description":"The modelId used to generate this asset"},"sourceProjectId":{"type":"string"},"bbox":{"type":"array","description":"A bounding box around the object of interest, in the format [x1, y1, x2, y2].","items":{}},"keypointThreshold":{"type":"number","description":"How polished is the surface? 0 is like a rough surface, 1 is like a mirror","minimum":0,"maximum":1},"type":{"type":"string","description":"The type of the asset. 
Ex: 'inference-txt2img' will represent an asset generated from a text to image model","enum":["3d-texture","3d-texture-albedo","3d-texture-metallic","3d-texture-mtl","3d-texture-normal","3d-texture-roughness","3d23d","3d23d-texture","audio2audio","background-removal","canvas","canvas-drawing","canvas-export","detection","generative-fill","image-prompt-editing","img23d","img2img","img2video","inference-controlnet","inference-controlnet-img2img","inference-controlnet-inpaint","inference-controlnet-inpaint-ip-adapter","inference-controlnet-ip-adapter","inference-controlnet-reference","inference-controlnet-texture","inference-img2img","inference-img2img-ip-adapter","inference-img2img-texture","inference-inpaint","inference-inpaint-ip-adapter","inference-reference","inference-reference-texture","inference-txt2img","inference-txt2img-ip-adapter","inference-txt2img-texture","patch","pixelization","reframe","restyle","segment","segmentation-image","segmentation-mask","skybox-3d","skybox-base-360","skybox-hdri","texture","texture-albedo","texture-ao","texture-edge","texture-height","texture-metallic","texture-normal","texture-smoothness","txt23d","txt2audio","txt2img","txt2video","unknown","uploaded","uploaded-3d","uploaded-audio","uploaded-avatar","uploaded-video","upscale","upscale-skybox","upscale-texture","upscale-video","vectorization","video2img","video2video"]},"text":{"type":"string","description":"A textual description / keywords describing the object of interest.","maxLength":100},"height":{"type":"number"},"nbMasks":{"type":"number"},"creativityDecay":{"type":"number","description":"Amount of decay in creativity over the upscale process. 
The lower the value, the less the creativity will be preserved over the upscale process.","minimum":0,"maximum":100},"layerDifference":{"type":"number"},"scalingFactor":{"type":"number","description":"Scaling factor (when `targetWidth` not specified)","minimum":1,"maximum":16},"inputFidelity":{"type":"string","description":"When set to `high`, allows to better preserve details from the input images in the output.\nThis is especially useful when using images that contain elements like faces or logos that\nrequire accurate preservation in the generated image.\n\nYou can provide multiple input images that will all be preserved with high fidelity, but keep\nin mind that the first image will be preserved with richer textures and finer details, so if\nyou include elements such as faces, consider placing them in the first image.\n\nOnly available for the `gpt-image-1` model.","enum":["high","low"],"default":"low"},"modelType":{"type":"string","description":"The type of the generator used","enum":["custom","flux.1","flux.1-composition","flux.1-kontext-dev","flux.1-kontext-lora","flux.1-krea-dev","flux.1-krea-lora","flux.1-lora","flux.1-pro","flux.1.1-pro-ultra","flux1.1-pro","gpt-image-1","sd-1_5","sd-1_5-composition","sd-1_5-lora","sd-xl","sd-xl-composition","sd-xl-lora"]},"numOutputs":{"type":"number","description":"The number of outputs to generate.","minimum":1,"maximum":8},"angular":{"type":"number","description":"How angular is the surface? 
0 is like a sphere, 1 is like a mechanical object","minimum":0,"maximum":1},"targetHeight":{"type":"number","description":"The target height of the output image.","minimum":0,"maximum":2048},"concepts":{"type":"array","description":"Flux Kontext LoRA to style the image.\nFor Flux Kontext Prompt Editing.","items":{"type":"object","properties":{"modelId":{"type":"string","description":"The model ID (example: \"model_eyVcnFJcR92BxBkz7N6g5w\")"},"modelEpoch":{"type":"string","description":"The epoch of the model (example: \"000001\")\nOnly available for Flux Lora Trained models"},"scale":{"type":"number","description":"The scale of the model (example: 1.0)\nFor Flux Kontext Prompt Editing, the scale is between 0 and 2.","minimum":-2,"maximum":2}},"required":["modelId","scale"]}},"size":{"type":"number"},"spliceThreshold":{"type":"number"},"dilate":{"type":"number","description":"The number of pixels to dilate the result masks.","minimum":0,"maximum":30},"style":{"type":"string","enum":["3d-cartoon","3d-rendered","anime","cartoon","cinematic","claymation","cloud-skydome","comic","cyberpunk","enchanted","fantasy","ink","manga","manga-color","minimalist","neon-tron","oil-painting","pastel","photo","photography","psychedelic","retro-fantasy","scifi-concept-art","space","standard","whimsical"]},"imageFidelity":{"type":"number","description":"Strengthen the similarity to the original image during the upscale. Default: optimized for your preset and style.","minimum":0,"maximum":100},"inputLocation":{"type":"string","description":"Location of the input image in the output.","enum":["bottom","left","middle","right","top"],"default":"middle"},"seed":{"type":"string","description":"The seed used to generate this asset"},"preset":{"type":"string"},"trainingImage":{"type":"boolean"},"resultMask":{"type":"boolean","description":"Boolean to enable returning the masks (binary image) in the response.","default":true},"lockExpiresAt":{"type":"string","description":"The ISO timestamp when the lock on the canvas will expire"},"baseModelId":{"type":"string","description":"The baseModelId that may be changed at inference time"},"pathPrecision":{"type":"number"},"rootParentId":{"type":"string"},"sharpen":{"type":"boolean","description":"Sharpen tiles.","default":false},"cornerThreshold":{"type":"number"},"styleImages":{"type":"array","description":"List of style images. Most of the time, only one image is enough. It must be existing AssetIds.","items":{"type":"string"}},"texture":{"type":"string","description":"The asset to convert into texture maps. Must reference an existing AssetId."},"filterSpeckle":{"type":"number"},"minThreshold":{"type":"number","description":"Minimum threshold for Grayscale conversion"},"maxThreshold":{"type":"number","description":"Maximum threshold for Grayscale conversion"},"styleImagesFidelity":{"type":"number","description":"Condition the influence of the style image(s). The higher the value, the more the style images will influence the upscaled image.","minimum":0,"maximum":100},"depthImage":{"type":"string","description":"The control image processed by depth estimator. 
Must reference an existing AssetId."},"parentId":{"type":"string"},"parentJobId":{"type":"string"},"width":{"type":"number","description":"The width of the rendered image.","minimum":1024,"maximum":2048},"strength":{"type":"number","description":"The strength\n\nOnly available for the `flux-kontext` LoRA model."},"inferenceId":{"type":"string","description":"The id of the Inference describing how this image was generated"},"lengthThreshold":{"type":"number"},"originalAssetId":{"type":"string"},"backgroundOpacity":{"type":"number","description":"Int to set between 0 and 255 for the opacity of the background in the result images.","minimum":0,"maximum":255},"aspectRatio":{"type":"string","description":"The optional aspect ratio given for the generation, only applicable for some models"},"detailsLevel":{"type":"number","description":"Amount of details to remove or add","minimum":-50,"maximum":50},"fractality":{"type":"number","description":"Determine the scale at which the upscale process works.\n- With a small value, the upscale works at the largest scale, resulting in fewer added details and more coherent images. Ideal for portraits, for example.\n- With a large value, the upscale works at the smallest scale, resulting in more added details and more hallucinations. Ideal for landscapes, for example.\n\n(info): A small value is slower and more expensive to run.","minimum":0,"maximum":100},"horizontalExpansionRatio":{"type":"number","description":"(deprecated) Horizontal expansion ratio.","minimum":1,"maximum":2},"points":{"type":"array","description":"List of points (label, x, y) in the image where label = 0 for background and 1 for object.","items":{"type":"array","items":{}}},"mode":{"type":"string"},"colorPrecision":{"type":"number"},"promptFidelity":{"type":"number","description":"Increase the fidelity to the prompt during upscale. 
Default: optimized for your preset and style.","minimum":0,"maximum":100},"betterQuality":{"type":"boolean","description":"Remove small dark spots (i.e. “pepper”) and connect small bright cracks.","default":false},"removeBackground":{"type":"boolean","description":"Remove background for Grayscale detector","default":true},"creativity":{"type":"number","description":"Allow the generation of \"hallucinations\" during the upscale process, which adds additional details and deviates from the original image. Default: optimized for your preset and style.","minimum":0,"maximum":100},"structureFidelity":{"type":"number","description":"Strength for the input image structure preservation","minimum":0,"maximum":100},"imageType":{"type":"string","description":"Preserve the seamless properties of skybox or texture images. Input has to be of same type (seamless).","enum":["seamfull","skybox","texture"],"default":"seamfull"},"image":{"type":"string","description":"The input image to process. Must reference an existing AssetId or be a data URL."},"thumbnail":{"type":"object","description":"The thumbnail of the canvas","properties":{"assetId":{"type":"string","description":"The AssetId of the image used as a thumbnail for the canvas (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"url":{"type":"string","description":"The url of the image used as a thumbnail for the canvas"}},"required":["assetId","url"]},"structureImage":{"type":"string","description":"The control image for structure. A canny detector will be applied to this image. 
Must reference an existing AssetId."},"invert":{"type":"boolean","description":"To invert the relief","default":false},"kind":{"type":"string","enum":["3d","audio","document","image","image-hdr","json","video"]},"colorMode":{"type":"string"},"highThreshold":{"type":"number","description":"High threshold for Canny detector"},"verticalExpansionRatio":{"type":"number","description":"(deprecated) Vertical expansion ratio.","minimum":1,"maximum":2},"halfMode":{"type":"boolean"},"name":{"type":"string"},"negativePromptStrength":{"type":"number","description":"Controls the influence of the negative prompt. Default 0 means the negative prompt has no effect. Higher values increase negative prompt influence.\nMust be > 0 if negativePrompt is provided.","minimum":0,"maximum":10},"refinementSteps":{"type":"number","description":"Additional refinement steps before scaling.\n\nIf scalingFactor == 1, the refinement process will be applied (1 + refinementSteps) times.\nIf scalingFactor > 1, the refinement process will be applied refinementSteps times.","minimum":0,"maximum":4},"referenceImages":{"type":"array","description":"The reference images used for the asset generation or editing","items":{"type":"string"}},"styleFidelity":{"type":"number","description":"The higher the value the more it will look like the style image(s)","minimum":0,"maximum":100},"resultContours":{"type":"boolean","description":"Boolean to output the contours.","default":false},"controlEnd":{"type":"number","description":"End step for control."},"polished":{"type":"number","description":"How polished is the surface? 0 is like a rough surface, 1 is like a mirror","minimum":0,"maximum":1},"scheduler":{"type":"string","description":"The scheduler used to generate this asset"},"overrideEmbeddings":{"type":"boolean","description":"Override the embeddings of the model. Only your prompt and negativePrompt will be used. 
Use with caution."},"progressPercent":{"type":"number"},"factor":{"type":"number","description":"Contrast factor for Grayscale detector"},"mask":{"type":"string","description":"The mask used for the asset generation or editing"},"lowThreshold":{"type":"number","description":"Low threshold for Canny detector"},"depthFidelity":{"type":"number","description":"The depth fidelity if a depth image provided","minimum":0,"maximum":100},"raised":{"type":"number","description":"How raised is the surface? 0 is flat like water, 1 is like a very rough rock","minimum":0,"maximum":1},"copiedAt":{"type":"string","description":"The date when the asset was copied to a project"},"contours":{"type":"array","items":{"type":"array","items":{"type":"array","items":{"type":"array","items":{"type":"number"}}}}},"negativePrompt":{"type":"string","description":"The negative prompt used to generate this asset"},"defaultParameters":{"type":"boolean","description":"If true, use the default parameters","default":false},"resizeOption":{"type":"number","description":"Size proportion of the input image in the output.","minimum":0.1,"maximum":1},"outputIndex":{"type":"number"},"saveFlipbook":{"type":"boolean","description":"Save a flipbook of the texture. Deactivated when the input texture is larger than 2048x2048px","default":true},"guidance":{"type":"number","description":"The guidance used to generate this asset"},"numInferenceSteps":{"type":"number","description":"The number of denoising steps for each image generation.","minimum":5,"maximum":50},"hdr":{"type":"number"},"clustering":{"type":"boolean","description":"Activate clustering.","default":false},"shiny":{"type":"number","description":"How shiny is the surface? 0 is like a matte surface, 1 is like a diamond","minimum":0,"maximum":1},"colorCorrection":{"type":"boolean","description":"Ensure upscaled tiles have the same color histogram as the original tile.","default":true},"sketch":{"type":"boolean","description":"Activate sketch detection instead of canny.","default":false},"overlapPercentage":{"type":"number","description":"Overlap percentage for the output image.","minimum":0,"maximum":0.5},"prompt":{"type":"string","description":"The prompt that guided the asset generation or editing"},"resultImage":{"type":"boolean","description":"Boolean to enable outputting the cut out object.","default":false},"cannyStructureImage":{"type":"string","description":"The control image already processed by canny detector. Must reference an existing AssetId."},"geometryEnforcement":{"type":"number","description":"Apply extra control to the Skybox 360 geometry.\nThe higher the value, the more the 360 geometry will influence the generated skybox image.\n\nUse with caution. 
Default is adapted to the other parameters.","minimum":0,"maximum":100},"targetWidth":{"type":"number","description":"Target width for the upscaled image, take priority over scaling factor","minimum":1024,"maximum":16000}},"required":["kind","type"]},"automaticCaptioning":{"type":"string","description":"Automatic captioning of the asset"},"description":{"type":"string","description":"The description, it will contain in priority:\n- the manual description\n- the advanced captioning when the asset is used in training flow\n- the automatic captioning"},"privacy":{"type":"string","description":"The privacy of the asset","enum":["private","public","unlisted"]},"mimeType":{"type":"string","description":"The mime type of the asset (example: \"image/png\")"},"source":{"type":"string","description":"source of the asset","enum":["3d23d","3d23d:texture","3d:texture","3d:texture:albedo","3d:texture:metallic","3d:texture:mtl","3d:texture:normal","3d:texture:roughness","audio2audio","background-removal","canvas","canvas-drawing","canvas-export","detection","generative-fill","image-prompt-editing","img23d","img2img","img2video","inference-control-net","inference-control-net-img","inference-control-net-inpainting","inference-control-net-inpainting-ip-adapter","inference-control-net-ip-adapter","inference-control-net-reference","inference-control-net-texture","inference-img","inference-img-ip-adapter","inference-img-texture","inference-in-paint","inference-in-paint-ip-adapter","inference-reference","inference-reference-texture","inference-txt","inference-txt-ip-adapter","inference-txt-texture","patch","pixelization","reframe","restyle","segment","segmentation-image","segmentation-mask","skybox-3d","skybox-base-360","skybox-hdri","texture","texture:albedo","texture:ao","texture:edge","texture:height","texture:metallic","texture:normal","texture:smoothness","txt23d","txt2audio","txt2img","txt2video","unknown","uploaded","uploaded-3d","uploaded-audio","uploaded-avatar","uploaded-video"
,"upscale","upscale-skybox","upscale-texture","upscale-video","vectorization","video2img","video2video"]},"ownerId":{"type":"string","description":"The owner (project) ID (example: \"proj_23tlk332lkht3kl2\" or \"team_dlkhgs23tlk3hlkth32lkht3kl2\" for old teams)"},"createdAt":{"type":"string","description":"The asset creation date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"firstFrame":{"type":"object","description":"The video asset's first frame.\n\nContains the assetId and the url of the first frame.","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"editCapabilities":{"type":"array","description":"List of edit capabilities","items":{"type":"string","enum":["DETECTION","GENERATIVE_FILL","PIXELATE","PROMPT_EDITING","REFINE","REFRAME","REMOVE_BACKGROUND","SEGMENTATION","UPSCALE","UPSCALE_360","VECTORIZATION"]}},"embedding":{"type":"array","description":"The embedding of the asset when requested.\n\nOnly available when an asset can be embedded (ie: not Detection maps)","items":{"type":"number"}},"id":{"type":"string","description":"The asset ID (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"updatedAt":{"type":"string","description":"The asset last update date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"thumbnail":{"type":"object","description":"The asset's thumbnail.\n\nContains the assetId and the url of the thumbnail.","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"nsfw":{"type":"array","description":"The NSFW labels","items":{"type":"string"}},"kind":{"type":"string","description":"The kind of asset","enum":["3d","audio","document","image","image-hdr","json","video"]},"lastFrame":{"type":"object","description":"The video asset's last frame.\n\nContains the assetId and the url of the last 
frame.","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"originalFileUrl":{"type":"string","description":"The original file url.\n\nContains the url of the original file, without any conversion. Only available for some specific video, audio and threeD assets.\nIs only specified if the given asset data has been replaced with a new file during the creation of the asset."},"authorId":{"type":"string","description":"The author user ID (example: \"dcf121faaa1a0a0bbbd9ca1b73d62aea\")"},"url":{"type":"string","description":"Signed URL to get the asset content"},"isHidden":{"type":"boolean","description":"Whether the asset is hidden."},"tags":{"type":"array","description":"The associated tags (example: [\"sci-fi\", \"landscape\"])","items":{"type":"string"}},"outputIndex":{"type":"number","description":"The output index of the asset within a job\nThis index is a positive integer that starts at 0\nIt is used to differentiate between multiple outputs of the same job\nIf the job has only one output, this index is 0"},"collectionIds":{"type":"array","description":"A list of CollectionId this asset belongs to","items":{"type":"string"}},"properties":{"type":"object","description":"The properties of the asset, content may depend on the kind of asset returned","properties":{"transcription":{"type":"object","description":"Transcription of the audio","properties":{"text":{"type":"string"}},"required":["text"]},"hasNormals":{"type":"boolean","description":"Whether the mesh has normal vectors"},"format":{"type":"string","description":"Format of the mesh file (e.g. 
'glb', etc.)"},"description":{"type":"string","description":"Description of the audio"},"bitrate":{"type":"number","description":"Bitrate of the media in bits per second"},"boneCount":{"type":"number","description":"Number of bones if skeleton exists"},"classification":{"type":"string","description":"Classification of the audio","enum":["effect","interview","music","other","sound","speech","text","unknown"]},"sampleRate":{"type":"number","description":"Sample rate of the media in Hz"},"faceCount":{"type":"number","description":"Number of faces/triangles in the mesh"},"vertexCount":{"type":"number","description":"Number of vertices in the mesh"},"animationFrameCount":{"type":"number","description":"Number of animation frames if animations exist"},"duration":{"type":"number","description":"Duration of the media in seconds"},"frameRate":{"type":"number","description":"Frame rate of the video in frames per second"},"nbFrames":{"type":"number","description":"Number of frames in the video"},"channels":{"type":"number","description":"Number of channels of the audio"},"size":{"type":"number"},"hasSkeleton":{"type":"boolean","description":"Whether the mesh has bones/skeleton"},"width":{"type":"number"},"hasAnimations":{"type":"boolean","description":"Whether the mesh has animations"},"codecName":{"type":"string","description":"Codec name of the media"},"hasUVs":{"type":"boolean","description":"Whether the mesh has UV coordinates"},"dimensions":{"type":"array","description":"Bounding box dimensions [width, height, depth]","items":{}},"height":{"type":"number"}},"required":["size"]},"status":{"type":"string","description":"The actual status","enum":["error","pending","success"]}},"required":["authorId","collectionIds","createdAt","editCapabilities","id","kind","metadata","mimeType","ownerId","privacy","properties","source","status","tags","updatedAt","url"]}}},"CopyAssetByAssetIdRequest":{"type":"object","properties":{"targetProjectId":{"type":"string","description":"The id 
of the project to copy the asset to. If not provided, the asset will be copied to the canvas project."}}},"PostPromptEditingInferencesRequest":{"type":"object","required":["image","prompt"],"properties":{"image":{"type":"string","description":"The image to edit. Must reference an existing AssetId or be a data URL."},"referenceImages":{"type":"array","description":"List of additional reference images as a data URLs (example: \\\"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVQYV2NgYAAAAAMAAWgmWQ0AAAAASUVORK5CYII=\\\") or the asset IDs (example: \\\"asset_GTrL3mq4SXWyMxkOHRxlpw\\\").\n\n5 additional reference images are allowed for the `gemini-2.0-flash`, `gemini-2.5-flash` and `gpt-image-1` models.\n3 additional reference images are allowed for the `flux-kontext` model.\nNo additional reference images are allowed for the `flux-kontext` model if using concepts.\n2 additional reference images are allowed for the `runway-gen4-image` model.","items":{"type":"string"}},"seed":{"type":"number","description":"The seed to use for the image generation.\n\nOnly available for the `flux-kontext` model."},"strength":{"type":"number","description":"The strength\n\nOnly available for the `flux-kontext` LoRA model."},"guidanceScale":{"type":"number","description":"The guidance scale to use for the image generation.\n\nOnly available for the `flux-kontext` model."},"modelId":{"type":"string","description":"The model to use. 
Can be \"gemini-2.0-flash\", \"gemini-2.5-flash\", \"gpt-image-1\", \"flux-kontext\", \"runway-gen4-image\" or \"seedream-4\".","enum":["flux-kontext","gemini-2.0-flash","gemini-2.5-flash","gpt-image-1","runway-gen4-image","seedream-4"],"default":"gemini-2.5-flash"},"format":{"type":"string","description":"The format of the generated image(s)\n\nThis parameter is only supported for the `gpt-image-1` model.","enum":["jpeg","png","webp"],"default":"png"},"aspectRatio":{"type":"string","description":"The aspect ratio of the generated image(s).\nSupported for: `gemini-2.5-flash`, `gpt-image-1`, `flux-kontext`, `runway-gen4-image`, `seedream-4`.\nWill default to `auto` for other models and unknown ratios.\n\nNotes:\n- `gemini-2.5-flash` supports Landscape: 21:9, 16:9, 4:3, 3:2 • Square: 1:1 • Portrait: 9:16, 3:4, 2:3 • Flexible: 5:4, 4:5 • `auto`.\n- `gpt-image-1` supports `1:1`, `3:2`, `2:3`, `auto` (unknown ratios fall back to `auto`).\n- `runway-gen4-image` supports `1:1`, `4:3`, `3:4`, `16:9`, `9:16`, `auto` (unknown ratios fall back to `auto`).\n- `seedream-4` supports `1:1`, `4:3`, `3:4`, `16:9`, `9:16`, `2:3`, `3:2`, `21:9`, `auto`.","enum":["16:9","1:1","21:9","2:3","3:2","3:4","4:3","4:5","5:4","9:16","9:21","auto"],"default":"auto"},"inputFidelity":{"type":"string","description":"When set to `high`, allows to better preserve details from the input images in the output.\nThis is especially useful when using images that contain elements like faces or logos that\nrequire accurate preservation in the generated image.\n\nYou can provide multiple input images that will all be preserved with high fidelity, but keep\nin mind that the first image will be preserved with richer textures and finer details, so if\nyou include elements such as faces, consider placing them in the first image.\n\nOnly available for the `gpt-image-1` model.","enum":["high","low"],"default":"low"},"quality":{"type":"string","description":"The quality of the generated image(s).\n\nOnly available 
for the `gpt-image-1`, `flux-kontext`, `runway-gen4-image` and `seedream-4` models.","enum":["high","low","medium"],"default":"high"},"concepts":{"type":"array","description":"Flux Kontext LoRA to style the image.\nFor Flux Kontext Prompt Editing.","items":{"type":"object","properties":{"modelId":{"type":"string","description":"The model ID (example: \"model_eyVcnFJcR92BxBkz7N6g5w\")"},"modelEpoch":{"type":"string","description":"The epoch of the model (example: \"000001\")\nOnly available for Flux Lora Trained models"},"scale":{"type":"number","description":"The scale of the model (example: 1.0)\nFor Flux Kontext Prompt Editing, the scale is between 0 and 2.","minimum":-2,"maximum":2}},"required":["modelId","scale"]}},"numSamples":{"type":"number","description":"The number of samples to generate\n\nMaximum depends on the subscription tier."},"compression":{"type":"number","description":"The compression level (0-100%) for the generated images. This parameter is only supported for\nthe `gpt-image-1` model with the `webp` or `jpeg` output formats, and defaults to 100."},"prompt":{"type":"string","description":"The prompt to edit the given image."},"mask":{"type":"string","description":"You can provide a mask to indicate where the image should be edited. The black area of the mask\nwill be replaced, while the filled areas will be kept as is.\n\nMust reference an existing AssetId or be a data URL.\n\nOnly available for the `gpt-image-1` model. 
Will be ignored for other models."}}},"InternalServerErrorResponse":{"type":"object","required":["apiError","createdAt","error","id","infos","updatedAt"],"properties":{"createdAt":{"type":"string"},"apiError":{"type":"string"},"id":{"type":"string"},"error":{"type":"string"},"infos":{"type":"object","properties":{}},"updatedAt":{"type":"string"}}},"GetAssetsByAssetIdResponse":{"type":"object","required":["asset"],"properties":{"asset":{"type":"object","properties":{"preview":{"type":"object","description":"The asset's preview.\n\nContains the assetId and the url of the preview.","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"metadata":{"type":"object","description":"Metadata of the asset with some additional information","properties":{"maxIterations":{"type":"number"},"modality":{"type":"string","description":"Modality to detect","enum":["canny","depth","grayscale","lineart_anime","mlsd","normal","pose","scribble","segmentation","sketch"],"default":"canny"},"tileStyle":{"type":"boolean","description":"If set to true, during the upscaling process, the model will match tiles of the source image with tiles of the style image(s). This will result in a more coherent restyle. Works best with style images that have a similar composition.","default":false},"modelId":{"type":"string","description":"The modelId used to generate this asset"},"sourceProjectId":{"type":"string"},"bbox":{"type":"array","description":"A bounding box around the object of interest, in the format [x1, y1, x2, y2].","items":{}},"keypointThreshold":{"type":"number","description":"How polished is the surface? 0 is like a rough surface, 1 is like a mirror","minimum":0,"maximum":1},"type":{"type":"string","description":"The type of the asset. 
Ex: 'inference-txt2img' will represent an asset generated from a text to image model","enum":["3d-texture","3d-texture-albedo","3d-texture-metallic","3d-texture-mtl","3d-texture-normal","3d-texture-roughness","3d23d","3d23d-texture","audio2audio","background-removal","canvas","canvas-drawing","canvas-export","detection","generative-fill","image-prompt-editing","img23d","img2img","img2video","inference-controlnet","inference-controlnet-img2img","inference-controlnet-inpaint","inference-controlnet-inpaint-ip-adapter","inference-controlnet-ip-adapter","inference-controlnet-reference","inference-controlnet-texture","inference-img2img","inference-img2img-ip-adapter","inference-img2img-texture","inference-inpaint","inference-inpaint-ip-adapter","inference-reference","inference-reference-texture","inference-txt2img","inference-txt2img-ip-adapter","inference-txt2img-texture","patch","pixelization","reframe","restyle","segment","segmentation-image","segmentation-mask","skybox-3d","skybox-base-360","skybox-hdri","texture","texture-albedo","texture-ao","texture-edge","texture-height","texture-metallic","texture-normal","texture-smoothness","txt23d","txt2audio","txt2img","txt2video","unknown","uploaded","uploaded-3d","uploaded-audio","uploaded-avatar","uploaded-video","upscale","upscale-skybox","upscale-texture","upscale-video","vectorization","video2img","video2video"]},"text":{"type":"string","description":"A textual description / keywords describing the object of interest.","maxLength":100},"height":{"type":"number"},"nbMasks":{"type":"number"},"creativityDecay":{"type":"number","description":"Amount of decay in creativity over the upscale process. 
The lower the value, the less the creativity will be preserved over the upscale process.","minimum":0,"maximum":100},"layerDifference":{"type":"number"},"scalingFactor":{"type":"number","description":"Scaling factor (when `targetWidth` not specified)","minimum":1,"maximum":16},"inputFidelity":{"type":"string","description":"When set to `high`, allows to better preserve details from the input images in the output.\nThis is especially useful when using images that contain elements like faces or logos that\nrequire accurate preservation in the generated image.\n\nYou can provide multiple input images that will all be preserved with high fidelity, but keep\nin mind that the first image will be preserved with richer textures and finer details, so if\nyou include elements such as faces, consider placing them in the first image.\n\nOnly available for the `gpt-image-1` model.","enum":["high","low"],"default":"low"},"modelType":{"type":"string","description":"The type of the generator used","enum":["custom","flux.1","flux.1-composition","flux.1-kontext-dev","flux.1-kontext-lora","flux.1-krea-dev","flux.1-krea-lora","flux.1-lora","flux.1-pro","flux.1.1-pro-ultra","flux1.1-pro","gpt-image-1","sd-1_5","sd-1_5-composition","sd-1_5-lora","sd-xl","sd-xl-composition","sd-xl-lora"]},"numOutputs":{"type":"number","description":"The number of outputs to generate.","minimum":1,"maximum":8},"angular":{"type":"number","description":"How angular is the surface? 
0 is like a sphere, 1 is like a mechanical object","minimum":0,"maximum":1},"targetHeight":{"type":"number","description":"The target height of the output image.","minimum":0,"maximum":2048},"concepts":{"type":"array","description":"Flux Kontext LoRA to style the image.\nFor Flux Kontext Prompt Editing.","items":{"type":"object","properties":{"modelId":{"type":"string","description":"The model ID (example: \"model_eyVcnFJcR92BxBkz7N6g5w\")"},"modelEpoch":{"type":"string","description":"The epoch of the model (example: \"000001\")\nOnly available for Flux Lora Trained models"},"scale":{"type":"number","description":"The scale of the model (example: 1.0)\nFor Flux Kontext Prompt Editing, the scale is between 0 and 2.","minimum":-2,"maximum":2}},"required":["modelId","scale"]}},"size":{"type":"number"},"spliceThreshold":{"type":"number"},"dilate":{"type":"number","description":"The number of pixels to dilate the result masks.","minimum":0,"maximum":30},"style":{"type":"string","enum":["3d-cartoon","3d-rendered","anime","cartoon","cinematic","claymation","cloud-skydome","comic","cyberpunk","enchanted","fantasy","ink","manga","manga-color","minimalist","neon-tron","oil-painting","pastel","photo","photography","psychedelic","retro-fantasy","scifi-concept-art","space","standard","whimsical"]},"imageFidelity":{"type":"number","description":"Strengthen the similarity to the original image during the upscale. 
Default: optimized for your preset and style.","minimum":0,"maximum":100},"inputLocation":{"type":"string","description":"Location of the input image in the output.","enum":["bottom","left","middle","right","top"],"default":"middle"},"seed":{"type":"string","description":"The seed used to generate this asset"},"preset":{"type":"string"},"trainingImage":{"type":"boolean"},"resultMask":{"type":"boolean","description":"Boolean to enable returning the masks (binary image) in the response.","default":true},"lockExpiresAt":{"type":"string","description":"The ISO timestamp when the lock on the canvas will expire"},"baseModelId":{"type":"string","description":"The baseModelId that may be changed at inference time"},"pathPrecision":{"type":"number"},"rootParentId":{"type":"string"},"sharpen":{"type":"boolean","description":"Sharpen tiles.","default":false},"cornerThreshold":{"type":"number"},"styleImages":{"type":"array","description":"List of style images. Most of the time, only one image is enough. They must be existing AssetIds.","items":{"type":"string"}},"texture":{"type":"string","description":"The asset to convert in texture maps. Must reference an existing AssetId."},"filterSpeckle":{"type":"number"},"minThreshold":{"type":"number","description":"Minimum threshold for Grayscale conversion"},"maxThreshold":{"type":"number","description":"Maximum threshold for Grayscale conversion"},"styleImagesFidelity":{"type":"number","description":"Condition the influence of the style image(s). The higher the value, the more the style images will influence the upscaled image.","minimum":0,"maximum":100},"depthImage":{"type":"string","description":"The control image processed by depth estimator. 
Must reference an existing AssetId."},"parentId":{"type":"string"},"parentJobId":{"type":"string"},"width":{"type":"number","description":"The width of the rendered image.","minimum":1024,"maximum":2048},"strength":{"type":"number","description":"The strength\n\nOnly available for the `flux-kontext` LoRA model."},"inferenceId":{"type":"string","description":"The id of the Inference describing how this image was generated"},"lengthThreshold":{"type":"number"},"originalAssetId":{"type":"string"},"backgroundOpacity":{"type":"number","description":"Int to set between 0 and 255 for the opacity of the background in the result images.","minimum":0,"maximum":255},"aspectRatio":{"type":"string","description":"The optional aspect ratio given for the generation, only applicable for some models"},"detailsLevel":{"type":"number","description":"Amount of details to remove or add","minimum":-50,"maximum":50},"fractality":{"type":"number","description":"Determine the scale at which the upscale process works.\n- With a small value, the upscale works at the largest scale, resulting in fewer added details and more coherent images. Ideal for portraits, for example.\n- With a large value, the upscale works at the smallest scale, resulting in more added details and more hallucinations. Ideal for landscapes, for example.\n\n(info): A small value is slower and more expensive to run.","minimum":0,"maximum":100},"horizontalExpansionRatio":{"type":"number","description":"(deprecated) Horizontal expansion ratio.","minimum":1,"maximum":2},"points":{"type":"array","description":"List of points (label, x, y) in the image where label = 0 for background and 1 for object.","items":{"type":"array","items":{}}},"mode":{"type":"string"},"colorPrecision":{"type":"number"},"promptFidelity":{"type":"number","description":"Increase the fidelity to the prompt during upscale. 
Default: optimized for your preset and style.","minimum":0,"maximum":100},"betterQuality":{"type":"boolean","description":"Remove small dark spots (i.e. “pepper”) and connect small bright cracks.","default":false},"removeBackground":{"type":"boolean","description":"Remove background for Grayscale detector","default":true},"creativity":{"type":"number","description":"Allow the generation of \"hallucinations\" during the upscale process, which adds additional details and deviates from the original image. Default: optimized for your preset and style.","minimum":0,"maximum":100},"structureFidelity":{"type":"number","description":"Strength for the input image structure preservation","minimum":0,"maximum":100},"imageType":{"type":"string","description":"Preserve the seamless properties of skybox or texture images. Input has to be of same type (seamless).","enum":["seamfull","skybox","texture"],"default":"seamfull"},"image":{"type":"string","description":"The input image to process. Must reference an existing AssetId or be a data URL."},"thumbnail":{"type":"object","description":"The thumbnail of the canvas","properties":{"assetId":{"type":"string","description":"The AssetId of the image used as a thumbnail for the canvas (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"url":{"type":"string","description":"The url of the image used as a thumbnail for the canvas"}},"required":["assetId","url"]},"structureImage":{"type":"string","description":"The control image for structure. A canny detector will be applied to this image. 
Must reference an existing AssetId."},"invert":{"type":"boolean","description":"To invert the relief","default":false},"kind":{"type":"string","enum":["3d","audio","document","image","image-hdr","json","video"]},"colorMode":{"type":"string"},"highThreshold":{"type":"number","description":"High threshold for Canny detector"},"verticalExpansionRatio":{"type":"number","description":"(deprecated) Vertical expansion ratio.","minimum":1,"maximum":2},"halfMode":{"type":"boolean"},"name":{"type":"string"},"negativePromptStrength":{"type":"number","description":"Controls the influence of the negative prompt. Default 0 means the negative prompt has no effect. Higher values increase negative prompt influence.\nMust be > 0 if negativePrompt is provided.","minimum":0,"maximum":10},"refinementSteps":{"type":"number","description":"Additional refinement steps before scaling.\n\nIf scalingFactor == 1, the refinement process will be applied (1 + refinementSteps) times.\nIf scalingFactor > 1, the refinement process will be applied refinementSteps times.","minimum":0,"maximum":4},"referenceImages":{"type":"array","description":"The reference images used for the asset generation or editing","items":{"type":"string"}},"styleFidelity":{"type":"number","description":"The higher the value the more it will look like the style image(s)","minimum":0,"maximum":100},"resultContours":{"type":"boolean","description":"Boolean to output the contours.","default":false},"controlEnd":{"type":"number","description":"End step for control."},"polished":{"type":"number","description":"How polished is the surface? 0 is like a rough surface, 1 is like a mirror","minimum":0,"maximum":1},"scheduler":{"type":"string","description":"The scheduler used to generate this asset"},"overrideEmbeddings":{"type":"boolean","description":"Override the embeddings of the model. Only your prompt and negativePrompt will be used. 
Use with caution."},"progressPercent":{"type":"number"},"factor":{"type":"number","description":"Contrast factor for Grayscale detector"},"mask":{"type":"string","description":"The mask used for the asset generation or editing"},"lowThreshold":{"type":"number","description":"Low threshold for Canny detector"},"depthFidelity":{"type":"number","description":"The depth fidelity if a depth image provided","minimum":0,"maximum":100},"raised":{"type":"number","description":"How raised is the surface? 0 is flat like water, 1 is like a very rough rock","minimum":0,"maximum":1},"copiedAt":{"type":"string","description":"The date when the asset was copied to a project"},"contours":{"type":"array","items":{"type":"array","items":{"type":"array","items":{"type":"array","items":{"type":"number"}}}}},"negativePrompt":{"type":"string","description":"The negative prompt used to generate this asset"},"defaultParameters":{"type":"boolean","description":"If true, use the default parameters","default":false},"resizeOption":{"type":"number","description":"Size proportion of the input image in the output.","minimum":0.1,"maximum":1},"outputIndex":{"type":"number"},"saveFlipbook":{"type":"boolean","description":"Save a flipbook of the texture. Deactivated when the input texture is larger than 2048x2048px","default":true},"guidance":{"type":"number","description":"The guidance used to generate this asset"},"numInferenceSteps":{"type":"number","description":"The number of denoising steps for each image generation.","minimum":5,"maximum":50},"hdr":{"type":"number"},"clustering":{"type":"boolean","description":"Activate clustering.","default":false},"shiny":{"type":"number","description":"How shiny is the surface? 
0 is like a matte surface, 1 is like a diamond","minimum":0,"maximum":1},"colorCorrection":{"type":"boolean","description":"Ensure upscaled tiles have the same color histogram as the original tile.","default":true},"sketch":{"type":"boolean","description":"Activate sketch detection instead of canny.","default":false},"overlapPercentage":{"type":"number","description":"Overlap percentage for the output image.","minimum":0,"maximum":0.5},"prompt":{"type":"string","description":"The prompt that guided the asset generation or editing"},"resultImage":{"type":"boolean","description":"Boolean to enable outputting the cut-out object.","default":false},"cannyStructureImage":{"type":"string","description":"The control image already processed by canny detector. Must reference an existing AssetId."},"geometryEnforcement":{"type":"number","description":"Apply extra control to the Skybox 360 geometry.\nThe higher the value, the more the 360 geometry will influence the generated skybox image.\n\nUse with caution. 
Default is adapted to the other parameters.","minimum":0,"maximum":100},"targetWidth":{"type":"number","description":"Target width for the upscaled image, take priority over scaling factor","minimum":1024,"maximum":16000}},"required":["kind","type"]},"automaticCaptioning":{"type":"string","description":"Automatic captioning of the asset"},"description":{"type":"string","description":"The description, it will contain in priority:\n- the manual description\n- the advanced captioning when the asset is used in training flow\n- the automatic captioning"},"privacy":{"type":"string","description":"The privacy of the asset","enum":["private","public","unlisted"]},"mimeType":{"type":"string","description":"The mime type of the asset (example: \"image/png\")"},"source":{"type":"string","description":"source of the asset","enum":["3d23d","3d23d:texture","3d:texture","3d:texture:albedo","3d:texture:metallic","3d:texture:mtl","3d:texture:normal","3d:texture:roughness","audio2audio","background-removal","canvas","canvas-drawing","canvas-export","detection","generative-fill","image-prompt-editing","img23d","img2img","img2video","inference-control-net","inference-control-net-img","inference-control-net-inpainting","inference-control-net-inpainting-ip-adapter","inference-control-net-ip-adapter","inference-control-net-reference","inference-control-net-texture","inference-img","inference-img-ip-adapter","inference-img-texture","inference-in-paint","inference-in-paint-ip-adapter","inference-reference","inference-reference-texture","inference-txt","inference-txt-ip-adapter","inference-txt-texture","patch","pixelization","reframe","restyle","segment","segmentation-image","segmentation-mask","skybox-3d","skybox-base-360","skybox-hdri","texture","texture:albedo","texture:ao","texture:edge","texture:height","texture:metallic","texture:normal","texture:smoothness","txt23d","txt2audio","txt2img","txt2video","unknown","uploaded","uploaded-3d","uploaded-audio","uploaded-avatar","uploaded-video"
,"upscale","upscale-skybox","upscale-texture","upscale-video","vectorization","video2img","video2video"]},"ownerId":{"type":"string","description":"The owner (project) ID (example: \"proj_23tlk332lkht3kl2\" or \"team_dlkhgs23tlk3hlkth32lkht3kl2\" for old teams)"},"createdAt":{"type":"string","description":"The asset creation date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"firstFrame":{"type":"object","description":"The video asset's first frame.\n\nContains the assetId and the url of the first frame.","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"editCapabilities":{"type":"array","description":"List of edit capabilities","items":{"type":"string","enum":["DETECTION","GENERATIVE_FILL","PIXELATE","PROMPT_EDITING","REFINE","REFRAME","REMOVE_BACKGROUND","SEGMENTATION","UPSCALE","UPSCALE_360","VECTORIZATION"]}},"embedding":{"type":"array","description":"The embedding of the asset when requested.\n\nOnly available when an asset can be embedded (ie: not Detection maps)","items":{"type":"number"}},"id":{"type":"string","description":"The asset ID (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"updatedAt":{"type":"string","description":"The asset last update date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"thumbnail":{"type":"object","description":"The asset's thumbnail.\n\nContains the assetId and the url of the thumbnail.","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"nsfw":{"type":"array","description":"The NSFW labels","items":{"type":"string"}},"kind":{"type":"string","description":"The kind of asset","enum":["3d","audio","document","image","image-hdr","json","video"]},"lastFrame":{"type":"object","description":"The video asset's last frame.\n\nContains the assetId and the url of the last 
frame.","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"originalFileUrl":{"type":"string","description":"The original file url.\n\nContains the url of the original file, without any conversion. Only available for some specific video, audio and threeD assets.\nIs only specified if the given asset data has been replaced with a new file during the creation of the asset."},"authorId":{"type":"string","description":"The author user ID (example: \"dcf121faaa1a0a0bbbd9ca1b73d62aea\")"},"url":{"type":"string","description":"Signed URL to get the asset content"},"isHidden":{"type":"boolean","description":"Whether the asset is hidden."},"tags":{"type":"array","description":"The associated tags (example: [\"sci-fi\", \"landscape\"])","items":{"type":"string"}},"outputIndex":{"type":"number","description":"The output index of the asset within a job\nThis index is a positive integer that starts at 0\nIt is used to differentiate between multiple outputs of the same job\nIf the job has only one output, this index is 0"},"collectionIds":{"type":"array","description":"A list of CollectionId this asset belongs to","items":{"type":"string"}},"properties":{"type":"object","description":"The properties of the asset, content may depend on the kind of asset returned","properties":{"transcription":{"type":"object","description":"Transcription of the audio","properties":{"text":{"type":"string"}},"required":["text"]},"hasNormals":{"type":"boolean","description":"Whether the mesh has normal vectors"},"format":{"type":"string","description":"Format of the mesh file (e.g. 
'glb', etc.)"},"description":{"type":"string","description":"Description of the audio"},"bitrate":{"type":"number","description":"Bitrate of the media in bits per second"},"boneCount":{"type":"number","description":"Number of bones if skeleton exists"},"classification":{"type":"string","description":"Classification of the audio","enum":["effect","interview","music","other","sound","speech","text","unknown"]},"sampleRate":{"type":"number","description":"Sample rate of the media in Hz"},"faceCount":{"type":"number","description":"Number of faces/triangles in the mesh"},"vertexCount":{"type":"number","description":"Number of vertices in the mesh"},"animationFrameCount":{"type":"number","description":"Number of animation frames if animations exist"},"duration":{"type":"number","description":"Duration of the media in seconds"},"frameRate":{"type":"number","description":"Frame rate of the video in frames per second"},"nbFrames":{"type":"number","description":"Number of frames in the video"},"channels":{"type":"number","description":"Number of channels of the audio"},"size":{"type":"number"},"hasSkeleton":{"type":"boolean","description":"Whether the mesh has bones/skeleton"},"width":{"type":"number"},"hasAnimations":{"type":"boolean","description":"Whether the mesh has animations"},"codecName":{"type":"string","description":"Codec name of the media"},"hasUVs":{"type":"boolean","description":"Whether the mesh has UV coordinates"},"dimensions":{"type":"array","description":"Bounding box dimensions [width, height, depth]","items":{}},"height":{"type":"number"}},"required":["size"]},"status":{"type":"string","description":"The actual 
status","enum":["error","pending","success"]}},"required":["authorId","collectionIds","createdAt","editCapabilities","id","kind","metadata","mimeType","ownerId","privacy","properties","source","status","tags","updatedAt","url"]}}},"PostPatchInferencesRequest":{"type":"object","required":["image"],"properties":{"patch":{"type":"object","description":"The image to be merged.","properties":{"mode":{"type":"string","description":"The mode of merging the images: `override` or `erase`.","enum":["erase","override"],"default":"override"},"image":{"type":"string","description":"The source of the image to be merged, as a data URL (example: \"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVQYV2NgYAAAAAMAAWgmWQ0AAAAASUVORK5CYII=\") or the asset ID (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"}},"required":["image","mode"]},"image":{"type":"string","description":"The input image as a data URL (example: \"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVQYV2NgYAAAAAMAAWgmWQ0AAAAASUVORK5CYII=\") or the asset ID (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"backgroundColor":{"type":"string","description":"The background color as an hexadecimal code (ex: \"#FFFFFF\"), an html color (ex: \"red\") or \"transparent\" if \"format\" is \"png\". Default to \"white\""},"format":{"type":"string","description":"The output format. Default to \"png\"","enum":["jpeg","png"]},"position":{"type":"object","description":"The position of the image to be merged.","properties":{"x":{"type":"number","description":"The X position of the image to be merged, in pixels."},"y":{"type":"number","description":"The Y position of the image to be merged, in pixels."}},"required":["x","y"]},"allowOverflow":{"type":"boolean","description":"Whether to allow the merged image to extend the size of the original (when x or y are negative or merged image is bigger)"},"crop":{"type":"object","description":"The crop operation to apply to the image. 
Applied before any operation. For the backgroundColor: rgba, hex or named color are supported.","properties":{"backgroundColor":{"type":"string"},"top":{"type":"number"},"left":{"type":"number"},"width":{"type":"number"},"height":{"type":"number"}},"required":["height","left","top","width"]}}},"PostModelActionByModelIdRequest":{"type":"object","required":["action"],"properties":{"action":{"type":"string","description":"The action to perform on the model","enum":["restrict","unload"]},"value":{"type":"string","description":"The value to use for the action\nFor example, the value for the restrict action is the plan name"}}},"PostModelPresetByModelIdResponse":{"type":"object","required":["preset"],"properties":{"preset":{"type":"object","properties":{"createdAt":{"type":"string","description":"The preset creation date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"isDefault":{"type":"boolean","description":"Whether the preset is the default preset of the model (example: true)"},"modelId":{"type":"string","description":"The model ID (example: \"eyVcnFJcR92BxBkz7N6g5w\")"},"id":{"type":"string","description":"The preset ID (example: \"eyVcnFJcR92BxBkz7N6g5w\")"},"authorId":{"type":"string","description":"The author user ID (example: \"VFhihHKMRZyDDnZAJwLb2Q\")"},"ownerId":{"type":"string","description":"The owner ID (example: \"VFhihHKMRZyDDnZAJwLb2Q\")"},"parameters":{"type":"object","description":"The inference parameters","properties":{"ipAdapterImageIds":{"type":"array","description":"Asset id of the input IpAdapter images","items":{"type":"string"}},"styleFidelity":{"type":"number","description":"If style_fidelity=1.0, control more important, else if style_fidelity=0.0, prompt more important, else balanced\nOnly for \"reference\" inference type","minimum":0,"maximum":1},"controlEnd":{"type":"number","description":"Specifies how long the ControlNet guidance should be applied during the inference process.\n\nOnly available for Flux.1-dev based 
models.\n\nThe value represents the percentage of total inference steps where the ControlNet guidance is active.\nFor example:\n- 1.0: ControlNet guidance is applied during all inference steps\n- 0.5: ControlNet guidance is only applied during the first half of inference steps\n\nDefault values:\n- 0.5 for Canny modality\n- 0.6 for all other modalities","minimum":0.1,"maximum":1},"modality":{"type":"string","description":"The modality associated with the control image used for the generation: it can either be an object with a combination of maximum\n\nFor models of SD1.5 family:\n - up to 3 modalities from `canny`, `pose`, `depth`, `lines`, `seg`, `scribble`, `lineart`, `normal-map`, `illusion`\n - or one of the following presets: `character`, `landscape`, `city`, `interior`.\n\nFor models of the SDXL family:\n - up to 3 modalities from `canny`, `pose`, `depth`, `seg`, `illusion`, `scribble`\n - or one of the following presets: `character`, `landscape`.\n\nFor models of the FLUX schnell or dev families:\n- one modality from: `canny`, `tile`, `depth`, `blur`, `pose`, `gray`, `low-quality`\n\nOptionally, you can associate a value to these modalities or presets. The value must be within `]0.0, 1.0]`.\n\nExamples:\n- `canny`\n- `depth:0.5,pose:1.0`\n- `canny:0.5,depth:0.5,lines:0.3`\n- `landscape`\n- `character:0.5`\n- `illusion:1`\n\nNote: if you use a value that is not supported by the model family, this will result in an error.","enum":["blur","canny","depth","gray","illusion","lineart","lines","low-quality","normal-map","pose","scribble","seg","sketch","tile"]},"seed":{"type":"string","description":"Used to reproduce previous results. Default: randomly generated number."},"strength":{"type":"number","description":"Controls the noise intensity introduced to the input image, where a value of 1.0 completely erases the original image's details. Available for img2img and inpainting. 
(within [0.01, 1.0], default: 0.75)","minimum":0.01,"maximum":1},"ipAdapterType":{"type":"string","description":"The type of IP Adapter model to use. Must be one of [`style`, `character`], defaults to `style`","enum":["character","style"],"default":"style"},"aspectRatio":{"type":"string","description":"The aspect ratio of the generated images. Only used for the model flux.1.1-pro-ultra.\nThe aspect ratio is a string formatted as \"width:height\" (example: \"16:9\").","enum":["16:9","1:1","21:9","2:3","3:2","3:4","4:3","4:5","5:4","9:16","9:21"],"default":"1:1"},"type":{"type":"string","description":"The type of inference to use. Example: txt2img, img2img, etc.\n\nSelecting the right type will condition the expected parameters.\n\nNote: if model.type is `sd-xl*` or `sd-1_5*`, when using the `\"inpaint\"` inference type, Scenario determines the best available `baseModel` for a given `modelId`: one of `[\"stable-diffusion-inpainting\", \"stable-diffusion-xl-1.0-inpainting-0.1\"]` will be used.","enum":["controlnet","controlnet_img2img","controlnet_inpaint","controlnet_inpaint_ip_adapter","controlnet_ip_adapter","controlnet_reference","controlnet_texture","img2img","img2img_ip_adapter","img2img_texture","inpaint","inpaint_ip_adapter","outpaint","reference","reference_texture","txt2img","txt2img_ip_adapter","txt2img_texture"]},"ipAdapterImage":{"type":"string","description":"Signed URL to display the IpAdapter image"},"scheduler":{"type":"string","description":"The scheduler to use to override the default configured for the model. 
See detailed documentation for more details.","enum":["DDIMScheduler","DDPMScheduler","DEISMultistepScheduler","DPMSolverMultistepScheduler","DPMSolverSinglestepScheduler","EulerAncestralDiscreteScheduler","EulerDiscreteScheduler","HeunDiscreteScheduler","KDPM2AncestralDiscreteScheduler","KDPM2DiscreteScheduler","LCMScheduler","LMSDiscreteScheduler","PNDMScheduler","TCDScheduler","UniPCMultistepScheduler"]},"disableMerging":{"type":"boolean","description":"If set to true, the entire input image will likely change during inpainting. This results in faster inferences, but the output image will be harder to integrate if the input is just a small part of a larger image.","default":false},"disableModalityDetection":{"type":"boolean","description":"If false, the process uses the given image to detect the modality.\nIf true (default), the process will not try to detect the modality of the given image.\n\nFor example:\nwith `pose` modality and `false` value, the process will detect the pose of people in the given image\nwith `depth` modality and `false` value, the process will detect the depth of the given image\nwith `scribble` modality and `true`value, the process will use the given image as a scribble\n\n⚠️ For models of the FLUX schnell or dev families, this parameter is ignored. The modality detection is always disabled. ⚠️","default":true},"ipAdapterImages":{"type":"array","description":"Signed URL to display the IpAdapter images","items":{"type":"string"}},"baseModelId":{"type":"string","description":"The base model to use for the inference. 
Only Flux LoRA models can use this parameter.\nAllowed values are available in the model's attribute: `compliantModelIds`"},"controlStart":{"type":"number","description":"Specifies the starting point of the ControlNet guidance during the inference process.\n\nOnly available for Flux.1-dev based models.\n\nThe value represents the percentage of total inference steps where the ControlNet guidance starts.\nFor example:\n- 0.0: ControlNet guidance starts at the beginning of the inference steps\n- 0.5: ControlNet guidance starts at the middle of the inference steps","minimum":0,"maximum":0.9},"height":{"type":"number","description":"The height of the generated images, must be a 8 multiple (within [64, 2048], default: 512)\nIf model.type is `sd-xl`, `sd-xl-lora`, `sd-xl-composition` the height must be within [512, 2048]\nIf model.type is `sd-1_5`, the height must be within [64, 1024]\nIf model.type is `flux.1.1-pro-ultra`, you can use the aspectRatio parameter instead","minimum":64,"maximum":2048},"mask":{"type":"string","description":"Signed URL to display the mask image"},"controlImageId":{"type":"string","description":"Asset id of the controlnet input image"},"image":{"type":"string","description":"Signed URL to display the input image"},"imageId":{"type":"string","description":"Asset id of the input image"},"ipAdapterImageId":{"type":"string","description":"Asset id of the input IpAdapter image"},"ipAdapterScale":{"type":"number","description":"IpAdapter scale factor (within [0.0, 1.0], default: 0.9).","minimum":0,"maximum":1},"modelEpoch":{"type":"string","description":"The epoch of the model to use for the inference. 
Only available for Flux Lora Trained models."},"hideResults":{"type":"boolean","description":"If set, generated assets will be hidden and not returned in the list of images of the inference\nor when listing assets (default: false)","default":false},"ipAdapterScales":{"type":"array","description":"IpAdapter scale factors (within [0.0, 1.0], default: 0.9).","items":{"type":"number"}},"maskId":{"type":"string","description":"Asset id of the mask image"},"referenceAdain":{"type":"boolean","description":"Whether to use reference adain\nOnly for \"reference\" inference type","default":false},"negativePrompt":{"type":"string","description":"The prompt not to guide the image generation, ignored when guidance < 1 (example: \"((ugly face))\")\nFor Flux based model (not Fast-Flux): requires negativePromptStrength > 0 and active only for inference types txt2img / img2img / controlnet."},"intermediateImages":{"type":"boolean","description":"Enable or disable the intermediate images generation (default: false)","default":false},"concepts":{"type":"array","items":{"type":"object","properties":{"modelId":{"type":"string","description":"The model ID (example: \"model_eyVcnFJcR92BxBkz7N6g5w\")"},"modelEpoch":{"type":"string","description":"The epoch of the model (example: \"000001\")\nOnly available for Flux Lora Trained models"},"scale":{"type":"number","description":"The scale of the model (example: 1.0)\nFor Flux Kontext Prompt Editing, the scale is between 0 and 2.","minimum":-2,"maximum":2}},"required":["modelId","scale"]}},"controlImage":{"type":"string","description":"Signed URL to display the controlnet input image"},"guidance":{"type":"number","description":"Controls how closely the generated image follows the prompt. Higher values result in stronger adherence to the prompt. 
Default and allowed values depend on the model type:\n- For Flux dev models, the default is 3.5 and allowed values are within [0, 10]\n- For Flux pro models, the default is 3 and allowed values are within [2, 5]\n- For SDXL models, the default is 6 and allowed values are within [0, 20]\n- For SD1.5 models, the default is 7.5 and allowed values are within [0, 20]","minimum":0,"maximum":20},"numInferenceSteps":{"type":"number","description":"The number of denoising steps for each image generation (within [1, 150], default: 30)","minimum":1,"maximum":150},"numSamples":{"type":"number","description":"The number of images to generate (within [1, 128], default: 4)","minimum":1,"maximum":128},"width":{"type":"number","description":"The width of the generated images, must be a 8 multiple (within [64, 2048], default: 512)\nIf model.type is `sd-xl`, `sd-xl-lora`, `sd-xl-composition` the width must be within [512, 2048]\nIf model.type is `sd-1_5`, the width must be within [64, 1024]\nIf model.type is `flux.1.1-pro-ultra`, you can use the aspectRatio parameter instead","minimum":64,"maximum":2048},"negativePromptStrength":{"type":"number","description":"Only applicable for flux-dev based models for `txt2img`, `img2img`, and `controlnet` inference types.\n\nControls the influence of the negative prompt. Default 0 means the negative prompt has no effect. Higher values increase negative prompt influence.\nMust be > 0 if negativePrompt is provided.","minimum":0,"maximum":10},"referenceAttn":{"type":"boolean","description":"Whether to use reference query for self attention's context\nOnly for \"reference\" inference type","default":false},"prompt":{"type":"string","description":"Full text prompt including the model placeholder. 
(example: \"an illustration of phoenix in a fantasy world, flying over a mountain, 8k, bokeh effect\")"}},"required":["prompt","type"]},"updatedAt":{"type":"string","description":"The preset last update date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"}},"required":["authorId","createdAt","id","isDefault","modelId","ownerId","parameters","updatedAt"]}}},"PostAssetRequest":{"type":"object","required":["name"],"properties":{"image":{"type":"string","description":"The image to upload in base64 format string."},"canvas":{"type":"string","description":"The canvas to upload as a stringified JSON. Ignored if `image` is provided."},"thumbnail":{"type":"string","description":"The thumbnail for the canvas in base64 format string. Ignored if `image` is provided."},"hide":{"type":"boolean","description":"Toggles the hidden status of the asset."},"collectionIds":{"type":"array","description":"The IDs of the collections to add the asset to. If provided, the new asset will be added to the collections.","items":{"type":"string"}},"name":{"type":"string","description":"The original file name of the image (example: \"low-res-image.jpg\")."},"parentId":{"type":"string","description":"Specifies the parent asset Id for the 
asset."}}},"PutUserNotificationsByTokenRequest":{"type":"object","required":["auto-refill-disabled","creative-units-threshold-100","creative-units-threshold-50","creative-units-threshold-90","model-train-succeeded","model-transfer","model-validation-failed","soft-deletion-first-call","soft-deletion-last-call"],"properties":{"model-validation-failed":{"type":"object","properties":{"mobile":{"type":"boolean"},"email":{"type":"boolean"}},"required":["email","mobile"]},"creative-units-threshold-50":{"type":"object","properties":{"mobile":{"type":"boolean"},"email":{"type":"boolean"}},"required":["email","mobile"]},"model-transfer":{"type":"object","properties":{"mobile":{"type":"boolean"},"email":{"type":"boolean"}},"required":["email","mobile"]},"auto-refill-disabled":{"type":"object","properties":{"mobile":{"type":"boolean"},"email":{"type":"boolean"}},"required":["email","mobile"]},"soft-deletion-first-call":{"type":"object","properties":{"mobile":{"type":"boolean"},"email":{"type":"boolean"}},"required":["email","mobile"]},"creative-units-threshold-90":{"type":"object","properties":{"mobile":{"type":"boolean"},"email":{"type":"boolean"}},"required":["email","mobile"]},"soft-deletion-last-call":{"type":"object","properties":{"mobile":{"type":"boolean"},"email":{"type":"boolean"}},"required":["email","mobile"]},"creative-units-threshold-100":{"type":"object","properties":{"mobile":{"type":"boolean"},"email":{"type":"boolean"}},"required":["email","mobile"]},"model-train-succeeded":{"type":"object","properties":{"mobile":{"type":"boolean"},"email":{"type":"boolean"}},"required":["email","mobile"]}}},"GetPublicAssetsByAssetIdResponse":{"type":"object","required":["asset"],"properties":{"asset":{"type":"object","properties":{"preview":{"type":"object","description":"The asset's preview.\n\nContains the assetId and the url of the 
preview.","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"metadata":{"type":"object","description":"Metadata of the asset with some additional information","properties":{"maxIterations":{"type":"number"},"modality":{"type":"string","description":"Modality to detect","enum":["canny","depth","grayscale","lineart_anime","mlsd","normal","pose","scribble","segmentation","sketch"],"default":"canny"},"tileStyle":{"type":"boolean","description":"If set to true, during the upscaling process, the model will match tiles of the source image with tiles of the style image(s). This will result in a more coherent restyle. Works best with style images that have a similar composition.","default":false},"modelId":{"type":"string","description":"The modelId used to generate this asset"},"sourceProjectId":{"type":"string"},"bbox":{"type":"array","description":"A bounding box around the object of interest, in the format [x1, y1, x2, y2].","items":{}},"keypointThreshold":{"type":"number","description":"How polished is the surface? 0 is like a rough surface, 1 is like a mirror","minimum":0,"maximum":1},"type":{"type":"string","description":"The type of the asset. 
Ex: 'inference-txt2img' will represent an asset generated from a text to image model","enum":["3d-texture","3d-texture-albedo","3d-texture-metallic","3d-texture-mtl","3d-texture-normal","3d-texture-roughness","3d23d","3d23d-texture","audio2audio","background-removal","canvas","canvas-drawing","canvas-export","detection","generative-fill","image-prompt-editing","img23d","img2img","img2video","inference-controlnet","inference-controlnet-img2img","inference-controlnet-inpaint","inference-controlnet-inpaint-ip-adapter","inference-controlnet-ip-adapter","inference-controlnet-reference","inference-controlnet-texture","inference-img2img","inference-img2img-ip-adapter","inference-img2img-texture","inference-inpaint","inference-inpaint-ip-adapter","inference-reference","inference-reference-texture","inference-txt2img","inference-txt2img-ip-adapter","inference-txt2img-texture","patch","pixelization","reframe","restyle","segment","segmentation-image","segmentation-mask","skybox-3d","skybox-base-360","skybox-hdri","texture","texture-albedo","texture-ao","texture-edge","texture-height","texture-metallic","texture-normal","texture-smoothness","txt23d","txt2audio","txt2img","txt2video","unknown","uploaded","uploaded-3d","uploaded-audio","uploaded-avatar","uploaded-video","upscale","upscale-skybox","upscale-texture","upscale-video","vectorization","video2img","video2video"]},"text":{"type":"string","description":"A textual description / keywords describing the object of interest.","maxLength":100},"height":{"type":"number"},"nbMasks":{"type":"number"},"creativityDecay":{"type":"number","description":"Amount of decay in creativity over the upscale process. 
The lower the value, the less the creativity will be preserved over the upscale process.","minimum":0,"maximum":100},"layerDifference":{"type":"number"},"scalingFactor":{"type":"number","description":"Scaling factor (when `targetWidth` not specified)","minimum":1,"maximum":16},"inputFidelity":{"type":"string","description":"When set to `high`, allows better preservation of details from the input images in the output.\nThis is especially useful when using images that contain elements like faces or logos that\nrequire accurate preservation in the generated image.\n\nYou can provide multiple input images that will all be preserved with high fidelity, but keep\nin mind that the first image will be preserved with richer textures and finer details, so if\nyou include elements such as faces, consider placing them in the first image.\n\nOnly available for the `gpt-image-1` model.","enum":["high","low"],"default":"low"},"modelType":{"type":"string","description":"The type of the generator used","enum":["custom","flux.1","flux.1-composition","flux.1-kontext-dev","flux.1-kontext-lora","flux.1-krea-dev","flux.1-krea-lora","flux.1-lora","flux.1-pro","flux.1.1-pro-ultra","flux1.1-pro","gpt-image-1","sd-1_5","sd-1_5-composition","sd-1_5-lora","sd-xl","sd-xl-composition","sd-xl-lora"]},"numOutputs":{"type":"number","description":"The number of outputs to generate.","minimum":1,"maximum":8},"angular":{"type":"number","description":"How angular is the surface? 
0 is like a sphere, 1 is like a mechanical object","minimum":0,"maximum":1},"targetHeight":{"type":"number","description":"The target height of the output image.","minimum":0,"maximum":2048},"concepts":{"type":"array","description":"Flux Kontext LoRA to style the image.\nFor Flux Kontext Prompt Editing.","items":{"type":"object","properties":{"modelId":{"type":"string","description":"The model ID (example: \"model_eyVcnFJcR92BxBkz7N6g5w\")"},"modelEpoch":{"type":"string","description":"The epoch of the model (example: \"000001\")\nOnly available for Flux Lora Trained models"},"scale":{"type":"number","description":"The scale of the model (example: 1.0)\nFor Flux Kontext Prompt Editing, the scale is between 0 and 2.","minimum":-2,"maximum":2}},"required":["modelId","scale"]}},"size":{"type":"number"},"spliceThreshold":{"type":"number"},"dilate":{"type":"number","description":"The number of pixels to dilate the result masks.","minimum":0,"maximum":30},"style":{"type":"string","enum":["3d-cartoon","3d-rendered","anime","cartoon","cinematic","claymation","cloud-skydome","comic","cyberpunk","enchanted","fantasy","ink","manga","manga-color","minimalist","neon-tron","oil-painting","pastel","photo","photography","psychedelic","retro-fantasy","scifi-concept-art","space","standard","whimsical"]},"imageFidelity":{"type":"number","description":"Strengthen the similarity to the original image during the upscale. 
Default: optimized for your preset and style.","minimum":0,"maximum":100},"inputLocation":{"type":"string","description":"Location of the input image in the output.","enum":["bottom","left","middle","right","top"],"default":"middle"},"seed":{"type":"string","description":"The seed used to generate this asset"},"preset":{"type":"string"},"trainingImage":{"type":"boolean"},"resultMask":{"type":"boolean","description":"Boolean to enable returning the masks (binary image) in the response.","default":true},"lockExpiresAt":{"type":"string","description":"The ISO timestamp when the lock on the canvas will expire"},"baseModelId":{"type":"string","description":"The baseModelId that may be changed at inference time"},"pathPrecision":{"type":"number"},"rootParentId":{"type":"string"},"sharpen":{"type":"boolean","description":"Sharpen tiles.","default":false},"cornerThreshold":{"type":"number"},"styleImages":{"type":"array","description":"List of style images. Most of the time, only one image is enough. They must be existing AssetIds.","items":{"type":"string"}},"texture":{"type":"string","description":"The asset to convert into texture maps. Must reference an existing AssetId."},"filterSpeckle":{"type":"number"},"minThreshold":{"type":"number","description":"Minimum threshold for Grayscale conversion"},"maxThreshold":{"type":"number","description":"Maximum threshold for Grayscale conversion"},"styleImagesFidelity":{"type":"number","description":"Condition the influence of the style image(s). The higher the value, the more the style images will influence the upscaled image.","minimum":0,"maximum":100},"depthImage":{"type":"string","description":"The control image processed by depth estimator. 
Must reference an existing AssetId."},"parentId":{"type":"string"},"parentJobId":{"type":"string"},"width":{"type":"number","description":"The width of the rendered image.","minimum":1024,"maximum":2048},"strength":{"type":"number","description":"The strength\n\nOnly available for the `flux-kontext` LoRA model."},"inferenceId":{"type":"string","description":"The id of the Inference describing how this image was generated"},"lengthThreshold":{"type":"number"},"originalAssetId":{"type":"string"},"backgroundOpacity":{"type":"number","description":"Int to set between 0 and 255 for the opacity of the background in the result images.","minimum":0,"maximum":255},"aspectRatio":{"type":"string","description":"The optional aspect ratio given for the generation, only applicable for some models"},"detailsLevel":{"type":"number","description":"Amount of details to remove or add","minimum":-50,"maximum":50},"fractality":{"type":"number","description":"Determine the scale at which the upscale process works.\n- With a small value, the upscale works at the largest scale, resulting in fewer added details and more coherent images. Ideal for portraits, for example.\n- With a large value, the upscale works at the smallest scale, resulting in more added details and more hallucinations. Ideal for landscapes, for example.\n\n(info): A small value is slower and more expensive to run.","minimum":0,"maximum":100},"horizontalExpansionRatio":{"type":"number","description":"(deprecated) Horizontal expansion ratio.","minimum":1,"maximum":2},"points":{"type":"array","description":"List of points (label, x, y) in the image where label = 0 for background and 1 for object.","items":{"type":"array","items":{}}},"mode":{"type":"string"},"colorPrecision":{"type":"number"},"promptFidelity":{"type":"number","description":"Increase the fidelity to the prompt during upscale. 
Default: optimized for your preset and style.","minimum":0,"maximum":100},"betterQuality":{"type":"boolean","description":"Remove small dark spots (i.e. “pepper”) and connect small bright cracks.","default":false},"removeBackground":{"type":"boolean","description":"Remove background for Grayscale detector","default":true},"creativity":{"type":"number","description":"Allow the generation of \"hallucinations\" during the upscale process, which adds additional details and deviates from the original image. Default: optimized for your preset and style.","minimum":0,"maximum":100},"structureFidelity":{"type":"number","description":"Strength for the input image structure preservation","minimum":0,"maximum":100},"imageType":{"type":"string","description":"Preserve the seamless properties of skybox or texture images. Input has to be of same type (seamless).","enum":["seamfull","skybox","texture"],"default":"seamfull"},"image":{"type":"string","description":"The input image to process. Must reference an existing AssetId or be a data URL."},"thumbnail":{"type":"object","description":"The thumbnail of the canvas","properties":{"assetId":{"type":"string","description":"The AssetId of the image used as a thumbnail for the canvas (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"url":{"type":"string","description":"The url of the image used as a thumbnail for the canvas"}},"required":["assetId","url"]},"structureImage":{"type":"string","description":"The control image for structure. A canny detector will be applied to this image. 
Must reference an existing AssetId."},"invert":{"type":"boolean","description":"To invert the relief","default":false},"kind":{"type":"string","enum":["3d","audio","document","image","image-hdr","json","video"]},"colorMode":{"type":"string"},"highThreshold":{"type":"number","description":"High threshold for Canny detector"},"verticalExpansionRatio":{"type":"number","description":"(deprecated) Vertical expansion ratio.","minimum":1,"maximum":2},"halfMode":{"type":"boolean"},"name":{"type":"string"},"negativePromptStrength":{"type":"number","description":"Controls the influence of the negative prompt. Default 0 means the negative prompt has no effect. Higher values increase negative prompt influence.\nMust be > 0 if negativePrompt is provided.","minimum":0,"maximum":10},"refinementSteps":{"type":"number","description":"Additional refinement steps before scaling.\n\nIf scalingFactor == 1, the refinement process will be applied (1 + refinementSteps) times.\nIf scalingFactor > 1, the refinement process will be applied refinementSteps times.","minimum":0,"maximum":4},"referenceImages":{"type":"array","description":"The reference images used for the asset generation or editing","items":{"type":"string"}},"styleFidelity":{"type":"number","description":"The higher the value the more it will look like the style image(s)","minimum":0,"maximum":100},"resultContours":{"type":"boolean","description":"Boolean to output the contours.","default":false},"controlEnd":{"type":"number","description":"End step for control."},"polished":{"type":"number","description":"How polished is the surface? 0 is like a rough surface, 1 is like a mirror","minimum":0,"maximum":1},"scheduler":{"type":"string","description":"The scheduler used to generate this asset"},"overrideEmbeddings":{"type":"boolean","description":"Override the embeddings of the model. Only your prompt and negativePrompt will be used. 
Use with caution."},"progressPercent":{"type":"number"},"factor":{"type":"number","description":"Contrast factor for Grayscale detector"},"mask":{"type":"string","description":"The mask used for the asset generation or editing"},"lowThreshold":{"type":"number","description":"Low threshold for Canny detector"},"depthFidelity":{"type":"number","description":"The depth fidelity if a depth image provided","minimum":0,"maximum":100},"raised":{"type":"number","description":"How raised is the surface? 0 is flat like water, 1 is like a very rough rock","minimum":0,"maximum":1},"copiedAt":{"type":"string","description":"The date when the asset was copied to a project"},"contours":{"type":"array","items":{"type":"array","items":{"type":"array","items":{"type":"array","items":{"type":"number"}}}}},"negativePrompt":{"type":"string","description":"The negative prompt used to generate this asset"},"defaultParameters":{"type":"boolean","description":"If true, use the default parameters","default":false},"resizeOption":{"type":"number","description":"Size proportion of the input image in the output.","minimum":0.1,"maximum":1},"outputIndex":{"type":"number"},"saveFlipbook":{"type":"boolean","description":"Save a flipbook of the texture. Deactivated when the input texture is larger than 2048x2048px","default":true},"guidance":{"type":"number","description":"The guidance used to generate this asset"},"numInferenceSteps":{"type":"number","description":"The number of denoising steps for each image generation.","minimum":5,"maximum":50},"hdr":{"type":"number"},"clustering":{"type":"boolean","description":"Activate clustering.","default":false},"shiny":{"type":"number","description":"How shiny is the surface? 
0 is like a matte surface, 1 is like a diamond","minimum":0,"maximum":1},"colorCorrection":{"type":"boolean","description":"Ensure upscaled tiles have the same color histogram as the original tiles.","default":true},"sketch":{"type":"boolean","description":"Activate sketch detection instead of canny.","default":false},"overlapPercentage":{"type":"number","description":"Overlap percentage for the output image.","minimum":0,"maximum":0.5},"prompt":{"type":"string","description":"The prompt that guided the asset generation or editing"},"resultImage":{"type":"boolean","description":"Boolean to enable outputting the cut-out object.","default":false},"cannyStructureImage":{"type":"string","description":"The control image already processed by canny detector. Must reference an existing AssetId."},"geometryEnforcement":{"type":"number","description":"Apply extra control to the Skybox 360 geometry.\nThe higher the value, the more the 360 geometry will influence the generated skybox image.\n\nUse with caution. 
Default is adapted to the other parameters.","minimum":0,"maximum":100},"targetWidth":{"type":"number","description":"Target width for the upscaled image, take priority over scaling factor","minimum":1024,"maximum":16000}},"required":["kind","type"]},"automaticCaptioning":{"type":"string","description":"Automatic captioning of the asset"},"description":{"type":"string","description":"The description, it will contain in priority:\n- the manual description\n- the advanced captioning when the asset is used in training flow\n- the automatic captioning"},"privacy":{"type":"string","description":"The privacy of the asset","enum":["private","public","unlisted"]},"mimeType":{"type":"string","description":"The mime type of the asset (example: \"image/png\")"},"source":{"type":"string","description":"source of the asset","enum":["3d23d","3d23d:texture","3d:texture","3d:texture:albedo","3d:texture:metallic","3d:texture:mtl","3d:texture:normal","3d:texture:roughness","audio2audio","background-removal","canvas","canvas-drawing","canvas-export","detection","generative-fill","image-prompt-editing","img23d","img2img","img2video","inference-control-net","inference-control-net-img","inference-control-net-inpainting","inference-control-net-inpainting-ip-adapter","inference-control-net-ip-adapter","inference-control-net-reference","inference-control-net-texture","inference-img","inference-img-ip-adapter","inference-img-texture","inference-in-paint","inference-in-paint-ip-adapter","inference-reference","inference-reference-texture","inference-txt","inference-txt-ip-adapter","inference-txt-texture","patch","pixelization","reframe","restyle","segment","segmentation-image","segmentation-mask","skybox-3d","skybox-base-360","skybox-hdri","texture","texture:albedo","texture:ao","texture:edge","texture:height","texture:metallic","texture:normal","texture:smoothness","txt23d","txt2audio","txt2img","txt2video","unknown","uploaded","uploaded-3d","uploaded-audio","uploaded-avatar","uploaded-video"
,"upscale","upscale-skybox","upscale-texture","upscale-video","vectorization","video2img","video2video"]},"ownerId":{"type":"string","description":"The owner (project) ID (example: \"proj_23tlk332lkht3kl2\" or \"team_dlkhgs23tlk3hlkth32lkht3kl2\" for old teams)"},"createdAt":{"type":"string","description":"The asset creation date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"firstFrame":{"type":"object","description":"The video asset's first frame.\n\nContains the assetId and the url of the first frame.","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"editCapabilities":{"type":"array","description":"List of edit capabilities","items":{"type":"string","enum":["DETECTION","GENERATIVE_FILL","PIXELATE","PROMPT_EDITING","REFINE","REFRAME","REMOVE_BACKGROUND","SEGMENTATION","UPSCALE","UPSCALE_360","VECTORIZATION"]}},"embedding":{"type":"array","description":"The embedding of the asset when requested.\n\nOnly available when an asset can be embedded (ie: not Detection maps)","items":{"type":"number"}},"id":{"type":"string","description":"The asset ID (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"updatedAt":{"type":"string","description":"The asset last update date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"thumbnail":{"type":"object","description":"The asset's thumbnail.\n\nContains the assetId and the url of the thumbnail.","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"nsfw":{"type":"array","description":"The NSFW labels","items":{"type":"string"}},"kind":{"type":"string","description":"The kind of asset","enum":["3d","audio","document","image","image-hdr","json","video"]},"lastFrame":{"type":"object","description":"The video asset's last frame.\n\nContains the assetId and the url of the last 
frame.","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"originalFileUrl":{"type":"string","description":"The original file url.\n\nContains the url of the original file, without any conversion. Only available for some specific video, audio and threeD assets.\nIs only specified if the given asset data has been replaced with a new file during the creation of the asset."},"authorId":{"type":"string","description":"The author user ID (example: \"dcf121faaa1a0a0bbbd9ca1b73d62aea\")"},"url":{"type":"string","description":"Signed URL to get the asset content"},"isHidden":{"type":"boolean","description":"Whether the asset is hidden."},"tags":{"type":"array","description":"The associated tags (example: [\"sci-fi\", \"landscape\"])","items":{"type":"string"}},"outputIndex":{"type":"number","description":"The output index of the asset within a job\nThis index is a positive integer that starts at 0\nIt is used to differentiate between multiple outputs of the same job\nIf the job has only one output, this index is 0"},"collectionIds":{"type":"array","description":"A list of CollectionId this asset belongs to","items":{"type":"string"}},"properties":{"type":"object","description":"The properties of the asset, content may depend on the kind of asset returned","properties":{"transcription":{"type":"object","description":"Transcription of the audio","properties":{"text":{"type":"string"}},"required":["text"]},"hasNormals":{"type":"boolean","description":"Whether the mesh has normal vectors"},"format":{"type":"string","description":"Format of the mesh file (e.g. 
'glb', etc.)"},"description":{"type":"string","description":"Description of the audio"},"bitrate":{"type":"number","description":"Bitrate of the media in bits per second"},"boneCount":{"type":"number","description":"Number of bones if skeleton exists"},"classification":{"type":"string","description":"Classification of the audio","enum":["effect","interview","music","other","sound","speech","text","unknown"]},"sampleRate":{"type":"number","description":"Sample rate of the media in Hz"},"faceCount":{"type":"number","description":"Number of faces/triangles in the mesh"},"vertexCount":{"type":"number","description":"Number of vertices in the mesh"},"animationFrameCount":{"type":"number","description":"Number of animation frames if animations exist"},"duration":{"type":"number","description":"Duration of the media in seconds"},"frameRate":{"type":"number","description":"Frame rate of the video in frames per second"},"nbFrames":{"type":"number","description":"Number of frames in the video"},"channels":{"type":"number","description":"Number of channels of the audio"},"size":{"type":"number"},"hasSkeleton":{"type":"boolean","description":"Whether the mesh has bones/skeleton"},"width":{"type":"number"},"hasAnimations":{"type":"boolean","description":"Whether the mesh has animations"},"codecName":{"type":"string","description":"Codec name of the media"},"hasUVs":{"type":"boolean","description":"Whether the mesh has UV coordinates"},"dimensions":{"type":"array","description":"Bounding box dimensions [width, height, depth]","items":{}},"height":{"type":"number"}},"required":["size"]},"status":{"type":"string","description":"The actual 
status","enum":["error","pending","success"]}},"required":["authorId","collectionIds","createdAt","editCapabilities","id","kind","metadata","mimeType","ownerId","privacy","properties","source","status","tags","updatedAt","url"]}}},"PostPromptInferencesResponse":{"type":"object","required":["job","prompts"],"properties":{"mode":{"type":"string","description":"The mode used to generate new prompt(s).","enum":["completion","contextual","image-editing","inventive","structured"],"default":"structured"},"creativeUnitsCost":{"type":"number","description":"The Creative Units cost for the request billed"},"creativeUnitsDiscount":{"type":"number","description":"The Creative Units discount for the request billed"},"job":{"type":"object","properties":{"createdAt":{"type":"string","description":"The job creation date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"jobId":{"type":"string","description":"The job ID (example: \"job_ocZCnG1Df35XRL1QyCZSRxAG8\")"},"metadata":{"type":"object","description":"Metadata of the job with some additional information","properties":{"output":{"type":"object","description":"The output of the job","properties":{}},"input":{"type":"object","description":"The inputs for the job","properties":{}},"assetIds":{"type":"array","description":"List of produced assets for this job","items":{"type":"string"}},"flow":{"type":"array","description":"The flow of the job. Only available for workflow jobs.","items":{"type":"object","properties":{"jobId":{"type":"string","description":"If the flow is part of a WorkflowJob, this is the jobId for the node.\njobId is only available for nodes started. 
A node \"Pending\" for a running workflow job is not started."},"assets":{"type":"array","description":"List of produced assets for this node.","items":{"type":"object","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]}},"logicType":{"type":"string","description":"The type of the logic for the node.\nOnly available for logic nodes.","enum":["if-else"]},"dependsOn":{"type":"array","description":"The nodes that this node depends on.\nOnly available for nodes that have dependencies. Mainly used for user approval nodes.","items":{"type":"string"}},"modelId":{"type":"string","description":"The model id for the node. Mainly used for custom model tasks."},"inputs":{"type":"array","description":"The inputs of the node.","items":{"type":"object","description":"This is used to run the flow.","properties":{"parent":{"type":"boolean","description":"Whether this input represents a parent asset to assign to the produced assets.\nOnly available for \\`file\\` and \\`file_array\\` input types.\n\nFor \\`file_array\\`, the parent asset is the first item in the array."},"color":{"type":"boolean","description":"Whether the input is a color or not. Only available for \\`string\\` input type."},"inputs":{"type":"array","description":"The list of inputs which form an object within a container array.\nAll inputs are the same as the current object.\nThis is only available for type inputs_array inputs.","items":{"type":"object","properties":{}}},"minLength":{"type":"number","description":"The minimum allowed length for string inputs. Also applies to each item in \\`string_array\\`."},"backgroundBehavior":{"type":"string","description":"Specifies the background behavior for the input. 
Only available for \\`file\\` and \\`file_array\\`\ninput types with kind \\`image\\`.","enum":["opaque","transparent"]},"description":{"type":"string","description":"Help text displayed in the UI to provide additional information about the input"},"type":{"type":"string","description":"The data type of the input","enum":["boolean","file","file_array","inputs_array","model","model_array","number","number_array","string","string_array"]},"required":{"type":"object","description":"Set of rules that describes when this input is required:\n- \\`always\\`: Input is always required\n- \\`ifNotDefined\\`: Input is required when another specified input is not defined\n- \\`ifDefined\\`: Input is required when another specified input is defined\n- \\`conditionalValues\\`: Input is required when another input has a specific value\n\nBy default, the input is not required.","properties":{"always":{"type":"boolean","description":"Whether the input is always required"},"ifNotDefined":{"type":"object","description":"Makes this input required when another input is not defined:\n- Key: name of the input that must be undefined\n- Value: message to display when this input is required","properties":{}},"ifDefined":{"type":"object","description":"Makes this input required when another input is defined:\n- Key: name of the input that must be defined\n- Value: message to display when this input is required","properties":{}},"conditionalValues":{"type":"object","description":"Makes this input required when another input has a specific value:\n- Key: name of the input to check\n- Value: operation and allowed values that trigger the requirement","properties":{}}}},"modelTypes":{"type":"array","description":"The allowed model types for this input. 
Example: \\`[\"flux.1-lora\"]\\`.\nOnly available for \\`model_array\\` input type.","items":{"type":"string","enum":["custom","flux.1","flux.1-composition","flux.1-kontext-dev","flux.1-kontext-lora","flux.1-krea-dev","flux.1-krea-lora","flux.1-lora","flux.1-pro","flux.1.1-pro-ultra","flux1.1-pro","gpt-image-1","sd-1_5","sd-1_5-composition","sd-1_5-lora","sd-xl","sd-xl-composition","sd-xl-lora"]}},"ref":{"type":"object","description":"The reference to another input or output of the same workflow.\nMust have at least one of node or conditional.","properties":{"equal":{"type":"string","description":"This is the desired node output value if ref is an if/else node."},"node":{"type":"string","description":"The node id or 'workflow' if the source is a workflow input."},"conditional":{"type":"array","description":"The conditional nodes to reference.\nIf the conditional nodes are successful, the node will be successful.\nIf the conditional nodes are skipped, the node will be skipped.\nContains an array of node ids used to check the status of the nodes.","items":{"type":"string"}},"name":{"type":"string","description":"The name of the input or output to reference.\nIf the type is 'workflow', the name is the name of the input of the workflow is required\nIf the type is 'node', the name is not mandatory, except if you want all outputs of the node.\nTo get all outputs of a node, you can use the name 'all'."}}},"min":{"type":"number","description":"The minimum allowed value. Only available for \\`number\\` and array input types."},"promptSpark":{"type":"boolean","description":"Whether the input is used with prompt spark. Only available for \\`string\\` input type."},"placeholder":{"type":"string","description":"Placeholder text for the input. Only available for 'string' input type."},"group":{"type":"string","description":"Used to visually group inputs together in the UI. 
Inputs with the same group value appear\nconsecutively in the UI."},"allowedValues":{"type":"array","description":"The allowed values for the input. For \\`string\\` or \\`number\\` types, creates a single-select\ndropdown.\nFor \\`string_array\\` type, creates a multi-select dropdown.","items":{}},"costImpact":{"type":"boolean","description":"Whether this input affects the model's cost calculation"},"max":{"type":"number","description":"The maximum allowed value. Only available for \\`number\\` and \\`array\\` input types."},"kind":{"type":"string","description":"The asset kind of the input. Only taken into account for \\`file\\` and \\`file_array\\` input types.","enum":["3d","audio","document","image","image-hdr","json","video"]},"maskFrom":{"type":"string","description":"The name of the file input field to use as the mask source"},"label":{"type":"string","description":"The label displayed in the UI for this input"},"hint":{"type":"string","description":"Hint text displayed in the UI as a tooltip to guide the user"},"name":{"type":"string","description":"The name that must be used to call the model through the API"},"step":{"type":"number","description":"The step increment for numeric inputs. Only available for \\`number\\` input type.","minimum":1},"prompt":{"type":"boolean","description":"Whether the input is a prompt. When true, displays as a text area with prompt spark feature.\nOnly available for \\`string\\` input type."},"maxLength":{"type":"number","description":"The maximum allowed length for \\`string\\` inputs. 
Also applies to each item in \\`string_array\\`."}},"required":["name","type"]}},"id":{"type":"string","description":"The id of the node."},"logic":{"type":"object","description":"The logic of the node.\nOnly available for logic nodes.","properties":{"default":{"type":"string","description":"The default case of the logic.\nContains the id/output of the node to execute if no case is matched.\nOnly available for if/else nodes."},"transform":{"type":"string","description":"The transform of the logic.\nOnly available for transform nodes."},"cases":{"type":"array","description":"The cases of the logic.\nOnly available for if/else nodes.","items":{"type":"object","properties":{"condition":{"type":"string"},"value":{"type":"string"}},"required":["condition","value"]}}}},"type":{"type":"string","description":"The type of the job for the node.","enum":["custom-model","generate-prompt","logic","remove-background","transform","user-approval","workflow"]},"workflowId":{"type":"string","description":"The workflow id for the node. Mainly used for workflow tasks."},"status":{"type":"string","description":"The status of the node. 
Only available for WorkflowJob nodes.","enum":["failure","pending","processing","skipped","success"]}},"required":["id","status","type"]}},"workflowId":{"type":"string","description":"The workflow ID of the job if job is part of a workflow."},"workflowJobId":{"type":"string","description":"The workflow job ID of the job if job is part of a workflow job."}}},"statusHistory":{"type":"array","description":"The history of the different statuses the job went through with the ISO string date\nof when the job reached each statuses.","items":{"type":"object","properties":{"date":{"type":"string"},"status":{"type":"string","enum":["canceled","failure","finalizing","in-progress","pending","queued","success","warming-up"]}},"required":["date","status"]}},"progress":{"type":"number","description":"Progress of the job (between 0 and 1)"},"authorId":{"type":"string","description":"The author user ID (example: \"dcf121faaa1a0a0bbbd9ca1b73d62aea\")"},"jobType":{"type":"string","description":"The type of job","enum":["assets-download","canvas-export","caption","caption-llava","custom","describe-style","detection","embed","flux","flux-model-training","generate-prompt","image-generation","image-prompt-editing","inference","mesh-preview-rendering","model-download","model-import","model-training","openai-image-generation","patch-image","pixelate","reframe","remove-background","repaint","restyle","segment","skybox-3d","skybox-base-360","skybox-hdri","skybox-upscale-360","texture","translate","upload","upscale","upscale-skybox","upscale-texture","vectorize","workflow"]},"ownerId":{"type":"string","description":"The owner ID (example: \"team_U3Qmc8PCdWXwAQJ4Dvw4tV6D\")"},"billing":{"type":"object","description":"The billing of the job","properties":{"cuDiscount":{"type":"number"},"cuCost":{"type":"number"}},"required":["cuCost","cuDiscount"]},"status":{"type":"string","description":"The current status of the 
job","enum":["canceled","failure","finalizing","in-progress","pending","queued","success","warming-up"]},"updatedAt":{"type":"string","description":"The job last update date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"}},"required":["createdAt","jobId","jobType","metadata","progress","status","statusHistory","updatedAt"]},"prompts":{"type":"array","description":"The generated prompts.","items":{"type":"string"}}}},"GetModelsScoresTrainingDatasetByModelIdResponse":{"type":"object","required":["details","modelId","score","status"],"properties":{"score":{"type":"number","description":"The training dataset's score for the input modelId\n\nThe score is a number between 0 and 1. The higher the score, the more chance the model has to train well","minimum":0,"maximum":1},"modelId":{"type":"string","description":"The model ID (example: \"model_eyVcnFJcR92BxBkz7N6g5w\")"},"assetsWarnings":{"type":"array","description":"Present only if the training image assets of the modelId have one or more warnings","items":{"type":"object","properties":{"assetId":{"type":"string","description":"The AssetId which has a warning (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"message":{"type":"array","description":"An indicative message concerning the asset","items":{"type":"string","enum":["Captions are identical","Captions are too close","Empty caption","Missing caption","Missing metadata","Same asset","Similarity is too high","Similarity with other assets is too low","Width and height must be larger than 1024"]}}},"required":["assetId","message"]}},"assetCouplesWarnings":{"type":"array","description":"Present only if there is a warning between the assets of the couples","items":{"type":"object","properties":{"couple":{"type":"array","description":"The couple of AssetIds (example: ['asset_GTrL3mq4SXWyMxkOHRxlpw', 'asset_6VrLs8nC1DHWCfKGaVne0gBn'])","items":{}},"message":{"type":"array","description":"An indicative message concerning the couple of 
assets","items":{"type":"string","enum":["Captions are identical","Captions are too close","Empty caption","Missing caption","Missing metadata","Same asset","Similarity is too high","Similarity with other assets is too low","Width and height must be larger than 1024"]}}},"required":["couple","message"]}},"details":{"type":"object","description":"Different components used to compute the final score","properties":{"couplesCaptionsDistance":{"type":"object","description":"The min, mean, median of the assetCouplesCaptionsDistance","properties":{"min":{"type":"number","minimum":0,"maximum":1},"median":{"type":"number","minimum":0,"maximum":1},"mean":{"type":"number","minimum":0,"maximum":1}},"required":["mean","median","min"]},"assetCouplesCaptionsDistance":{"type":"array","description":"For each couple of assets in the training dataset, the distance between the captions","items":{"type":"object","properties":{"couple":{"type":"array","description":"The couple of AssetIds (example: ['asset_GTrL3mq4SXWyMxkOHRxlpw', 'asset_6VrLs8nC1DHWCfKGaVne0gBn'])","items":{}},"distance":{"type":"number","description":"The Dice-Sørensen distance between the captions of the assetIds of the couple.","minimum":0,"maximum":1}},"required":["couple","distance"]}},"couplesSimilarity":{"type":"object","description":"The min, mean, median of the assetCouplesSimilarity","properties":{"min":{"type":"number","minimum":-1,"maximum":1},"median":{"type":"number","minimum":-1,"maximum":1},"mean":{"type":"number","minimum":-1,"maximum":1}},"required":["mean","median","min"]},"assetsEmbeddingSimilarity":{"type":"array","description":"For each asset, the similarity with the model's embedding (excluding the current asset)","items":{"type":"object","properties":{"assetId":{"type":"string","description":"The AssetId associated with their similarity (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"similarity":{"type":"number","description":"The cosine similarity between the assetId and the model's embedding 
minus the asset's embedding\n\nThe value is a number between -1 and 1. The higher the value, the more similar the asset is to the model's embedding","minimum":-1,"maximum":1}},"required":["assetId","similarity"]}},"metadataScore":{"type":"number","description":"Takes into account all missing data and warnings to compute a metadata score","minimum":-1,"maximum":1},"assetCouplesSimilarity":{"type":"array","description":"For each couple of assets in the training dataset, the similarity between the assets","items":{"type":"object","properties":{"couple":{"type":"array","description":"The couple of AssetIds (example: ['asset_GTrL3mq4SXWyMxkOHRxlpw', 'asset_6VrLs8nC1DHWCfKGaVne0gBn'])","items":{}},"similarity":{"type":"number","description":"The cosine similarity between the assetIds of the couple.\n\nThe value is a number between -1 and 1. The higher the value, the more similar the assets are to each other","minimum":-1,"maximum":1}},"required":["couple","similarity"]}},"assetsSimilarity":{"type":"object","description":"The min, mean, median of the assetsEmbeddingSimilarity","properties":{"min":{"type":"number","minimum":-1,"maximum":1},"median":{"type":"number","minimum":-1,"maximum":1},"mean":{"type":"number","minimum":-1,"maximum":1}},"required":["mean","median","min"]}},"required":["assetCouplesCaptionsDistance","assetCouplesSimilarity","assetsEmbeddingSimilarity","assetsSimilarity","couplesCaptionsDistance","couplesSimilarity","metadataScore"]},"status":{"type":"string","description":"The status of the training dataset's score\n\nDetails if all the necessary data is present to compute the 
score","enum":["complete","incomplete","unknown"]}}},"PutUserNotificationsByTokenResponse":{"type":"object","required":["notifications"],"properties":{"notifications":{"type":"object","properties":{"model-validation-failed":{"type":"object","properties":{"mobile":{"type":"boolean"},"email":{"type":"boolean"}},"required":["email","mobile"]},"creative-units-threshold-50":{"type":"object","properties":{"mobile":{"type":"boolean"},"email":{"type":"boolean"}},"required":["email","mobile"]},"model-transfer":{"type":"object","properties":{"mobile":{"type":"boolean"},"email":{"type":"boolean"}},"required":["email","mobile"]},"auto-refill-disabled":{"type":"object","properties":{"mobile":{"type":"boolean"},"email":{"type":"boolean"}},"required":["email","mobile"]},"soft-deletion-first-call":{"type":"object","properties":{"mobile":{"type":"boolean"},"email":{"type":"boolean"}},"required":["email","mobile"]},"creative-units-threshold-90":{"type":"object","properties":{"mobile":{"type":"boolean"},"email":{"type":"boolean"}},"required":["email","mobile"]},"soft-deletion-last-call":{"type":"object","properties":{"mobile":{"type":"boolean"},"email":{"type":"boolean"}},"required":["email","mobile"]},"creative-units-threshold-100":{"type":"object","properties":{"mobile":{"type":"boolean"},"email":{"type":"boolean"}},"required":["email","mobile"]},"model-train-succeeded":{"type":"object","properties":{"mobile":{"type":"boolean"},"email":{"type":"boolean"}},"required":["email","mobile"]}},"required":["auto-refill-disabled","creative-units-threshold-100","creative-units-threshold-50","creative-units-threshold-90","model-train-succeeded","model-transfer","model-validation-failed","soft-deletion-first-call","soft-deletion-last-call"]}}},"PostWebhookEndpointsRequest":{"type":"object","required":["enabledEvents","url"],"properties":{"enabledEvents":{"type":"array","description":"The events that trigger the webhook. 
['*'] indicates that all events are enabled","items":{"type":"string","enum":["*","asset.download.completed","asset.download.created","asset.download.failed","generation.cancelled","generation.completed","generation.created","generation.failed","inference.cancelled","inference.completed","inference.created","inference.failed","model.download.completed","model.download.created","model.download.failed","model.training.cancelled","model.training.completed","model.training.failed","model.training.started"]}},"description":{"type":"string","description":"A description of the webhook endpoint"},"url":{"type":"string","description":"The URL of the webhook endpoint"}}},"PostAssetGetBulkRequest":{"type":"object","properties":{"assetIds":{"type":"array","description":"The list of asset ids the team has read access to. Limit of 200 assets.","items":{"type":"string"}}}},"JobAndBillingGenerateResponse":{"type":"object","properties":{"creativeUnitsCost":{"type":"number","description":"The Creative Units cost for the request billed"},"creativeUnitsDiscount":{"type":"number","description":"The Creative Units discount for the request billed"},"job":{"type":"object","properties":{"createdAt":{"type":"string","description":"The job creation date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"jobId":{"type":"string","description":"The job ID (example: \"job_ocZCnG1Df35XRL1QyCZSRxAG8\")"},"metadata":{"type":"object","description":"Metadata of the job with some additional information","properties":{"output":{"type":"object","description":"The output of the job","properties":{}},"input":{"type":"object","description":"The inputs for the job","properties":{}},"assetIds":{"type":"array","description":"List of produced assets for this job","items":{"type":"string"}},"flow":{"type":"array","description":"The flow of the job. 
Only available for workflow jobs.","items":{"type":"object","properties":{"jobId":{"type":"string","description":"If the flow is part of a WorkflowJob, this is the jobId for the node.\njobId is only available for nodes started. A node \"Pending\" for a running workflow job is not started."},"assets":{"type":"array","description":"List of produced assets for this node.","items":{"type":"object","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]}},"logicType":{"type":"string","description":"The type of the logic for the node.\nOnly available for logic nodes.","enum":["if-else"]},"dependsOn":{"type":"array","description":"The nodes that this node depends on.\nOnly available for nodes that have dependencies. Mainly used for user approval nodes.","items":{"type":"string"}},"modelId":{"type":"string","description":"The model id for the node. Mainly used for custom model tasks."},"inputs":{"type":"array","description":"The inputs of the node.","items":{"type":"object","description":"This is used to run the flow.","properties":{"parent":{"type":"boolean","description":"Whether this input represents a parent asset to assign to the produced assets.\nOnly available for \\`file\\` and \\`file_array\\` input types.\n\nFor \\`file_array\\`, the parent asset is the first item in the array."},"color":{"type":"boolean","description":"Whether the input is a color or not. Only available for \\`string\\` input type."},"inputs":{"type":"array","description":"The list of inputs which form an object within a container array.\nAll inputs are the same as the current object.\nThis is only available for type inputs_array inputs.","items":{"type":"object","properties":{}}},"minLength":{"type":"number","description":"The minimum allowed length for string inputs. Also applies to each item in \\`string_array\\`."},"backgroundBehavior":{"type":"string","description":"Specifies the background behavior for the input. 
Only available for \\`file\\` and \\`file_array\\`\ninput types with kind \\`image\\`.","enum":["opaque","transparent"]},"description":{"type":"string","description":"Help text displayed in the UI to provide additional information about the input"},"type":{"type":"string","description":"The data type of the input","enum":["boolean","file","file_array","inputs_array","model","model_array","number","number_array","string","string_array"]},"required":{"type":"object","description":"Set of rules that describes when this input is required:\n- \\`always\\`: Input is always required\n- \\`ifNotDefined\\`: Input is required when another specified input is not defined\n- \\`ifDefined\\`: Input is required when another specified input is defined\n- \\`conditionalValues\\`: Input is required when another input has a specific value\n\nBy default, the input is not required.","properties":{"always":{"type":"boolean","description":"Whether the input is always required"},"ifNotDefined":{"type":"object","description":"Makes this input required when another input is not defined:\n- Key: name of the input that must be undefined\n- Value: message to display when this input is required","properties":{}},"ifDefined":{"type":"object","description":"Makes this input required when another input is defined:\n- Key: name of the input that must be defined\n- Value: message to display when this input is required","properties":{}},"conditionalValues":{"type":"object","description":"Makes this input required when another input has a specific value:\n- Key: name of the input to check\n- Value: operation and allowed values that trigger the requirement","properties":{}}}},"modelTypes":{"type":"array","description":"The allowed model types for this input. 
Example: \\`[\"flux.1-lora\"]\\`.\nOnly available for \\`model_array\\` input type.","items":{"type":"string","enum":["custom","flux.1","flux.1-composition","flux.1-kontext-dev","flux.1-kontext-lora","flux.1-krea-dev","flux.1-krea-lora","flux.1-lora","flux.1-pro","flux.1.1-pro-ultra","flux1.1-pro","gpt-image-1","sd-1_5","sd-1_5-composition","sd-1_5-lora","sd-xl","sd-xl-composition","sd-xl-lora"]}},"ref":{"type":"object","description":"The reference to another input or output of the same workflow.\nMust have at least one of node or conditional.","properties":{"equal":{"type":"string","description":"This is the desired node output value if ref is an if/else node."},"node":{"type":"string","description":"The node id or 'workflow' if the source is a workflow input."},"conditional":{"type":"array","description":"The conditional nodes to reference.\nIf the conditional nodes are successful, the node will be successful.\nIf the conditional nodes are skipped, the node will be skipped.\nContains an array of node ids used to check the status of the nodes.","items":{"type":"string"}},"name":{"type":"string","description":"The name of the input or output to reference.\nIf the type is 'workflow', the name is the name of the input of the workflow is required\nIf the type is 'node', the name is not mandatory, except if you want all outputs of the node.\nTo get all outputs of a node, you can use the name 'all'."}}},"min":{"type":"number","description":"The minimum allowed value. Only available for \\`number\\` and array input types."},"promptSpark":{"type":"boolean","description":"Whether the input is used with prompt spark. Only available for \\`string\\` input type."},"placeholder":{"type":"string","description":"Placeholder text for the input. Only available for 'string' input type."},"group":{"type":"string","description":"Used to visually group inputs together in the UI. 
Inputs with the same group value appear\nconsecutively in the UI."},"allowedValues":{"type":"array","description":"The allowed values for the input. For \\`string\\` or \\`number\\` types, creates a single-select\ndropdown.\nFor \\`string_array\\` type, creates a multi-select dropdown.","items":{}},"costImpact":{"type":"boolean","description":"Whether this input affects the model's cost calculation"},"max":{"type":"number","description":"The maximum allowed value. Only available for \\`number\\` and \\`array\\` input types."},"kind":{"type":"string","description":"The asset kind of the input. Only taken into account for \\`file\\` and \\`file_array\\` input types.","enum":["3d","audio","document","image","image-hdr","json","video"]},"maskFrom":{"type":"string","description":"The name of the file input field to use as the mask source"},"label":{"type":"string","description":"The label displayed in the UI for this input"},"hint":{"type":"string","description":"Hint text displayed in the UI as a tooltip to guide the user"},"name":{"type":"string","description":"The name that must be used to call the model through the API"},"step":{"type":"number","description":"The step increment for numeric inputs. Only available for \\`number\\` input type.","minimum":1},"prompt":{"type":"boolean","description":"Whether the input is a prompt. When true, displays as a text area with prompt spark feature.\nOnly available for \\`string\\` input type."},"maxLength":{"type":"number","description":"The maximum allowed length for \\`string\\` inputs. 
Also applies to each item in \\`string_array\\`."}},"required":["name","type"]}},"id":{"type":"string","description":"The id of the node."},"logic":{"type":"object","description":"The logic of the node.\nOnly available for logic nodes.","properties":{"default":{"type":"string","description":"The default case of the logic.\nContains the id/output of the node to execute if no case is matched.\nOnly available for if/else nodes."},"transform":{"type":"string","description":"The transform of the logic.\nOnly available for transform nodes."},"cases":{"type":"array","description":"The cases of the logic.\nOnly available for if/else nodes.","items":{"type":"object","properties":{"condition":{"type":"string"},"value":{"type":"string"}},"required":["condition","value"]}}}},"type":{"type":"string","description":"The type of the job for the node.","enum":["custom-model","generate-prompt","logic","remove-background","transform","user-approval","workflow"]},"workflowId":{"type":"string","description":"The workflow id for the node. Mainly used for workflow tasks."},"status":{"type":"string","description":"The status of the node. 
Only available for WorkflowJob nodes.","enum":["failure","pending","processing","skipped","success"]}},"required":["id","status","type"]}},"workflowId":{"type":"string","description":"The workflow ID of the job if job is part of a workflow."},"workflowJobId":{"type":"string","description":"The workflow job ID of the job if job is part of a workflow job."}}},"statusHistory":{"type":"array","description":"The history of the different statuses the job went through with the ISO string date\nof when the job reached each statuses.","items":{"type":"object","properties":{"date":{"type":"string"},"status":{"type":"string","enum":["canceled","failure","finalizing","in-progress","pending","queued","success","warming-up"]}},"required":["date","status"]}},"progress":{"type":"number","description":"Progress of the job (between 0 and 1)"},"authorId":{"type":"string","description":"The author user ID (example: \"dcf121faaa1a0a0bbbd9ca1b73d62aea\")"},"jobType":{"type":"string","description":"The type of job","enum":["assets-download","canvas-export","caption","caption-llava","custom","describe-style","detection","embed","flux","flux-model-training","generate-prompt","image-generation","image-prompt-editing","inference","mesh-preview-rendering","model-download","model-import","model-training","openai-image-generation","patch-image","pixelate","reframe","remove-background","repaint","restyle","segment","skybox-3d","skybox-base-360","skybox-hdri","skybox-upscale-360","texture","translate","upload","upscale","upscale-skybox","upscale-texture","vectorize","workflow"]},"ownerId":{"type":"string","description":"The owner ID (example: \"team_U3Qmc8PCdWXwAQJ4Dvw4tV6D\")"},"billing":{"type":"object","description":"The billing of the job","properties":{"cuDiscount":{"type":"number"},"cuCost":{"type":"number"}},"required":["cuCost","cuDiscount"]},"status":{"type":"string","description":"The current status of the 
job","enum":["canceled","failure","finalizing","in-progress","pending","queued","success","warming-up"]},"updatedAt":{"type":"string","description":"The job last update date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"}},"required":["createdAt","jobId","jobType","metadata","progress","status","statusHistory","updatedAt"]}}},"DeleteModelsInferencesImagesByModelIdAndInferenceIdAndImageIdResponse":{},"PostWebhooksWorkflowJobIdRequest":{"type":"object","required":["jobId"],"properties":{"jobId":{"type":"string"},"status":{"type":"string"}}},"PostModelsRequest":{"type":"object","properties":{"concepts":{"type":"array","description":"The concepts is required for composition models. With one or more loras\n\nOnly applicable to Flux based models (and older SD1.5 and SDXL models)","items":{"type":"object","properties":{"modelId":{"type":"string","description":"The model ID (example: \"model_eyVcnFJcR92BxBkz7N6g5w\")"},"modelEpoch":{"type":"string","description":"The epoch of the model (example: \"000001\")\nOnly available for Flux Lora Trained models"},"scale":{"type":"number","description":"The scale of the model (example: 1.0)\nFor Flux Kontext Prompt Editing, the scale is between 0 and 2.","minimum":-2,"maximum":2}},"required":["modelId","scale"]}},"collectionIds":{"type":"array","description":"List of collection IDs to add the model to","items":{"type":"string"}},"classSlug":{"type":"string","description":"The slug of the class you want to use (ex: \"characters-npcs-mobs-characters\"). 
Set to null to unset the class","pattern":"^[a-z0-9-]+$"},"name":{"type":"string","description":"The model's name (ex: \"Cinematic Realism\").\n\nIf not set, the model's name will be automatically generated when starting training based on training data.","maxLength":64},"shortDescription":{"type":"string","description":"The model's short description (ex: \"This model generates highly detailed cinematic scenes.\").\n\nIf not set, the model's short description will be automatically generated when starting training based on training data.","maxLength":256},"baseModelId":{"type":"string","description":"The ID of the base model to use as a starting point for the training (example: \"flux.1-dev\")\n\nValue is automatically set based on the model's type. In case of doubt leave it empty."},"type":{"type":"string","description":"The model's type (ex: \"flux.1-lora\").\n\nThe type can only be changed if the model has the \"new\" status.","enum":["custom","flux.1","flux.1-composition","flux.1-kontext-dev","flux.1-kontext-lora","flux.1-krea-dev","flux.1-krea-lora","flux.1-lora","flux.1-pro","flux.1.1-pro-ultra","flux1.1-pro","gpt-image-1","sd-1_5","sd-1_5-composition","sd-1_5-lora","sd-xl","sd-xl-composition","sd-xl-lora"]}}},"GetTagsResponse":{"type":"object","required":["tags"],"properties":{"nextPaginationToken":{"type":"string","description":"A token to query the next page of tags"},"tags":{"type":"array","items":{"type":"object","properties":{"createdAt":{"type":"string","description":"The tag creation date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"ownerId":{"type":"string","description":"The owner ID (example: \"dcf121faaa1a0a0bbbd9ca1b73d62aea\")"},"tagName":{"type":"string","description":"The tag name"},"itemCount":{"type":"number","description":"The number of items that have this tag"},"updatedAt":{"type":"string","description":"The tag last update date as an ISO string (example: 
\"2023-02-03T11:19:41.579Z\")"}},"required":["createdAt","itemCount","ownerId","tagName","updatedAt"]}}}},"DeleteModelPresetByModelIdAndPresetIdResponse":{},"CopyAssetResponse":{"type":"object","required":["asset"],"properties":{"asset":{"type":"object","properties":{"preview":{"type":"object","description":"The asset's preview.\n\nContains the assetId and the url of the preview.","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"metadata":{"type":"object","description":"Metadata of the asset with some additional information","properties":{"maxIterations":{"type":"number"},"modality":{"type":"string","description":"Modality to detect","enum":["canny","depth","grayscale","lineart_anime","mlsd","normal","pose","scribble","segmentation","sketch"],"default":"canny"},"tileStyle":{"type":"boolean","description":"If set to true, during the upscaling process, the model will match tiles of the source image with tiles of the style image(s). This will result in a more coherent restyle. Works best with style images that have a similar composition.","default":false},"modelId":{"type":"string","description":"The modelId used to generate this asset"},"sourceProjectId":{"type":"string"},"bbox":{"type":"array","description":"A bounding box around the object of interest, in the format [x1, y1, x2, y2].","items":{}},"keypointThreshold":{"type":"number","description":"How polished is the surface? 0 is like a rough surface, 1 is like a mirror","minimum":0,"maximum":1},"type":{"type":"string","description":"The type of the asset. 
Ex: 'inference-txt2img' will represent an asset generated from a text to image model","enum":["3d-texture","3d-texture-albedo","3d-texture-metallic","3d-texture-mtl","3d-texture-normal","3d-texture-roughness","3d23d","3d23d-texture","audio2audio","background-removal","canvas","canvas-drawing","canvas-export","detection","generative-fill","image-prompt-editing","img23d","img2img","img2video","inference-controlnet","inference-controlnet-img2img","inference-controlnet-inpaint","inference-controlnet-inpaint-ip-adapter","inference-controlnet-ip-adapter","inference-controlnet-reference","inference-controlnet-texture","inference-img2img","inference-img2img-ip-adapter","inference-img2img-texture","inference-inpaint","inference-inpaint-ip-adapter","inference-reference","inference-reference-texture","inference-txt2img","inference-txt2img-ip-adapter","inference-txt2img-texture","patch","pixelization","reframe","restyle","segment","segmentation-image","segmentation-mask","skybox-3d","skybox-base-360","skybox-hdri","texture","texture-albedo","texture-ao","texture-edge","texture-height","texture-metallic","texture-normal","texture-smoothness","txt23d","txt2audio","txt2img","txt2video","unknown","uploaded","uploaded-3d","uploaded-audio","uploaded-avatar","uploaded-video","upscale","upscale-skybox","upscale-texture","upscale-video","vectorization","video2img","video2video"]},"text":{"type":"string","description":"A textual description / keywords describing the object of interest.","maxLength":100},"height":{"type":"number"},"nbMasks":{"type":"number"},"creativityDecay":{"type":"number","description":"Amount of decay in creativity over the upscale process. 
The lowest the value, the less the creativity will be preserved over the upscale process.","minimum":0,"maximum":100},"layerDifference":{"type":"number"},"scalingFactor":{"type":"number","description":"Scaling factor (when `targetWidth` not specified)","minimum":1,"maximum":16},"inputFidelity":{"type":"string","description":"When set to `high`, allows to better preserve details from the input images in the output.\nThis is especially useful when using images that contain elements like faces or logos that\nrequire accurate preservation in the generated image.\n\nYou can provide multiple input images that will all be preserved with high fidelity, but keep\nin mind that the first image will be preserved with richer textures and finer details, so if\nyou include elements such as faces, consider placing them in the first image.\n\nOnly available for the `gpt-image-1` model.","enum":["high","low"],"default":"low"},"modelType":{"type":"string","description":"The type of the generator used","enum":["custom","flux.1","flux.1-composition","flux.1-kontext-dev","flux.1-kontext-lora","flux.1-krea-dev","flux.1-krea-lora","flux.1-lora","flux.1-pro","flux.1.1-pro-ultra","flux1.1-pro","gpt-image-1","sd-1_5","sd-1_5-composition","sd-1_5-lora","sd-xl","sd-xl-composition","sd-xl-lora"]},"numOutputs":{"type":"number","description":"The number of outputs to generate.","minimum":1,"maximum":8},"angular":{"type":"number","description":"How angular is the surface? 
0 is like a sphere, 1 is like a mechanical object","minimum":0,"maximum":1},"targetHeight":{"type":"number","description":"The target height of the output image.","minimum":0,"maximum":2048},"concepts":{"type":"array","description":"Flux Kontext LoRA to style the image.\nFor Flux Kontext Prompt Editing.","items":{"type":"object","properties":{"modelId":{"type":"string","description":"The model ID (example: \"model_eyVcnFJcR92BxBkz7N6g5w\")"},"modelEpoch":{"type":"string","description":"The epoch of the model (example: \"000001\")\nOnly available for Flux Lora Trained models"},"scale":{"type":"number","description":"The scale of the model (example: 1.0)\nFor Flux Kontext Prompt Editing, the scale is between 0 and 2.","minimum":-2,"maximum":2}},"required":["modelId","scale"]}},"size":{"type":"number"},"spliceThreshold":{"type":"number"},"dilate":{"type":"number","description":"The number of pixels to dilate the result masks.","minimum":0,"maximum":30},"style":{"type":"string","enum":["3d-cartoon","3d-rendered","anime","cartoon","cinematic","claymation","cloud-skydome","comic","cyberpunk","enchanted","fantasy","ink","manga","manga-color","minimalist","neon-tron","oil-painting","pastel","photo","photography","psychedelic","retro-fantasy","scifi-concept-art","space","standard","whimsical"]},"imageFidelity":{"type":"number","description":"Strengthen the similarity to the original image during the upscale. 
Default: optimized for your preset and style.","minimum":0,"maximum":100},"inputLocation":{"type":"string","description":"Location of the input image in the output.","enum":["bottom","left","middle","right","top"],"default":"middle"},"seed":{"type":"string","description":"The seed used to generate this asset"},"preset":{"type":"string"},"trainingImage":{"type":"boolean"},"resultMask":{"type":"boolean","description":"Boolean to enable returning the masks (binary image) in the response.","default":true},"lockExpiresAt":{"type":"string","description":"The ISO timestamp when the lock on the canvas will expire"},"baseModelId":{"type":"string","description":"The baseModelId that may be changed at inference time"},"pathPrecision":{"type":"number"},"rootParentId":{"type":"string"},"sharpen":{"type":"boolean","description":"Sharpen tiles.","default":false},"cornerThreshold":{"type":"number"},"styleImages":{"type":"array","description":"List of style images. Most of the time, only one image is enough. They must be existing AssetIds.","items":{"type":"string"}},"texture":{"type":"string","description":"The asset to convert into texture maps. Must reference an existing AssetId."},"filterSpeckle":{"type":"number"},"minThreshold":{"type":"number","description":"Minimum threshold for Grayscale conversion"},"maxThreshold":{"type":"number","description":"Maximum threshold for Grayscale conversion"},"styleImagesFidelity":{"type":"number","description":"Condition the influence of the style image(s). The higher the value, the more the style images will influence the upscaled image.","minimum":0,"maximum":100},"depthImage":{"type":"string","description":"The control image processed by depth estimator. 
Must reference an existing AssetId."},"parentId":{"type":"string"},"parentJobId":{"type":"string"},"width":{"type":"number","description":"The width of the rendered image.","minimum":1024,"maximum":2048},"strength":{"type":"number","description":"The strength\n\nOnly available for the `flux-kontext` LoRA model."},"inferenceId":{"type":"string","description":"The id of the Inference describing how this image was generated"},"lengthThreshold":{"type":"number"},"originalAssetId":{"type":"string"},"backgroundOpacity":{"type":"number","description":"Int to set between 0 and 255 for the opacity of the background in the result images.","minimum":0,"maximum":255},"aspectRatio":{"type":"string","description":"The optional aspect ratio given for the generation, only applicable for some models"},"detailsLevel":{"type":"number","description":"Amount of details to remove or add","minimum":-50,"maximum":50},"fractality":{"type":"number","description":"Determine the scale at which the upscale process works.\n- With a small value, the upscale works at the largest scale, resulting in fewer added details and more coherent images. Ideal for portraits, for example.\n- With a large value, the upscale works at the smallest scale, resulting in more added details and more hallucinations. Ideal for landscapes, for example.\n\n(info): A small value is slower and more expensive to run.","minimum":0,"maximum":100},"horizontalExpansionRatio":{"type":"number","description":"(deprecated) Horizontal expansion ratio.","minimum":1,"maximum":2},"points":{"type":"array","description":"List of points (label, x, y) in the image where label = 0 for background and 1 for object.","items":{"type":"array","items":{}}},"mode":{"type":"string"},"colorPrecision":{"type":"number"},"promptFidelity":{"type":"number","description":"Increase the fidelity to the prompt during upscale. 
Default: optimized for your preset and style.","minimum":0,"maximum":100},"betterQuality":{"type":"boolean","description":"Remove small dark spots (i.e. “pepper”) and connect small bright cracks.","default":false},"removeBackground":{"type":"boolean","description":"Remove background for Grayscale detector","default":true},"creativity":{"type":"number","description":"Allow the generation of \"hallucinations\" during the upscale process, which adds additional details and deviates from the original image. Default: optimized for your preset and style.","minimum":0,"maximum":100},"structureFidelity":{"type":"number","description":"Strength for the input image structure preservation","minimum":0,"maximum":100},"imageType":{"type":"string","description":"Preserve the seamless properties of skybox or texture images. Input has to be of same type (seamless).","enum":["seamfull","skybox","texture"],"default":"seamfull"},"image":{"type":"string","description":"The input image to process. Must reference an existing AssetId or be a data URL."},"thumbnail":{"type":"object","description":"The thumbnail of the canvas","properties":{"assetId":{"type":"string","description":"The AssetId of the image used as a thumbnail for the canvas (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"url":{"type":"string","description":"The url of the image used as a thumbnail for the canvas"}},"required":["assetId","url"]},"structureImage":{"type":"string","description":"The control image for structure. A canny detector will be applied to this image. 
Must reference an existing AssetId."},"invert":{"type":"boolean","description":"To invert the relief","default":false},"kind":{"type":"string","enum":["3d","audio","document","image","image-hdr","json","video"]},"colorMode":{"type":"string"},"highThreshold":{"type":"number","description":"High threshold for Canny detector"},"verticalExpansionRatio":{"type":"number","description":"(deprecated) Vertical expansion ratio.","minimum":1,"maximum":2},"halfMode":{"type":"boolean"},"name":{"type":"string"},"negativePromptStrength":{"type":"number","description":"Controls the influence of the negative prompt. Default 0 means the negative prompt has no effect. Higher values increase negative prompt influence.\nMust be > 0 if negativePrompt is provided.","minimum":0,"maximum":10},"refinementSteps":{"type":"number","description":"Additional refinement steps before scaling.\n\nIf scalingFactor == 1, the refinement process will be applied (1 + refinementSteps) times.\nIf scalingFactor > 1, the refinement process will be applied refinementSteps times.","minimum":0,"maximum":4},"referenceImages":{"type":"array","description":"The reference images used for the asset generation or editing","items":{"type":"string"}},"styleFidelity":{"type":"number","description":"The higher the value the more it will look like the style image(s)","minimum":0,"maximum":100},"resultContours":{"type":"boolean","description":"Boolean to output the contours.","default":false},"controlEnd":{"type":"number","description":"End step for control."},"polished":{"type":"number","description":"How polished is the surface? 0 is like a rough surface, 1 is like a mirror","minimum":0,"maximum":1},"scheduler":{"type":"string","description":"The scheduler used to generate this asset"},"overrideEmbeddings":{"type":"boolean","description":"Override the embeddings of the model. Only your prompt and negativePrompt will be used. 
Use with caution."},"progressPercent":{"type":"number"},"factor":{"type":"number","description":"Contrast factor for Grayscale detector"},"mask":{"type":"string","description":"The mask used for the asset generation or editing"},"lowThreshold":{"type":"number","description":"Low threshold for Canny detector"},"depthFidelity":{"type":"number","description":"The depth fidelity if a depth image provided","minimum":0,"maximum":100},"raised":{"type":"number","description":"How raised is the surface? 0 is flat like water, 1 is like a very rough rock","minimum":0,"maximum":1},"copiedAt":{"type":"string","description":"The date when the asset was copied to a project"},"contours":{"type":"array","items":{"type":"array","items":{"type":"array","items":{"type":"array","items":{"type":"number"}}}}},"negativePrompt":{"type":"string","description":"The negative prompt used to generate this asset"},"defaultParameters":{"type":"boolean","description":"If true, use the default parameters","default":false},"resizeOption":{"type":"number","description":"Size proportion of the input image in the output.","minimum":0.1,"maximum":1},"outputIndex":{"type":"number"},"saveFlipbook":{"type":"boolean","description":"Save a flipbook of the texture. Deactivated when the input texture is larger than 2048x2048px","default":true},"guidance":{"type":"number","description":"The guidance used to generate this asset"},"numInferenceSteps":{"type":"number","description":"The number of denoising steps for each image generation.","minimum":5,"maximum":50},"hdr":{"type":"number"},"clustering":{"type":"boolean","description":"Activate clustering.","default":false},"shiny":{"type":"number","description":"How shiny is the surface? 
0 is like a matte surface, 1 is like a diamond","minimum":0,"maximum":1},"colorCorrection":{"type":"boolean","description":"Ensure upscaled tiles have the same color histogram as the original tile.","default":true},"sketch":{"type":"boolean","description":"Activate sketch detection instead of canny.","default":false},"overlapPercentage":{"type":"number","description":"Overlap percentage for the output image.","minimum":0,"maximum":0.5},"prompt":{"type":"string","description":"The prompt that guided the asset generation or editing"},"resultImage":{"type":"boolean","description":"Boolean to enable outputting the cut-out object.","default":false},"cannyStructureImage":{"type":"string","description":"The control image already processed by canny detector. Must reference an existing AssetId."},"geometryEnforcement":{"type":"number","description":"Apply extra control to the Skybox 360 geometry.\nThe higher the value, the more the 360 geometry will influence the generated skybox image.\n\nUse with caution. 
Default is adapted to the other parameters.","minimum":0,"maximum":100},"targetWidth":{"type":"number","description":"Target width for the upscaled image, take priority over scaling factor","minimum":1024,"maximum":16000}},"required":["kind","type"]},"automaticCaptioning":{"type":"string","description":"Automatic captioning of the asset"},"description":{"type":"string","description":"The description, it will contain in priority:\n- the manual description\n- the advanced captioning when the asset is used in training flow\n- the automatic captioning"},"privacy":{"type":"string","description":"The privacy of the asset","enum":["private","public","unlisted"]},"mimeType":{"type":"string","description":"The mime type of the asset (example: \"image/png\")"},"source":{"type":"string","description":"source of the asset","enum":["3d23d","3d23d:texture","3d:texture","3d:texture:albedo","3d:texture:metallic","3d:texture:mtl","3d:texture:normal","3d:texture:roughness","audio2audio","background-removal","canvas","canvas-drawing","canvas-export","detection","generative-fill","image-prompt-editing","img23d","img2img","img2video","inference-control-net","inference-control-net-img","inference-control-net-inpainting","inference-control-net-inpainting-ip-adapter","inference-control-net-ip-adapter","inference-control-net-reference","inference-control-net-texture","inference-img","inference-img-ip-adapter","inference-img-texture","inference-in-paint","inference-in-paint-ip-adapter","inference-reference","inference-reference-texture","inference-txt","inference-txt-ip-adapter","inference-txt-texture","patch","pixelization","reframe","restyle","segment","segmentation-image","segmentation-mask","skybox-3d","skybox-base-360","skybox-hdri","texture","texture:albedo","texture:ao","texture:edge","texture:height","texture:metallic","texture:normal","texture:smoothness","txt23d","txt2audio","txt2img","txt2video","unknown","uploaded","uploaded-3d","uploaded-audio","uploaded-avatar","uploaded-video"
,"upscale","upscale-skybox","upscale-texture","upscale-video","vectorization","video2img","video2video"]},"ownerId":{"type":"string","description":"The owner (project) ID (example: \"proj_23tlk332lkht3kl2\" or \"team_dlkhgs23tlk3hlkth32lkht3kl2\" for old teams)"},"createdAt":{"type":"string","description":"The asset creation date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"firstFrame":{"type":"object","description":"The video asset's first frame.\n\nContains the assetId and the url of the first frame.","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"editCapabilities":{"type":"array","description":"List of edit capabilities","items":{"type":"string","enum":["DETECTION","GENERATIVE_FILL","PIXELATE","PROMPT_EDITING","REFINE","REFRAME","REMOVE_BACKGROUND","SEGMENTATION","UPSCALE","UPSCALE_360","VECTORIZATION"]}},"embedding":{"type":"array","description":"The embedding of the asset when requested.\n\nOnly available when an asset can be embedded (ie: not Detection maps)","items":{"type":"number"}},"id":{"type":"string","description":"The asset ID (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"updatedAt":{"type":"string","description":"The asset last update date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"thumbnail":{"type":"object","description":"The asset's thumbnail.\n\nContains the assetId and the url of the thumbnail.","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"nsfw":{"type":"array","description":"The NSFW labels","items":{"type":"string"}},"kind":{"type":"string","description":"The kind of asset","enum":["3d","audio","document","image","image-hdr","json","video"]},"lastFrame":{"type":"object","description":"The video asset's last frame.\n\nContains the assetId and the url of the last 
frame.","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"originalFileUrl":{"type":"string","description":"The original file url.\n\nContains the url of the original file, without any conversion. Only available for some specific video, audio and threeD assets.\nIs only specified if the given asset data has been replaced with a new file during the creation of the asset."},"authorId":{"type":"string","description":"The author user ID (example: \"dcf121faaa1a0a0bbbd9ca1b73d62aea\")"},"url":{"type":"string","description":"Signed URL to get the asset content"},"isHidden":{"type":"boolean","description":"Whether the asset is hidden."},"tags":{"type":"array","description":"The associated tags (example: [\"sci-fi\", \"landscape\"])","items":{"type":"string"}},"outputIndex":{"type":"number","description":"The output index of the asset within a job\nThis index is a positive integer that starts at 0\nIt is used to differentiate between multiple outputs of the same job\nIf the job has only one output, this index is 0"},"collectionIds":{"type":"array","description":"A list of CollectionId this asset belongs to","items":{"type":"string"}},"properties":{"type":"object","description":"The properties of the asset, content may depend on the kind of asset returned","properties":{"transcription":{"type":"object","description":"Transcription of the audio","properties":{"text":{"type":"string"}},"required":["text"]},"hasNormals":{"type":"boolean","description":"Whether the mesh has normal vectors"},"format":{"type":"string","description":"Format of the mesh file (e.g. 
'glb', etc.)"},"description":{"type":"string","description":"Description of the audio"},"bitrate":{"type":"number","description":"Bitrate of the media in bits per second"},"boneCount":{"type":"number","description":"Number of bones if skeleton exists"},"classification":{"type":"string","description":"Classification of the audio","enum":["effect","interview","music","other","sound","speech","text","unknown"]},"sampleRate":{"type":"number","description":"Sample rate of the media in Hz"},"faceCount":{"type":"number","description":"Number of faces/triangles in the mesh"},"vertexCount":{"type":"number","description":"Number of vertices in the mesh"},"animationFrameCount":{"type":"number","description":"Number of animation frames if animations exist"},"duration":{"type":"number","description":"Duration of the media in seconds"},"frameRate":{"type":"number","description":"Frame rate of the video in frames per second"},"nbFrames":{"type":"number","description":"Number of frames in the video"},"channels":{"type":"number","description":"Number of channels of the audio"},"size":{"type":"number"},"hasSkeleton":{"type":"boolean","description":"Whether the mesh has bones/skeleton"},"width":{"type":"number"},"hasAnimations":{"type":"boolean","description":"Whether the mesh has animations"},"codecName":{"type":"string","description":"Codec name of the media"},"hasUVs":{"type":"boolean","description":"Whether the mesh has UV coordinates"},"dimensions":{"type":"array","description":"Bounding box dimensions [width, height, depth]","items":{}},"height":{"type":"number"}},"required":["size"]},"status":{"type":"string","description":"The actual 
status","enum":["error","pending","success"]}},"required":["authorId","collectionIds","createdAt","editCapabilities","id","kind","metadata","mimeType","ownerId","privacy","properties","source","status","tags","updatedAt","url"]}}},"UnlockAssetByAssetIdResponse":{"type":"object","required":["asset"],"properties":{"asset":{"type":"object","properties":{"preview":{"type":"object","description":"The asset's preview.\n\nContains the assetId and the url of the preview.","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"metadata":{"type":"object","description":"Metadata of the asset with some additional information","properties":{"maxIterations":{"type":"number"},"modality":{"type":"string","description":"Modality to detect","enum":["canny","depth","grayscale","lineart_anime","mlsd","normal","pose","scribble","segmentation","sketch"],"default":"canny"},"tileStyle":{"type":"boolean","description":"If set to true, during the upscaling process, the model will match tiles of the source image with tiles of the style image(s). This will result in a more coherent restyle. Works best with style images that have a similar composition.","default":false},"modelId":{"type":"string","description":"The modelId used to generate this asset"},"sourceProjectId":{"type":"string"},"bbox":{"type":"array","description":"A bounding box around the object of interest, in the format [x1, y1, x2, y2].","items":{}},"keypointThreshold":{"type":"number","description":"How polished is the surface? 0 is like a rough surface, 1 is like a mirror","minimum":0,"maximum":1},"type":{"type":"string","description":"The type of the asset. 
Ex: 'inference-txt2img' will represent an asset generated from a text to image model","enum":["3d-texture","3d-texture-albedo","3d-texture-metallic","3d-texture-mtl","3d-texture-normal","3d-texture-roughness","3d23d","3d23d-texture","audio2audio","background-removal","canvas","canvas-drawing","canvas-export","detection","generative-fill","image-prompt-editing","img23d","img2img","img2video","inference-controlnet","inference-controlnet-img2img","inference-controlnet-inpaint","inference-controlnet-inpaint-ip-adapter","inference-controlnet-ip-adapter","inference-controlnet-reference","inference-controlnet-texture","inference-img2img","inference-img2img-ip-adapter","inference-img2img-texture","inference-inpaint","inference-inpaint-ip-adapter","inference-reference","inference-reference-texture","inference-txt2img","inference-txt2img-ip-adapter","inference-txt2img-texture","patch","pixelization","reframe","restyle","segment","segmentation-image","segmentation-mask","skybox-3d","skybox-base-360","skybox-hdri","texture","texture-albedo","texture-ao","texture-edge","texture-height","texture-metallic","texture-normal","texture-smoothness","txt23d","txt2audio","txt2img","txt2video","unknown","uploaded","uploaded-3d","uploaded-audio","uploaded-avatar","uploaded-video","upscale","upscale-skybox","upscale-texture","upscale-video","vectorization","video2img","video2video"]},"text":{"type":"string","description":"A textual description / keywords describing the object of interest.","maxLength":100},"height":{"type":"number"},"nbMasks":{"type":"number"},"creativityDecay":{"type":"number","description":"Amount of decay in creativity over the upscale process. 
The lower the value, the less the creativity will be preserved over the upscale process.","minimum":0,"maximum":100},"layerDifference":{"type":"number"},"scalingFactor":{"type":"number","description":"Scaling factor (when `targetWidth` not specified)","minimum":1,"maximum":16},"inputFidelity":{"type":"string","description":"When set to `high`, allows to better preserve details from the input images in the output.\nThis is especially useful when using images that contain elements like faces or logos that\nrequire accurate preservation in the generated image.\n\nYou can provide multiple input images that will all be preserved with high fidelity, but keep\nin mind that the first image will be preserved with richer textures and finer details, so if\nyou include elements such as faces, consider placing them in the first image.\n\nOnly available for the `gpt-image-1` model.","enum":["high","low"],"default":"low"},"modelType":{"type":"string","description":"The type of the generator used","enum":["custom","flux.1","flux.1-composition","flux.1-kontext-dev","flux.1-kontext-lora","flux.1-krea-dev","flux.1-krea-lora","flux.1-lora","flux.1-pro","flux.1.1-pro-ultra","flux1.1-pro","gpt-image-1","sd-1_5","sd-1_5-composition","sd-1_5-lora","sd-xl","sd-xl-composition","sd-xl-lora"]},"numOutputs":{"type":"number","description":"The number of outputs to generate.","minimum":1,"maximum":8},"angular":{"type":"number","description":"How angular is the surface? 
0 is like a sphere, 1 is like a mechanical object","minimum":0,"maximum":1},"targetHeight":{"type":"number","description":"The target height of the output image.","minimum":0,"maximum":2048},"concepts":{"type":"array","description":"Flux Kontext LoRA to style the image.\nFor Flux Kontext Prompt Editing.","items":{"type":"object","properties":{"modelId":{"type":"string","description":"The model ID (example: \"model_eyVcnFJcR92BxBkz7N6g5w\")"},"modelEpoch":{"type":"string","description":"The epoch of the model (example: \"000001\")\nOnly available for Flux Lora Trained models"},"scale":{"type":"number","description":"The scale of the model (example: 1.0)\nFor Flux Kontext Prompt Editing, the scale is between 0 and 2.","minimum":-2,"maximum":2}},"required":["modelId","scale"]}},"size":{"type":"number"},"spliceThreshold":{"type":"number"},"dilate":{"type":"number","description":"The number of pixels to dilate the result masks.","minimum":0,"maximum":30},"style":{"type":"string","enum":["3d-cartoon","3d-rendered","anime","cartoon","cinematic","claymation","cloud-skydome","comic","cyberpunk","enchanted","fantasy","ink","manga","manga-color","minimalist","neon-tron","oil-painting","pastel","photo","photography","psychedelic","retro-fantasy","scifi-concept-art","space","standard","whimsical"]},"imageFidelity":{"type":"number","description":"Strengthen the similarity to the original image during the upscale. 
Default: optimized for your preset and style.","minimum":0,"maximum":100},"inputLocation":{"type":"string","description":"Location of the input image in the output.","enum":["bottom","left","middle","right","top"],"default":"middle"},"seed":{"type":"string","description":"The seed used to generate this asset"},"preset":{"type":"string"},"trainingImage":{"type":"boolean"},"resultMask":{"type":"boolean","description":"Boolean to enable returning the masks (binary image) in the response.","default":true},"lockExpiresAt":{"type":"string","description":"The ISO timestamp when the lock on the canvas will expire"},"baseModelId":{"type":"string","description":"The baseModelId that may be changed at inference time"},"pathPrecision":{"type":"number"},"rootParentId":{"type":"string"},"sharpen":{"type":"boolean","description":"Sharpen tiles.","default":false},"cornerThreshold":{"type":"number"},"styleImages":{"type":"array","description":"List of style images. Most of the time, only one image is enough. They must be existing AssetIds.","items":{"type":"string"}},"texture":{"type":"string","description":"The asset to convert into texture maps. Must reference an existing AssetId."},"filterSpeckle":{"type":"number"},"minThreshold":{"type":"number","description":"Minimum threshold for Grayscale conversion"},"maxThreshold":{"type":"number","description":"Maximum threshold for Grayscale conversion"},"styleImagesFidelity":{"type":"number","description":"Condition the influence of the style image(s). The higher the value, the more the style images will influence the upscaled image.","minimum":0,"maximum":100},"depthImage":{"type":"string","description":"The control image processed by depth estimator. 
Must reference an existing AssetId."},"parentId":{"type":"string"},"parentJobId":{"type":"string"},"width":{"type":"number","description":"The width of the rendered image.","minimum":1024,"maximum":2048},"strength":{"type":"number","description":"The strength\n\nOnly available for the `flux-kontext` LoRA model."},"inferenceId":{"type":"string","description":"The id of the Inference describing how this image was generated"},"lengthThreshold":{"type":"number"},"originalAssetId":{"type":"string"},"backgroundOpacity":{"type":"number","description":"Int to set between 0 and 255 for the opacity of the background in the result images.","minimum":0,"maximum":255},"aspectRatio":{"type":"string","description":"The optional aspect ratio given for the generation, only applicable for some models"},"detailsLevel":{"type":"number","description":"Amount of details to remove or add","minimum":-50,"maximum":50},"fractality":{"type":"number","description":"Determine the scale at which the upscale process works.\n- With a small value, the upscale works at the largest scale, resulting in fewer added details and more coherent images. Ideal for portraits, for example.\n- With a large value, the upscale works at the smallest scale, resulting in more added details and more hallucinations. Ideal for landscapes, for example.\n\n(info): A small value is slower and more expensive to run.","minimum":0,"maximum":100},"horizontalExpansionRatio":{"type":"number","description":"(deprecated) Horizontal expansion ratio.","minimum":1,"maximum":2},"points":{"type":"array","description":"List of points (label, x, y) in the image where label = 0 for background and 1 for object.","items":{"type":"array","items":{}}},"mode":{"type":"string"},"colorPrecision":{"type":"number"},"promptFidelity":{"type":"number","description":"Increase the fidelity to the prompt during upscale. 
Default: optimized for your preset and style.","minimum":0,"maximum":100},"betterQuality":{"type":"boolean","description":"Remove small dark spots (i.e. “pepper”) and connect small bright cracks.","default":false},"removeBackground":{"type":"boolean","description":"Remove background for Grayscale detector","default":true},"creativity":{"type":"number","description":"Allow the generation of \"hallucinations\" during the upscale process, which adds additional details and deviates from the original image. Default: optimized for your preset and style.","minimum":0,"maximum":100},"structureFidelity":{"type":"number","description":"Strength for the input image structure preservation","minimum":0,"maximum":100},"imageType":{"type":"string","description":"Preserve the seamless properties of skybox or texture images. Input has to be of same type (seamless).","enum":["seamfull","skybox","texture"],"default":"seamfull"},"image":{"type":"string","description":"The input image to process. Must reference an existing AssetId or be a data URL."},"thumbnail":{"type":"object","description":"The thumbnail of the canvas","properties":{"assetId":{"type":"string","description":"The AssetId of the image used as a thumbnail for the canvas (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"url":{"type":"string","description":"The url of the image used as a thumbnail for the canvas"}},"required":["assetId","url"]},"structureImage":{"type":"string","description":"The control image for structure. A canny detector will be applied to this image. 
Must reference an existing AssetId."},"invert":{"type":"boolean","description":"To invert the relief","default":false},"kind":{"type":"string","enum":["3d","audio","document","image","image-hdr","json","video"]},"colorMode":{"type":"string"},"highThreshold":{"type":"number","description":"High threshold for Canny detector"},"verticalExpansionRatio":{"type":"number","description":"(deprecated) Vertical expansion ratio.","minimum":1,"maximum":2},"halfMode":{"type":"boolean"},"name":{"type":"string"},"negativePromptStrength":{"type":"number","description":"Controls the influence of the negative prompt. Default 0 means the negative prompt has no effect. Higher values increase negative prompt influence.\nMust be > 0 if negativePrompt is provided.","minimum":0,"maximum":10},"refinementSteps":{"type":"number","description":"Additional refinement steps before scaling.\n\nIf scalingFactor == 1, the refinement process will be applied (1 + refinementSteps) times.\nIf scalingFactor > 1, the refinement process will be applied refinementSteps times.","minimum":0,"maximum":4},"referenceImages":{"type":"array","description":"The reference images used for the asset generation or editing","items":{"type":"string"}},"styleFidelity":{"type":"number","description":"The higher the value the more it will look like the style image(s)","minimum":0,"maximum":100},"resultContours":{"type":"boolean","description":"Boolean to output the contours.","default":false},"controlEnd":{"type":"number","description":"End step for control."},"polished":{"type":"number","description":"How polished is the surface? 0 is like a rough surface, 1 is like a mirror","minimum":0,"maximum":1},"scheduler":{"type":"string","description":"The scheduler used to generate this asset"},"overrideEmbeddings":{"type":"boolean","description":"Override the embeddings of the model. Only your prompt and negativePrompt will be used. 
Use with caution."},"progressPercent":{"type":"number"},"factor":{"type":"number","description":"Contrast factor for Grayscale detector"},"mask":{"type":"string","description":"The mask used for the asset generation or editing"},"lowThreshold":{"type":"number","description":"Low threshold for Canny detector"},"depthFidelity":{"type":"number","description":"The depth fidelity if a depth image provided","minimum":0,"maximum":100},"raised":{"type":"number","description":"How raised is the surface? 0 is flat like water, 1 is like a very rough rock","minimum":0,"maximum":1},"copiedAt":{"type":"string","description":"The date when the asset was copied to a project"},"contours":{"type":"array","items":{"type":"array","items":{"type":"array","items":{"type":"array","items":{"type":"number"}}}}},"negativePrompt":{"type":"string","description":"The negative prompt used to generate this asset"},"defaultParameters":{"type":"boolean","description":"If true, use the default parameters","default":false},"resizeOption":{"type":"number","description":"Size proportion of the input image in the output.","minimum":0.1,"maximum":1},"outputIndex":{"type":"number"},"saveFlipbook":{"type":"boolean","description":"Save a flipbook of the texture. Deactivated when the input texture is larger than 2048x2048px","default":true},"guidance":{"type":"number","description":"The guidance used to generate this asset"},"numInferenceSteps":{"type":"number","description":"The number of denoising steps for each image generation.","minimum":5,"maximum":50},"hdr":{"type":"number"},"clustering":{"type":"boolean","description":"Activate clustering.","default":false},"shiny":{"type":"number","description":"How shiny is the surface? 
0 is like a matte surface, 1 is like a diamond","minimum":0,"maximum":1},"colorCorrection":{"type":"boolean","description":"Ensure upscaled tiles have the same color histogram as the original tile.","default":true},"sketch":{"type":"boolean","description":"Activate sketch detection instead of canny.","default":false},"overlapPercentage":{"type":"number","description":"Overlap percentage for the output image.","minimum":0,"maximum":0.5},"prompt":{"type":"string","description":"The prompt that guided the asset generation or editing"},"resultImage":{"type":"boolean","description":"Boolean to enable outputting the cut-out object.","default":false},"cannyStructureImage":{"type":"string","description":"The control image already processed by canny detector. Must reference an existing AssetId."},"geometryEnforcement":{"type":"number","description":"Apply extra control to the Skybox 360 geometry.\nThe higher the value, the more the 360 geometry will influence the generated skybox image.\n\nUse with caution. 
Default is adapted to the other parameters.","minimum":0,"maximum":100},"targetWidth":{"type":"number","description":"Target width for the upscaled image, take priority over scaling factor","minimum":1024,"maximum":16000}},"required":["kind","type"]},"automaticCaptioning":{"type":"string","description":"Automatic captioning of the asset"},"description":{"type":"string","description":"The description, it will contain in priority:\n- the manual description\n- the advanced captioning when the asset is used in training flow\n- the automatic captioning"},"privacy":{"type":"string","description":"The privacy of the asset","enum":["private","public","unlisted"]},"mimeType":{"type":"string","description":"The mime type of the asset (example: \"image/png\")"},"source":{"type":"string","description":"source of the asset","enum":["3d23d","3d23d:texture","3d:texture","3d:texture:albedo","3d:texture:metallic","3d:texture:mtl","3d:texture:normal","3d:texture:roughness","audio2audio","background-removal","canvas","canvas-drawing","canvas-export","detection","generative-fill","image-prompt-editing","img23d","img2img","img2video","inference-control-net","inference-control-net-img","inference-control-net-inpainting","inference-control-net-inpainting-ip-adapter","inference-control-net-ip-adapter","inference-control-net-reference","inference-control-net-texture","inference-img","inference-img-ip-adapter","inference-img-texture","inference-in-paint","inference-in-paint-ip-adapter","inference-reference","inference-reference-texture","inference-txt","inference-txt-ip-adapter","inference-txt-texture","patch","pixelization","reframe","restyle","segment","segmentation-image","segmentation-mask","skybox-3d","skybox-base-360","skybox-hdri","texture","texture:albedo","texture:ao","texture:edge","texture:height","texture:metallic","texture:normal","texture:smoothness","txt23d","txt2audio","txt2img","txt2video","unknown","uploaded","uploaded-3d","uploaded-audio","uploaded-avatar","uploaded-video"
,"upscale","upscale-skybox","upscale-texture","upscale-video","vectorization","video2img","video2video"]},"ownerId":{"type":"string","description":"The owner (project) ID (example: \"proj_23tlk332lkht3kl2\" or \"team_dlkhgs23tlk3hlkth32lkht3kl2\" for old teams)"},"createdAt":{"type":"string","description":"The asset creation date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"firstFrame":{"type":"object","description":"The video asset's first frame.\n\nContains the assetId and the url of the first frame.","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"editCapabilities":{"type":"array","description":"List of edit capabilities","items":{"type":"string","enum":["DETECTION","GENERATIVE_FILL","PIXELATE","PROMPT_EDITING","REFINE","REFRAME","REMOVE_BACKGROUND","SEGMENTATION","UPSCALE","UPSCALE_360","VECTORIZATION"]}},"embedding":{"type":"array","description":"The embedding of the asset when requested.\n\nOnly available when an asset can be embedded (ie: not Detection maps)","items":{"type":"number"}},"id":{"type":"string","description":"The asset ID (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"updatedAt":{"type":"string","description":"The asset last update date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"thumbnail":{"type":"object","description":"The asset's thumbnail.\n\nContains the assetId and the url of the thumbnail.","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"nsfw":{"type":"array","description":"The NSFW labels","items":{"type":"string"}},"kind":{"type":"string","description":"The kind of asset","enum":["3d","audio","document","image","image-hdr","json","video"]},"lastFrame":{"type":"object","description":"The video asset's last frame.\n\nContains the assetId and the url of the last 
frame.","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"originalFileUrl":{"type":"string","description":"The original file url.\n\nContains the url of the original file, without any conversion. Only available for some specific video, audio and threeD assets.\nIs only specified if the given asset data has been replaced with a new file during the creation of the asset."},"authorId":{"type":"string","description":"The author user ID (example: \"dcf121faaa1a0a0bbbd9ca1b73d62aea\")"},"url":{"type":"string","description":"Signed URL to get the asset content"},"isHidden":{"type":"boolean","description":"Whether the asset is hidden."},"tags":{"type":"array","description":"The associated tags (example: [\"sci-fi\", \"landscape\"])","items":{"type":"string"}},"outputIndex":{"type":"number","description":"The output index of the asset within a job\nThis index is a positive integer that starts at 0\nIt is used to differentiate between multiple outputs of the same job\nIf the job has only one output, this index is 0"},"collectionIds":{"type":"array","description":"A list of CollectionId this asset belongs to","items":{"type":"string"}},"properties":{"type":"object","description":"The properties of the asset, content may depend on the kind of asset returned","properties":{"transcription":{"type":"object","description":"Transcription of the audio","properties":{"text":{"type":"string"}},"required":["text"]},"hasNormals":{"type":"boolean","description":"Whether the mesh has normal vectors"},"format":{"type":"string","description":"Format of the mesh file (e.g. 
'glb', etc.)"},"description":{"type":"string","description":"Description of the audio"},"bitrate":{"type":"number","description":"Bitrate of the media in bits per second"},"boneCount":{"type":"number","description":"Number of bones if skeleton exists"},"classification":{"type":"string","description":"Classification of the audio","enum":["effect","interview","music","other","sound","speech","text","unknown"]},"sampleRate":{"type":"number","description":"Sample rate of the media in Hz"},"faceCount":{"type":"number","description":"Number of faces/triangles in the mesh"},"vertexCount":{"type":"number","description":"Number of vertices in the mesh"},"animationFrameCount":{"type":"number","description":"Number of animation frames if animations exist"},"duration":{"type":"number","description":"Duration of the media in seconds"},"frameRate":{"type":"number","description":"Frame rate of the video in frames per second"},"nbFrames":{"type":"number","description":"Number of frames in the video"},"channels":{"type":"number","description":"Number of channels of the audio"},"size":{"type":"number"},"hasSkeleton":{"type":"boolean","description":"Whether the mesh has bones/skeleton"},"width":{"type":"number"},"hasAnimations":{"type":"boolean","description":"Whether the mesh has animations"},"codecName":{"type":"string","description":"Codec name of the media"},"hasUVs":{"type":"boolean","description":"Whether the mesh has UV coordinates"},"dimensions":{"type":"array","description":"Bounding box dimensions [width, height, depth]","items":{}},"height":{"type":"number"}},"required":["size"]},"status":{"type":"string","description":"The actual 
status","enum":["error","pending","success"]}},"required":["authorId","collectionIds","createdAt","editCapabilities","id","kind","metadata","mimeType","ownerId","privacy","properties","source","status","tags","updatedAt","url"]}}},"PostWebhooksFalJobIdResponse":{"type":"object","properties":{"message":{"type":"string"}}},"DryRunResponse":{"type":"object","required":["creativeUnitsCost"],"properties":{"creativeUnitsCost":{"type":"number","description":"The Creative Units cost for the request billed"},"creativeUnitsDiscount":{"type":"number","description":"The Creative Units discount for the request billed"}}},"GetModelsInferencesByModelIdResponse":{"type":"object","required":["inferences"],"properties":{"nextPaginationToken":{"type":"string","description":"A token to query the next page of inferences"},"inferences":{"type":"array","items":{"type":"object","properties":{"images":{"type":"array","description":"The result images URLs (can change over time when inference is in progress)","items":{"type":"object","properties":{"seed":{"type":"string","description":"The seed used to generate this image"},"id":{"type":"string","description":"The image ID (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"url":{"type":"string","description":"The image URL"}},"required":["id","seed","url"]}},"modelId":{"type":"string","description":"The model ID on which the inference was done (example: \"model_GTrL3mq4SXWyMxkOHRxlpw\")"},"authorId":{"type":"string","description":"The author user ID (example: \"user_VFhihHKMRZyDDnZAJwLb2Q\")"},"displayPrompt":{"type":"string","description":"The human-friendly prompt (without token)"},"ownerId":{"type":"string","description":"The owner user ID (example: \"team_VFhihHKMRZyDDnZAJwLb2Q\")"},"userId":{"type":"string","description":"(Deprecated) The user ID (example: \"user_VFhihHKMRZyDDnZAJwLb2Q\")"},"createdAt":{"type":"string","description":"The inference creation date as an ISO string (example: 
\"2023-02-03T11:19:41.579Z\")"},"progress":{"type":"number","description":"The inference progress (within [0, 1])","minimum":0,"maximum":1},"id":{"type":"string","description":"The inference ID (example: \"inf_GTrL3mq4SXWyMxkOHRxlpw\")"},"imagesNumber":{"type":"number","description":"The total number of result images"},"parameters":{"type":"object","description":"The inference parameters","properties":{"ipAdapterImageIds":{"type":"array","description":"Asset id of the input IpAdapter images","items":{"type":"string"}},"styleFidelity":{"type":"number","description":"If style_fidelity=1.0, control more important, else if style_fidelity=0.0, prompt more important, else balanced\nOnly for \"reference\" inference type","minimum":0,"maximum":1},"controlEnd":{"type":"number","description":"Specifies how long the ControlNet guidance should be applied during the inference process.\n\nOnly available for Flux.1-dev based models.\n\nThe value represents the percentage of total inference steps where the ControlNet guidance is active.\nFor example:\n- 1.0: ControlNet guidance is applied during all inference steps\n- 0.5: ControlNet guidance is only applied during the first half of inference steps\n\nDefault values:\n- 0.5 for Canny modality\n- 0.6 for all other modalities","minimum":0.1,"maximum":1},"modality":{"type":"string","description":"The modality associated with the control image used for the generation: it can either be an object with a combination of maximum\n\nFor models of SD1.5 family:\n - up to 3 modalities from `canny`, `pose`, `depth`, `lines`, `seg`, `scribble`, `lineart`, `normal-map`, `illusion`\n - or one of the following presets: `character`, `landscape`, `city`, `interior`.\n\nFor models of the SDXL family:\n - up to 3 modalities from `canny`, `pose`, `depth`, `seg`, `illusion`, `scribble`\n - or one of the following presets: `character`, `landscape`.\n\nFor models of the FLUX schnell or dev families:\n- one modality from: `canny`, `tile`, `depth`, `blur`, 
`pose`, `gray`, `low-quality`\n\nOptionally, you can associate a value to these modalities or presets. The value must be within `]0.0, 1.0]`.\n\nExamples:\n- `canny`\n- `depth:0.5,pose:1.0`\n- `canny:0.5,depth:0.5,lines:0.3`\n- `landscape`\n- `character:0.5`\n- `illusion:1`\n\nNote: if you use a value that is not supported by the model family, this will result in an error.","enum":["blur","canny","depth","gray","illusion","lineart","lines","low-quality","normal-map","pose","scribble","seg","sketch","tile"]},"seed":{"type":"string","description":"Used to reproduce previous results. Default: randomly generated number."},"strength":{"type":"number","description":"Controls the noise intensity introduced to the input image, where a value of 1.0 completely erases the original image's details. Available for img2img and inpainting. (within [0.01, 1.0], default: 0.75)","minimum":0.01,"maximum":1},"ipAdapterType":{"type":"string","description":"The type of IP Adapter model to use. Must be one of [`style`, `character`], defaults to `style`","enum":["character","style"],"default":"style"},"aspectRatio":{"type":"string","description":"The aspect ratio of the generated images. Only used for the model flux.1.1-pro-ultra.\nThe aspect ratio is a string formatted as \"width:height\" (example: \"16:9\").","enum":["16:9","1:1","21:9","2:3","3:2","3:4","4:3","4:5","5:4","9:16","9:21"],"default":"1:1"},"type":{"type":"string","description":"The type of inference to use. 
Example: txt2img, img2img, etc.\n\nSelecting the right type will condition the expected parameters.\n\nNote: if model.type is `sd-xl*` or `sd-1_5*`, when using the `\"inpaint\"` inference type, Scenario determines the best available `baseModel` for a given `modelId`: one of `[\"stable-diffusion-inpainting\", \"stable-diffusion-xl-1.0-inpainting-0.1\"]` will be used.","enum":["controlnet","controlnet_img2img","controlnet_inpaint","controlnet_inpaint_ip_adapter","controlnet_ip_adapter","controlnet_reference","controlnet_texture","img2img","img2img_ip_adapter","img2img_texture","inpaint","inpaint_ip_adapter","outpaint","reference","reference_texture","txt2img","txt2img_ip_adapter","txt2img_texture"]},"ipAdapterImage":{"type":"string","description":"Signed URL to display the IpAdapter image"},"scheduler":{"type":"string","description":"The scheduler to use to override the default configured for the model. See detailed documentation for more details.","enum":["DDIMScheduler","DDPMScheduler","DEISMultistepScheduler","DPMSolverMultistepScheduler","DPMSolverSinglestepScheduler","EulerAncestralDiscreteScheduler","EulerDiscreteScheduler","HeunDiscreteScheduler","KDPM2AncestralDiscreteScheduler","KDPM2DiscreteScheduler","LCMScheduler","LMSDiscreteScheduler","PNDMScheduler","TCDScheduler","UniPCMultistepScheduler"]},"disableMerging":{"type":"boolean","description":"If set to true, the entire input image will likely change during inpainting. 
This results in faster inferences, but the output image will be harder to integrate if the input is just a small part of a larger image.","default":false},"disableModalityDetection":{"type":"boolean","description":"If false, the process uses the given image to detect the modality.\nIf true (default), the process will not try to detect the modality of the given image.\n\nFor example:\nwith `pose` modality and `false` value, the process will detect the pose of people in the given image\nwith `depth` modality and `false` value, the process will detect the depth of the given image\nwith `scribble` modality and `true` value, the process will use the given image as a scribble\n\n⚠️ For models of the FLUX schnell or dev families, this parameter is ignored. The modality detection is always disabled. ⚠️","default":true},"ipAdapterImages":{"type":"array","description":"Signed URL to display the IpAdapter images","items":{"type":"string"}},"baseModelId":{"type":"string","description":"The base model to use for the inference. 
Only Flux LoRA models can use this parameter.\nAllowed values are available in the model's attribute: `compliantModelIds`"},"controlStart":{"type":"number","description":"Specifies the starting point of the ControlNet guidance during the inference process.\n\nOnly available for Flux.1-dev based models.\n\nThe value represents the percentage of total inference steps where the ControlNet guidance starts.\nFor example:\n- 0.0: ControlNet guidance starts at the beginning of the inference steps\n- 0.5: ControlNet guidance starts at the middle of the inference steps","minimum":0,"maximum":0.9},"height":{"type":"number","description":"The height of the generated images, must be a 8 multiple (within [64, 2048], default: 512)\nIf model.type is `sd-xl`, `sd-xl-lora`, `sd-xl-composition` the height must be within [512, 2048]\nIf model.type is `sd-1_5`, the height must be within [64, 1024]\nIf model.type is `flux.1.1-pro-ultra`, you can use the aspectRatio parameter instead","minimum":64,"maximum":2048},"mask":{"type":"string","description":"Signed URL to display the mask image"},"controlImageId":{"type":"string","description":"Asset id of the controlnet input image"},"image":{"type":"string","description":"Signed URL to display the input image"},"imageId":{"type":"string","description":"Asset id of the input image"},"ipAdapterImageId":{"type":"string","description":"Asset id of the input IpAdapter image"},"ipAdapterScale":{"type":"number","description":"IpAdapter scale factor (within [0.0, 1.0], default: 0.9).","minimum":0,"maximum":1},"modelEpoch":{"type":"string","description":"The epoch of the model to use for the inference. 
Only available for Flux Lora Trained models."},"hideResults":{"type":"boolean","description":"If set, generated assets will be hidden and not returned in the list of images of the inference\nor when listing assets (default: false)","default":false},"ipAdapterScales":{"type":"array","description":"IpAdapter scale factors (within [0.0, 1.0], default: 0.9).","items":{"type":"number"}},"maskId":{"type":"string","description":"Asset id of the mask image"},"referenceAdain":{"type":"boolean","description":"Whether to use reference adain\nOnly for \"reference\" inference type","default":false},"negativePrompt":{"type":"string","description":"The prompt not to guide the image generation, ignored when guidance < 1 (example: \"((ugly face))\")\nFor Flux based model (not Fast-Flux): requires negativePromptStrength > 0 and active only for inference types txt2img / img2img / controlnet."},"intermediateImages":{"type":"boolean","description":"Enable or disable the intermediate images generation (default: false)","default":false},"concepts":{"type":"array","items":{"type":"object","properties":{"modelId":{"type":"string","description":"The model ID (example: \"model_eyVcnFJcR92BxBkz7N6g5w\")"},"modelEpoch":{"type":"string","description":"The epoch of the model (example: \"000001\")\nOnly available for Flux Lora Trained models"},"scale":{"type":"number","description":"The scale of the model (example: 1.0)\nFor Flux Kontext Prompt Editing, the scale is between 0 and 2.","minimum":-2,"maximum":2}},"required":["modelId","scale"]}},"controlImage":{"type":"string","description":"Signed URL to display the controlnet input image"},"guidance":{"type":"number","description":"Controls how closely the generated image follows the prompt. Higher values result in stronger adherence to the prompt. 
Default and allowed values depend on the model type:\n- For Flux dev models, the default is 3.5 and allowed values are within [0, 10]\n- For Flux pro models, the default is 3 and allowed values are within [2, 5]\n- For SDXL models, the default is 6 and allowed values are within [0, 20]\n- For SD1.5 models, the default is 7.5 and allowed values are within [0, 20]","minimum":0,"maximum":20},"numInferenceSteps":{"type":"number","description":"The number of denoising steps for each image generation (within [1, 150], default: 30)","minimum":1,"maximum":150},"numSamples":{"type":"number","description":"The number of images to generate (within [1, 128], default: 4)","minimum":1,"maximum":128},"width":{"type":"number","description":"The width of the generated images, must be a 8 multiple (within [64, 2048], default: 512)\nIf model.type is `sd-xl`, `sd-xl-lora`, `sd-xl-composition` the width must be within [512, 2048]\nIf model.type is `sd-1_5`, the width must be within [64, 1024]\nIf model.type is `flux.1.1-pro-ultra`, you can use the aspectRatio parameter instead","minimum":64,"maximum":2048},"negativePromptStrength":{"type":"number","description":"Only applicable for flux-dev based models for `txt2img`, `img2img`, and `controlnet` inference types.\n\nControls the influence of the negative prompt. Default 0 means the negative prompt has no effect. Higher values increase negative prompt influence.\nMust be > 0 if negativePrompt is provided.","minimum":0,"maximum":10},"referenceAttn":{"type":"boolean","description":"Whether to use reference query for self attention's context\nOnly for \"reference\" inference type","default":false},"prompt":{"type":"string","description":"Full text prompt including the model placeholder. 
(example: \"an illustration of phoenix in a fantasy world, flying over a mountain, 8k, bokeh effect\")"}},"required":["prompt","type"]},"queue":{"type":"object","description":"The generation queue information (for generations with the \"queued\" status)","properties":{"approximateNumberOfRequests":{"type":"number","description":"The approximate number of requests in the generation queue"},"approximatePosition":{"type":"number","description":"The approximate position in the generation queue"}},"required":["approximateNumberOfRequests","approximatePosition"]},"status":{"type":"string","description":"The inference status","enum":["canceled","failed","in-progress","model-loading","new","queued","succeeded"]}},"required":["authorId","createdAt","displayPrompt","id","images","imagesNumber","modelId","ownerId","parameters","status","userId"]}}}},"PostProjectWebhookEndpointsRequest":{"type":"object","required":["enabledEvents","url"],"properties":{"enabledEvents":{"type":"array","description":"The events that trigger the webhook. 
['*'] indicates that all events are enabled","items":{"type":"string","enum":["*","asset.download.completed","asset.download.created","asset.download.failed","generation.cancelled","generation.completed","generation.created","generation.failed","inference.cancelled","inference.completed","inference.created","inference.failed","model.download.completed","model.download.created","model.download.failed","model.training.cancelled","model.training.completed","model.training.failed","model.training.started"]}},"description":{"type":"string","description":"A description of the webhook endpoint"},"url":{"type":"string","description":"The URL of the webhook endpoint"}}},"PostModelsInferencesByModelIdResponse":{"type":"object","required":["inference","job"],"properties":{"creativeUnitsCost":{"type":"number","description":"The Creative Units cost for the request billed"},"creativeUnitsDiscount":{"type":"number","description":"The Creative Units discount for the request billed"},"inference":{"type":"object","properties":{"images":{"type":"array","description":"The result images URLs (can change over time when inference is in progress)","items":{"type":"object","properties":{"seed":{"type":"string","description":"The seed used to generate this image"},"id":{"type":"string","description":"The image ID (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"url":{"type":"string","description":"The image URL"}},"required":["id","seed","url"]}},"modelId":{"type":"string","description":"The model ID on which the inference was done (example: \"model_GTrL3mq4SXWyMxkOHRxlpw\")"},"authorId":{"type":"string","description":"The author user ID (example: \"user_VFhihHKMRZyDDnZAJwLb2Q\")"},"displayPrompt":{"type":"string","description":"The human-friendly prompt (without token)"},"ownerId":{"type":"string","description":"The owner user ID (example: \"team_VFhihHKMRZyDDnZAJwLb2Q\")"},"userId":{"type":"string","description":"(Deprecated) The user ID (example: 
\"user_VFhihHKMRZyDDnZAJwLb2Q\")"},"createdAt":{"type":"string","description":"The inference creation date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"progress":{"type":"number","description":"The inference progress (within [0, 1])","minimum":0,"maximum":1},"id":{"type":"string","description":"The inference ID (example: \"inf_GTrL3mq4SXWyMxkOHRxlpw\")"},"imagesNumber":{"type":"number","description":"The total number of result images"},"parameters":{"type":"object","description":"The inference parameters","properties":{"ipAdapterImageIds":{"type":"array","description":"Asset id of the input IpAdapter images","items":{"type":"string"}},"styleFidelity":{"type":"number","description":"If style_fidelity=1.0, control more important, else if style_fidelity=0.0, prompt more important, else balanced\nOnly for \"reference\" inference type","minimum":0,"maximum":1},"controlEnd":{"type":"number","description":"Specifies how long the ControlNet guidance should be applied during the inference process.\n\nOnly available for Flux.1-dev based models.\n\nThe value represents the percentage of total inference steps where the ControlNet guidance is active.\nFor example:\n- 1.0: ControlNet guidance is applied during all inference steps\n- 0.5: ControlNet guidance is only applied during the first half of inference steps\n\nDefault values:\n- 0.5 for Canny modality\n- 0.6 for all other modalities","minimum":0.1,"maximum":1},"modality":{"type":"string","description":"The modality associated with the control image used for the generation: it can either be an object with a combination of maximum\n\nFor models of SD1.5 family:\n - up to 3 modalities from `canny`, `pose`, `depth`, `lines`, `seg`, `scribble`, `lineart`, `normal-map`, `illusion`\n - or one of the following presets: `character`, `landscape`, `city`, `interior`.\n\nFor models of the SDXL family:\n - up to 3 modalities from `canny`, `pose`, `depth`, `seg`, `illusion`, `scribble`\n - or one of the following presets: 
`character`, `landscape`.\n\nFor models of the FLUX schnell or dev families:\n- one modality from: `canny`, `tile`, `depth`, `blur`, `pose`, `gray`, `low-quality`\n\nOptionally, you can associate a value to these modalities or presets. The value must be within `]0.0, 1.0]`.\n\nExamples:\n- `canny`\n- `depth:0.5,pose:1.0`\n- `canny:0.5,depth:0.5,lines:0.3`\n- `landscape`\n- `character:0.5`\n- `illusion:1`\n\nNote: if you use a value that is not supported by the model family, this will result in an error.","enum":["blur","canny","depth","gray","illusion","lineart","lines","low-quality","normal-map","pose","scribble","seg","sketch","tile"]},"seed":{"type":"string","description":"Used to reproduce previous results. Default: randomly generated number."},"strength":{"type":"number","description":"Controls the noise intensity introduced to the input image, where a value of 1.0 completely erases the original image's details. Available for img2img and inpainting. (within [0.01, 1.0], default: 0.75)","minimum":0.01,"maximum":1},"ipAdapterType":{"type":"string","description":"The type of IP Adapter model to use. Must be one of [`style`, `character`], defaults to `style`","enum":["character","style"],"default":"style"},"aspectRatio":{"type":"string","description":"The aspect ratio of the generated images. Only used for the model flux.1.1-pro-ultra.\nThe aspect ratio is a string formatted as \"width:height\" (example: \"16:9\").","enum":["16:9","1:1","21:9","2:3","3:2","3:4","4:3","4:5","5:4","9:16","9:21"],"default":"1:1"},"type":{"type":"string","description":"The type of inference to use. 
Example: txt2img, img2img, etc.\n\nSelecting the right type will condition the expected parameters.\n\nNote: if model.type is `sd-xl*` or `sd-1_5*`, when using the `\"inpaint\"` inference type, Scenario determines the best available `baseModel` for a given `modelId`: one of `[\"stable-diffusion-inpainting\", \"stable-diffusion-xl-1.0-inpainting-0.1\"]` will be used.","enum":["controlnet","controlnet_img2img","controlnet_inpaint","controlnet_inpaint_ip_adapter","controlnet_ip_adapter","controlnet_reference","controlnet_texture","img2img","img2img_ip_adapter","img2img_texture","inpaint","inpaint_ip_adapter","outpaint","reference","reference_texture","txt2img","txt2img_ip_adapter","txt2img_texture"]},"ipAdapterImage":{"type":"string","description":"Signed URL to display the IpAdapter image"},"scheduler":{"type":"string","description":"The scheduler to use to override the default configured for the model. See detailed documentation for more details.","enum":["DDIMScheduler","DDPMScheduler","DEISMultistepScheduler","DPMSolverMultistepScheduler","DPMSolverSinglestepScheduler","EulerAncestralDiscreteScheduler","EulerDiscreteScheduler","HeunDiscreteScheduler","KDPM2AncestralDiscreteScheduler","KDPM2DiscreteScheduler","LCMScheduler","LMSDiscreteScheduler","PNDMScheduler","TCDScheduler","UniPCMultistepScheduler"]},"disableMerging":{"type":"boolean","description":"If set to true, the entire input image will likely change during inpainting. 
This results in faster inferences, but the output image will be harder to integrate if the input is just a small part of a larger image.","default":false},"disableModalityDetection":{"type":"boolean","description":"If false, the process uses the given image to detect the modality.\nIf true (default), the process will not try to detect the modality of the given image.\n\nFor example:\nwith `pose` modality and `false` value, the process will detect the pose of people in the given image\nwith `depth` modality and `false` value, the process will detect the depth of the given image\nwith `scribble` modality and `true` value, the process will use the given image as a scribble\n\n⚠️ For models of the FLUX schnell or dev families, this parameter is ignored. The modality detection is always disabled. ⚠️","default":true},"ipAdapterImages":{"type":"array","description":"Signed URL to display the IpAdapter images","items":{"type":"string"}},"baseModelId":{"type":"string","description":"The base model to use for the inference. 
Only Flux LoRA models can use this parameter.\nAllowed values are available in the model's attribute: `compliantModelIds`"},"controlStart":{"type":"number","description":"Specifies the starting point of the ControlNet guidance during the inference process.\n\nOnly available for Flux.1-dev based models.\n\nThe value represents the percentage of total inference steps where the ControlNet guidance starts.\nFor example:\n- 0.0: ControlNet guidance starts at the beginning of the inference steps\n- 0.5: ControlNet guidance starts at the middle of the inference steps","minimum":0,"maximum":0.9},"height":{"type":"number","description":"The height of the generated images, must be a 8 multiple (within [64, 2048], default: 512)\nIf model.type is `sd-xl`, `sd-xl-lora`, `sd-xl-composition` the height must be within [512, 2048]\nIf model.type is `sd-1_5`, the height must be within [64, 1024]\nIf model.type is `flux.1.1-pro-ultra`, you can use the aspectRatio parameter instead","minimum":64,"maximum":2048},"mask":{"type":"string","description":"Signed URL to display the mask image"},"controlImageId":{"type":"string","description":"Asset id of the controlnet input image"},"image":{"type":"string","description":"Signed URL to display the input image"},"imageId":{"type":"string","description":"Asset id of the input image"},"ipAdapterImageId":{"type":"string","description":"Asset id of the input IpAdapter image"},"ipAdapterScale":{"type":"number","description":"IpAdapter scale factor (within [0.0, 1.0], default: 0.9).","minimum":0,"maximum":1},"modelEpoch":{"type":"string","description":"The epoch of the model to use for the inference. 
Only available for Flux Lora Trained models."},"hideResults":{"type":"boolean","description":"If set, generated assets will be hidden and not returned in the list of images of the inference\nor when listing assets (default: false)","default":false},"ipAdapterScales":{"type":"array","description":"IpAdapter scale factors (within [0.0, 1.0], default: 0.9).","items":{"type":"number"}},"maskId":{"type":"string","description":"Asset id of the mask image"},"referenceAdain":{"type":"boolean","description":"Whether to use reference adain\nOnly for \"reference\" inference type","default":false},"negativePrompt":{"type":"string","description":"The prompt not to guide the image generation, ignored when guidance < 1 (example: \"((ugly face))\")\nFor Flux based model (not Fast-Flux): requires negativePromptStrength > 0 and active only for inference types txt2img / img2img / controlnet."},"intermediateImages":{"type":"boolean","description":"Enable or disable the intermediate images generation (default: false)","default":false},"concepts":{"type":"array","items":{"type":"object","properties":{"modelId":{"type":"string","description":"The model ID (example: \"model_eyVcnFJcR92BxBkz7N6g5w\")"},"modelEpoch":{"type":"string","description":"The epoch of the model (example: \"000001\")\nOnly available for Flux Lora Trained models"},"scale":{"type":"number","description":"The scale of the model (example: 1.0)\nFor Flux Kontext Prompt Editing, the scale is between 0 and 2.","minimum":-2,"maximum":2}},"required":["modelId","scale"]}},"controlImage":{"type":"string","description":"Signed URL to display the controlnet input image"},"guidance":{"type":"number","description":"Controls how closely the generated image follows the prompt. Higher values result in stronger adherence to the prompt. 
Default and allowed values depend on the model type:\n- For Flux dev models, the default is 3.5 and allowed values are within [0, 10]\n- For Flux pro models, the default is 3 and allowed values are within [2, 5]\n- For SDXL models, the default is 6 and allowed values are within [0, 20]\n- For SD1.5 models, the default is 7.5 and allowed values are within [0, 20]","minimum":0,"maximum":20},"numInferenceSteps":{"type":"number","description":"The number of denoising steps for each image generation (within [1, 150], default: 30)","minimum":1,"maximum":150},"numSamples":{"type":"number","description":"The number of images to generate (within [1, 128], default: 4)","minimum":1,"maximum":128},"width":{"type":"number","description":"The width of the generated images, must be a 8 multiple (within [64, 2048], default: 512)\nIf model.type is `sd-xl`, `sd-xl-lora`, `sd-xl-composition` the width must be within [512, 2048]\nIf model.type is `sd-1_5`, the width must be within [64, 1024]\nIf model.type is `flux.1.1-pro-ultra`, you can use the aspectRatio parameter instead","minimum":64,"maximum":2048},"negativePromptStrength":{"type":"number","description":"Only applicable for flux-dev based models for `txt2img`, `img2img`, and `controlnet` inference types.\n\nControls the influence of the negative prompt. Default 0 means the negative prompt has no effect. Higher values increase negative prompt influence.\nMust be > 0 if negativePrompt is provided.","minimum":0,"maximum":10},"referenceAttn":{"type":"boolean","description":"Whether to use reference query for self attention's context\nOnly for \"reference\" inference type","default":false},"prompt":{"type":"string","description":"Full text prompt including the model placeholder. 
(example: \"an illustration of phoenix in a fantasy world, flying over a mountain, 8k, bokeh effect\")"}},"required":["prompt","type"]},"queue":{"type":"object","description":"The generation queue information (for generations with the \"queued\" status)","properties":{"approximateNumberOfRequests":{"type":"number","description":"The approximate number of requests in the generation queue"},"approximatePosition":{"type":"number","description":"The approximate position in the generation queue"}},"required":["approximateNumberOfRequests","approximatePosition"]},"status":{"type":"string","description":"The inference status","enum":["canceled","failed","in-progress","model-loading","new","queued","succeeded"]}},"required":["authorId","createdAt","displayPrompt","id","images","imagesNumber","modelId","ownerId","parameters","status","userId"]},"job":{"type":"object","properties":{"createdAt":{"type":"string","description":"The job creation date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"jobId":{"type":"string","description":"The job ID (example: \"job_ocZCnG1Df35XRL1QyCZSRxAG8\")"},"metadata":{"type":"object","description":"Metadata of the job with some additional information","properties":{"output":{"type":"object","description":"The output of the job","properties":{}},"input":{"type":"object","description":"The inputs for the job","properties":{}},"assetIds":{"type":"array","description":"List of produced assets for this job","items":{"type":"string"}},"flow":{"type":"array","description":"The flow of the job. Only available for workflow jobs.","items":{"type":"object","properties":{"jobId":{"type":"string","description":"If the flow is part of a WorkflowJob, this is the jobId for the node.\njobId is only available for nodes started. 
A node \"Pending\" for a running workflow job is not started."},"assets":{"type":"array","description":"List of produced assets for this node.","items":{"type":"object","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]}},"logicType":{"type":"string","description":"The type of the logic for the node.\nOnly available for logic nodes.","enum":["if-else"]},"dependsOn":{"type":"array","description":"The nodes that this node depends on.\nOnly available for nodes that have dependencies. Mainly used for user approval nodes.","items":{"type":"string"}},"modelId":{"type":"string","description":"The model id for the node. Mainly used for custom model tasks."},"inputs":{"type":"array","description":"The inputs of the node.","items":{"type":"object","description":"This is used to run the flow.","properties":{"parent":{"type":"boolean","description":"Whether this input represents a parent asset to assign to the produced assets.\nOnly available for \\`file\\` and \\`file_array\\` input types.\n\nFor \\`file_array\\`, the parent asset is the first item in the array."},"color":{"type":"boolean","description":"Whether the input is a color or not. Only available for \\`string\\` input type."},"inputs":{"type":"array","description":"The list of inputs which form an object within a container array.\nAll inputs are the same as the current object.\nThis is only available for type inputs_array inputs.","items":{"type":"object","properties":{}}},"minLength":{"type":"number","description":"The minimum allowed length for string inputs. Also applies to each item in \\`string_array\\`."},"backgroundBehavior":{"type":"string","description":"Specifies the background behavior for the input. 
Only available for \\`file\\` and \\`file_array\\`\ninput types with kind \\`image\\`.","enum":["opaque","transparent"]},"description":{"type":"string","description":"Help text displayed in the UI to provide additional information about the input"},"type":{"type":"string","description":"The data type of the input","enum":["boolean","file","file_array","inputs_array","model","model_array","number","number_array","string","string_array"]},"required":{"type":"object","description":"Set of rules that describes when this input is required:\n- \\`always\\`: Input is always required\n- \\`ifNotDefined\\`: Input is required when another specified input is not defined\n- \\`ifDefined\\`: Input is required when another specified input is defined\n- \\`conditionalValues\\`: Input is required when another input has a specific value\n\nBy default, the input is not required.","properties":{"always":{"type":"boolean","description":"Whether the input is always required"},"ifNotDefined":{"type":"object","description":"Makes this input required when another input is not defined:\n- Key: name of the input that must be undefined\n- Value: message to display when this input is required","properties":{}},"ifDefined":{"type":"object","description":"Makes this input required when another input is defined:\n- Key: name of the input that must be defined\n- Value: message to display when this input is required","properties":{}},"conditionalValues":{"type":"object","description":"Makes this input required when another input has a specific value:\n- Key: name of the input to check\n- Value: operation and allowed values that trigger the requirement","properties":{}}}},"modelTypes":{"type":"array","description":"The allowed model types for this input. 
Example: \\`[\"flux.1-lora\"]\\`.\nOnly available for \\`model_array\\` input type.","items":{"type":"string","enum":["custom","flux.1","flux.1-composition","flux.1-kontext-dev","flux.1-kontext-lora","flux.1-krea-dev","flux.1-krea-lora","flux.1-lora","flux.1-pro","flux.1.1-pro-ultra","flux1.1-pro","gpt-image-1","sd-1_5","sd-1_5-composition","sd-1_5-lora","sd-xl","sd-xl-composition","sd-xl-lora"]}},"ref":{"type":"object","description":"The reference to another input or output of the same workflow.\nMust have at least one of node or conditional.","properties":{"equal":{"type":"string","description":"This is the desired node output value if ref is an if/else node."},"node":{"type":"string","description":"The node id or 'workflow' if the source is a workflow input."},"conditional":{"type":"array","description":"The conditional nodes to reference.\nIf the conditional nodes are successful, the node will be successful.\nIf the conditional nodes are skipped, the node will be skipped.\nContains an array of node ids used to check the status of the nodes.","items":{"type":"string"}},"name":{"type":"string","description":"The name of the input or output to reference.\nIf the type is 'workflow', the name of the workflow input is required.\nIf the type is 'node', the name is not mandatory, except if you want all outputs of the node.\nTo get all outputs of a node, you can use the name 'all'."}}},"min":{"type":"number","description":"The minimum allowed value. Only available for \\`number\\` and array input types."},"promptSpark":{"type":"boolean","description":"Whether the input is used with prompt spark. Only available for \\`string\\` input type."},"placeholder":{"type":"string","description":"Placeholder text for the input. Only available for 'string' input type."},"group":{"type":"string","description":"Used to visually group inputs together in the UI. 
Inputs with the same group value appear\nconsecutively in the UI."},"allowedValues":{"type":"array","description":"The allowed values for the input. For \\`string\\` or \\`number\\` types, creates a single-select\ndropdown.\nFor \\`string_array\\` type, creates a multi-select dropdown.","items":{}},"costImpact":{"type":"boolean","description":"Whether this input affects the model's cost calculation"},"max":{"type":"number","description":"The maximum allowed value. Only available for \\`number\\` and \\`array\\` input types."},"kind":{"type":"string","description":"The asset kind of the input. Only taken into account for \\`file\\` and \\`file_array\\` input types.","enum":["3d","audio","document","image","image-hdr","json","video"]},"maskFrom":{"type":"string","description":"The name of the file input field to use as the mask source"},"label":{"type":"string","description":"The label displayed in the UI for this input"},"hint":{"type":"string","description":"Hint text displayed in the UI as a tooltip to guide the user"},"name":{"type":"string","description":"The name that must be used to call the model through the API"},"step":{"type":"number","description":"The step increment for numeric inputs. Only available for \\`number\\` input type.","minimum":1},"prompt":{"type":"boolean","description":"Whether the input is a prompt. When true, displays as a text area with prompt spark feature.\nOnly available for \\`string\\` input type."},"maxLength":{"type":"number","description":"The maximum allowed length for \\`string\\` inputs. 
Also applies to each item in \\`string_array\\`."}},"required":["name","type"]}},"id":{"type":"string","description":"The id of the node."},"logic":{"type":"object","description":"The logic of the node.\nOnly available for logic nodes.","properties":{"default":{"type":"string","description":"The default case of the logic.\nContains the id/output of the node to execute if no case is matched.\nOnly available for if/else nodes."},"transform":{"type":"string","description":"The transform of the logic.\nOnly available for transform nodes."},"cases":{"type":"array","description":"The cases of the logic.\nOnly available for if/else nodes.","items":{"type":"object","properties":{"condition":{"type":"string"},"value":{"type":"string"}},"required":["condition","value"]}}}},"type":{"type":"string","description":"The type of the job for the node.","enum":["custom-model","generate-prompt","logic","remove-background","transform","user-approval","workflow"]},"workflowId":{"type":"string","description":"The workflow id for the node. Mainly used for workflow tasks."},"status":{"type":"string","description":"The status of the node. 
Only available for WorkflowJob nodes.","enum":["failure","pending","processing","skipped","success"]}},"required":["id","status","type"]}},"workflowId":{"type":"string","description":"The workflow ID of the job if job is part of a workflow."},"workflowJobId":{"type":"string","description":"The workflow job ID of the job if job is part of a workflow job."}}},"statusHistory":{"type":"array","description":"The history of the different statuses the job went through with the ISO string date\nof when the job reached each statuses.","items":{"type":"object","properties":{"date":{"type":"string"},"status":{"type":"string","enum":["canceled","failure","finalizing","in-progress","pending","queued","success","warming-up"]}},"required":["date","status"]}},"progress":{"type":"number","description":"Progress of the job (between 0 and 1)"},"authorId":{"type":"string","description":"The author user ID (example: \"dcf121faaa1a0a0bbbd9ca1b73d62aea\")"},"jobType":{"type":"string","description":"The type of job","enum":["assets-download","canvas-export","caption","caption-llava","custom","describe-style","detection","embed","flux","flux-model-training","generate-prompt","image-generation","image-prompt-editing","inference","mesh-preview-rendering","model-download","model-import","model-training","openai-image-generation","patch-image","pixelate","reframe","remove-background","repaint","restyle","segment","skybox-3d","skybox-base-360","skybox-hdri","skybox-upscale-360","texture","translate","upload","upscale","upscale-skybox","upscale-texture","vectorize","workflow"]},"ownerId":{"type":"string","description":"The owner ID (example: \"team_U3Qmc8PCdWXwAQJ4Dvw4tV6D\")"},"billing":{"type":"object","description":"The billing of the job","properties":{"cuDiscount":{"type":"number"},"cuCost":{"type":"number"}},"required":["cuCost","cuDiscount"]},"status":{"type":"string","description":"The current status of the 
job","enum":["canceled","failure","finalizing","in-progress","pending","queued","success","warming-up"]},"updatedAt":{"type":"string","description":"The job last update date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"}},"required":["createdAt","jobId","jobType","metadata","progress","status","statusHistory","updatedAt"]}}},"PutAssetsByCollectionIdRequest":{"type":"object","required":["assetIds"],"properties":{"assetIds":{"type":"array","description":"The ids of the assets to add to the collection. (Max 49 at once)","items":{"type":"string"}}}},"PostCaptionInferencesResponse":{"type":"object","required":["captions","job"],"properties":{"creativeUnitsCost":{"type":"number","description":"The Creative Units cost for the request billed"},"creativeUnitsDiscount":{"type":"number","description":"The Creative Units discount for the request billed"},"job":{"type":"object","properties":{"createdAt":{"type":"string","description":"The job creation date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"jobId":{"type":"string","description":"The job ID (example: \"job_ocZCnG1Df35XRL1QyCZSRxAG8\")"},"metadata":{"type":"object","description":"Metadata of the job with some additional information","properties":{"output":{"type":"object","description":"The output of the job","properties":{}},"input":{"type":"object","description":"The inputs for the job","properties":{}},"assetIds":{"type":"array","description":"List of produced assets for this job","items":{"type":"string"}},"flow":{"type":"array","description":"The flow of the job. Only available for workflow jobs.","items":{"type":"object","properties":{"jobId":{"type":"string","description":"If the flow is part of a WorkflowJob, this is the jobId for the node.\njobId is only available for nodes started. 
A node \"Pending\" for a running workflow job is not started."},"assets":{"type":"array","description":"List of produced assets for this node.","items":{"type":"object","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]}},"logicType":{"type":"string","description":"The type of the logic for the node.\nOnly available for logic nodes.","enum":["if-else"]},"dependsOn":{"type":"array","description":"The nodes that this node depends on.\nOnly available for nodes that have dependencies. Mainly used for user approval nodes.","items":{"type":"string"}},"modelId":{"type":"string","description":"The model id for the node. Mainly used for custom model tasks."},"inputs":{"type":"array","description":"The inputs of the node.","items":{"type":"object","description":"This is used to run the flow.","properties":{"parent":{"type":"boolean","description":"Whether this input represents a parent asset to assign to the produced assets.\nOnly available for \\`file\\` and \\`file_array\\` input types.\n\nFor \\`file_array\\`, the parent asset is the first item in the array."},"color":{"type":"boolean","description":"Whether the input is a color or not. Only available for \\`string\\` input type."},"inputs":{"type":"array","description":"The list of inputs which form an object within a container array.\nAll inputs are the same as the current object.\nThis is only available for type inputs_array inputs.","items":{"type":"object","properties":{}}},"minLength":{"type":"number","description":"The minimum allowed length for string inputs. Also applies to each item in \\`string_array\\`."},"backgroundBehavior":{"type":"string","description":"Specifies the background behavior for the input. 
Only available for \\`file\\` and \\`file_array\\`\ninput types with kind \\`image\\`.","enum":["opaque","transparent"]},"description":{"type":"string","description":"Help text displayed in the UI to provide additional information about the input"},"type":{"type":"string","description":"The data type of the input","enum":["boolean","file","file_array","inputs_array","model","model_array","number","number_array","string","string_array"]},"required":{"type":"object","description":"Set of rules that describes when this input is required:\n- \\`always\\`: Input is always required\n- \\`ifNotDefined\\`: Input is required when another specified input is not defined\n- \\`ifDefined\\`: Input is required when another specified input is defined\n- \\`conditionalValues\\`: Input is required when another input has a specific value\n\nBy default, the input is not required.","properties":{"always":{"type":"boolean","description":"Whether the input is always required"},"ifNotDefined":{"type":"object","description":"Makes this input required when another input is not defined:\n- Key: name of the input that must be undefined\n- Value: message to display when this input is required","properties":{}},"ifDefined":{"type":"object","description":"Makes this input required when another input is defined:\n- Key: name of the input that must be defined\n- Value: message to display when this input is required","properties":{}},"conditionalValues":{"type":"object","description":"Makes this input required when another input has a specific value:\n- Key: name of the input to check\n- Value: operation and allowed values that trigger the requirement","properties":{}}}},"modelTypes":{"type":"array","description":"The allowed model types for this input. 
Example: \\`[\"flux.1-lora\"]\\`.\nOnly available for \\`model_array\\` input type.","items":{"type":"string","enum":["custom","flux.1","flux.1-composition","flux.1-kontext-dev","flux.1-kontext-lora","flux.1-krea-dev","flux.1-krea-lora","flux.1-lora","flux.1-pro","flux.1.1-pro-ultra","flux1.1-pro","gpt-image-1","sd-1_5","sd-1_5-composition","sd-1_5-lora","sd-xl","sd-xl-composition","sd-xl-lora"]}},"ref":{"type":"object","description":"The reference to another input or output of the same workflow.\nMust have at least one of node or conditional.","properties":{"equal":{"type":"string","description":"This is the desired node output value if ref is an if/else node."},"node":{"type":"string","description":"The node id or 'workflow' if the source is a workflow input."},"conditional":{"type":"array","description":"The conditional nodes to reference.\nIf the conditional nodes are successful, the node will be successful.\nIf the conditional nodes are skipped, the node will be skipped.\nContains an array of node ids used to check the status of the nodes.","items":{"type":"string"}},"name":{"type":"string","description":"The name of the input or output to reference.\nIf the type is 'workflow', the name of the workflow input is required.\nIf the type is 'node', the name is not mandatory, except if you want all outputs of the node.\nTo get all outputs of a node, you can use the name 'all'."}}},"min":{"type":"number","description":"The minimum allowed value. Only available for \\`number\\` and array input types."},"promptSpark":{"type":"boolean","description":"Whether the input is used with prompt spark. Only available for \\`string\\` input type."},"placeholder":{"type":"string","description":"Placeholder text for the input. Only available for 'string' input type."},"group":{"type":"string","description":"Used to visually group inputs together in the UI. 
Inputs with the same group value appear\nconsecutively in the UI."},"allowedValues":{"type":"array","description":"The allowed values for the input. For \\`string\\` or \\`number\\` types, creates a single-select\ndropdown.\nFor \\`string_array\\` type, creates a multi-select dropdown.","items":{}},"costImpact":{"type":"boolean","description":"Whether this input affects the model's cost calculation"},"max":{"type":"number","description":"The maximum allowed value. Only available for \\`number\\` and \\`array\\` input types."},"kind":{"type":"string","description":"The asset kind of the input. Only taken into account for \\`file\\` and \\`file_array\\` input types.","enum":["3d","audio","document","image","image-hdr","json","video"]},"maskFrom":{"type":"string","description":"The name of the file input field to use as the mask source"},"label":{"type":"string","description":"The label displayed in the UI for this input"},"hint":{"type":"string","description":"Hint text displayed in the UI as a tooltip to guide the user"},"name":{"type":"string","description":"The name that must be used to call the model through the API"},"step":{"type":"number","description":"The step increment for numeric inputs. Only available for \\`number\\` input type.","minimum":1},"prompt":{"type":"boolean","description":"Whether the input is a prompt. When true, displays as a text area with prompt spark feature.\nOnly available for \\`string\\` input type."},"maxLength":{"type":"number","description":"The maximum allowed length for \\`string\\` inputs. 
Also applies to each item in \\`string_array\\`."}},"required":["name","type"]}},"id":{"type":"string","description":"The id of the node."},"logic":{"type":"object","description":"The logic of the node.\nOnly available for logic nodes.","properties":{"default":{"type":"string","description":"The default case of the logic.\nContains the id/output of the node to execute if no case is matched.\nOnly available for if/else nodes."},"transform":{"type":"string","description":"The transform of the logic.\nOnly available for transform nodes."},"cases":{"type":"array","description":"The cases of the logic.\nOnly available for if/else nodes.","items":{"type":"object","properties":{"condition":{"type":"string"},"value":{"type":"string"}},"required":["condition","value"]}}}},"type":{"type":"string","description":"The type of the job for the node.","enum":["custom-model","generate-prompt","logic","remove-background","transform","user-approval","workflow"]},"workflowId":{"type":"string","description":"The workflow id for the node. Mainly used for workflow tasks."},"status":{"type":"string","description":"The status of the node. 
Only available for WorkflowJob nodes.","enum":["failure","pending","processing","skipped","success"]}},"required":["id","status","type"]}},"workflowId":{"type":"string","description":"The workflow ID of the job if job is part of a workflow."},"workflowJobId":{"type":"string","description":"The workflow job ID of the job if job is part of a workflow job."}}},"statusHistory":{"type":"array","description":"The history of the different statuses the job went through with the ISO string date\nof when the job reached each statuses.","items":{"type":"object","properties":{"date":{"type":"string"},"status":{"type":"string","enum":["canceled","failure","finalizing","in-progress","pending","queued","success","warming-up"]}},"required":["date","status"]}},"progress":{"type":"number","description":"Progress of the job (between 0 and 1)"},"authorId":{"type":"string","description":"The author user ID (example: \"dcf121faaa1a0a0bbbd9ca1b73d62aea\")"},"jobType":{"type":"string","description":"The type of job","enum":["assets-download","canvas-export","caption","caption-llava","custom","describe-style","detection","embed","flux","flux-model-training","generate-prompt","image-generation","image-prompt-editing","inference","mesh-preview-rendering","model-download","model-import","model-training","openai-image-generation","patch-image","pixelate","reframe","remove-background","repaint","restyle","segment","skybox-3d","skybox-base-360","skybox-hdri","skybox-upscale-360","texture","translate","upload","upscale","upscale-skybox","upscale-texture","vectorize","workflow"]},"ownerId":{"type":"string","description":"The owner ID (example: \"team_U3Qmc8PCdWXwAQJ4Dvw4tV6D\")"},"billing":{"type":"object","description":"The billing of the job","properties":{"cuDiscount":{"type":"number"},"cuCost":{"type":"number"}},"required":["cuCost","cuDiscount"]},"status":{"type":"string","description":"The current status of the 
job","enum":["canceled","failure","finalizing","in-progress","pending","queued","success","warming-up"]},"updatedAt":{"type":"string","description":"The job last update date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"}},"required":["createdAt","jobId","jobType","metadata","progress","status","statusHistory","updatedAt"]},"captions":{"type":"array","description":"The captions for each image.","items":{"type":"string"}},"detailsLevel":{"type":"string","description":"The details level used to generate the captions.\n\nWhen a modelId is provided and examples are available, the details level is ignored.","enum":["action","action+style"]}}},"PostModelsInferencesActionByModelIdAndInferenceIdRequest":{"type":"object","properties":{"action":{"type":"string","description":"The action to execute on the inference, such as canceling it","enum":["cancel"]}}},"GetCollectionsByCollectionIdResponse":{"type":"object","required":["collection"],"properties":{"collection":{"type":"object","properties":{"createdAt":{"type":"string","description":"The collection creation date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"thumbnail":{"type":"object","description":"The thumbnail for the collection (if any)","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"name":{"type":"string","description":"The collection name"},"assetCount":{"type":"number"},"id":{"type":"string","description":"The collection ID (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"ownerId":{"type":"string","description":"The owner ID (example: \"dcf121faaa1a0a0bbbd9ca1b73d62aea\")"},"modelCount":{"type":"number"},"itemCount":{"type":"number"},"updatedAt":{"type":"string","description":"The collection last update date as an ISO string (example: 
\"2023-02-03T11:19:41.579Z\")"}},"required":["assetCount","createdAt","id","itemCount","modelCount","name","ownerId","updatedAt"]}}},"DeleteModelsImagesByModelIdResponse":{},"GetTeamWebhookEndpointsResponse":{"type":"object","required":["webhookEndpoints"],"properties":{"webhookEndpoints":{"type":"array","description":"A list of the team webhooks","items":{"type":"object","properties":{"createdAt":{"type":"string","description":"The date and time the webhook endpoint was created"},"nbTotalCalls":{"type":"number","description":"The number of calls to the webhook endpoint"},"enabledEvents":{"type":"array","description":"The events that trigger the webhook. ['*'] indicates that all events are enabled","items":{"type":"string","enum":["*","asset.download.completed","asset.download.created","asset.download.failed","generation.cancelled","generation.completed","generation.created","generation.failed","inference.cancelled","inference.completed","inference.created","inference.failed","model.download.completed","model.download.created","model.download.failed","model.training.cancelled","model.training.completed","model.training.failed","model.training.started"]}},"nbFailedCalls":{"type":"number","description":"The number of calls to the webhook endpoint that have failed"},"description":{"type":"string","description":"A description of the webhook endpoint"},"id":{"type":"string","description":"The ID of the webhook endpoint"},"secret":{"type":"string","description":"The endpoint's secret, used to generate webhook signatures. 
Only returned at creation"},"ownerId":{"type":"string","description":"The ID of the owner of the webhook endpoint"},"enabled":{"type":"boolean","description":"Whether the webhook is enabled"},"url":{"type":"string","description":"The URL of the webhook endpoint"},"updatedAt":{"type":"string","description":"The date and time the webhook endpoint was updated"}},"required":["createdAt","enabled","enabledEvents","id","nbFailedCalls","nbTotalCalls","ownerId","updatedAt","url"]}}}},"PostWebhooksFalJobIdRequest":{"type":"object","required":["gateway_request_id","request_id","status"],"properties":{"gateway_request_id":{"type":"string"},"payload":{"type":"object","properties":{"model_meshes":{"type":"array","items":{"type":"object","properties":{"url":{"type":"string"}},"required":["url"]}},"image":{"type":"object","properties":{"url":{"type":"string"}},"required":["url"]},"images":{"type":"array","items":{"type":"object","properties":{"url":{"type":"string"}},"required":["url"]}},"seed":{"type":"number"},"model_mesh":{"type":"object","properties":{"url":{"type":"string"}},"required":["url"]},"videos":{"type":"array","items":{"type":"object","properties":{"url":{"type":"string"}},"required":["url"]}},"detail":{"type":"array","items":{"type":"object","properties":{"msg":{"type":"string"},"loc":{"type":"array","items":{"type":"string"}},"type":{"type":"string"}},"required":["loc","msg","type"]}},"video":{"type":"object","properties":{"url":{"type":"string"}},"required":["url"]}}},"error":{"type":"string"},"request_id":{"type":"string"},"status":{"type":"string","enum":["ERROR","OK"]}},"description":"Types for webhook response payloads"},"PutImagesSegmentationResponse":{"type":"object","required":["segments","sourceAsset"],"properties":{"sourceAsset":{"type":"object","properties":{"preview":{"type":"object","description":"The asset's preview.\n\nContains the assetId and the url of the 
preview.","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"metadata":{"type":"object","description":"Metadata of the asset with some additional information","properties":{"checkpoint":{"type":"string","description":"The checkpoint to use","enum":["fastsam_x","sam_b","sam_h"],"default":"fastsam_x"},"nbMasks":{"type":"number"},"image":{"type":"string","description":"The input image to process. Must reference an existing AssetId or be a data URL."},"resultContours":{"type":"boolean","description":"Boolean to output the contours.","default":false},"kind":{"type":"string","enum":["3d","audio","document","image","image-hdr","json","video"]},"sourceProjectId":{"type":"string"},"bbox":{"type":"array","description":"A bounding box around the object of interest, in the format [x1, y1, x2, y2].","items":{}},"originalAssetId":{"type":"string"},"backgroundOpacity":{"type":"number","description":"Int to set between 0 and 255 for the opacity of the background in the result images.","minimum":0,"maximum":255},"copiedAt":{"type":"string","format":"date-time"},"message":{"type":"string"},"type":{"type":"string","description":"The type of the asset. 
Ex: 'inference-txt2img' will represent an asset generated from a text to image model","enum":["3d-texture","3d-texture-albedo","3d-texture-metallic","3d-texture-mtl","3d-texture-normal","3d-texture-roughness","3d23d","3d23d-texture","audio2audio","background-removal","canvas","canvas-drawing","canvas-export","detection","generative-fill","image-prompt-editing","img23d","img2img","img2video","inference-controlnet","inference-controlnet-img2img","inference-controlnet-inpaint","inference-controlnet-inpaint-ip-adapter","inference-controlnet-ip-adapter","inference-controlnet-reference","inference-controlnet-texture","inference-img2img","inference-img2img-ip-adapter","inference-img2img-texture","inference-inpaint","inference-inpaint-ip-adapter","inference-reference","inference-reference-texture","inference-txt2img","inference-txt2img-ip-adapter","inference-txt2img-texture","patch","pixelization","reframe","restyle","segment","segmentation-image","segmentation-mask","skybox-3d","skybox-base-360","skybox-hdri","texture","texture-albedo","texture-ao","texture-edge","texture-height","texture-metallic","texture-normal","texture-smoothness","txt23d","txt2audio","txt2img","txt2video","unknown","uploaded","uploaded-3d","uploaded-audio","uploaded-avatar","uploaded-video","upscale","upscale-skybox","upscale-texture","upscale-video","vectorization","video2img","video2video"]},"contours":{"type":"array","items":{"type":"array","items":{"type":"array","items":{"type":"array","items":{"type":"number"}}}}},"points":{"type":"array","description":"List of points (label, x, y) in the image where label = 0 for background and 1 for object.","items":{"type":"array","items":{}}},"resultMask":{"type":"boolean","description":"Boolean to able return the masks (binary image) in the response.","default":true},"outputIndex":{"type":"number"},"size":{"type":"number"},"dilate":{"type":"number","description":"The number of pixels to dilate the result 
masks.","minimum":0,"maximum":30},"parentJobId":{"type":"string"},"width":{"type":"number"},"betterQuality":{"type":"boolean","description":"Remove small dark spots (i.e. “pepper”) and connect small bright cracks.","default":false},"text":{"type":"string","description":"A textual description / keywords describing the object of interest.","maxLength":100},"resultImage":{"type":"boolean","description":"Boolean to enable output of the cut out object.","default":false},"height":{"type":"number"}},"required":["kind","type"]},"automaticCaptioning":{"type":"string","description":"Automatic captioning of the asset"},"description":{"type":"string","description":"The description, it will contain in priority:\n- the manual description\n- the advanced captioning when the asset is used in training flow\n- the automatic captioning"},"privacy":{"type":"string","description":"The privacy of the asset","enum":["private","public","unlisted"]},"mimeType":{"type":"string","description":"The mime type of the asset (example: \"image/png\")"},"source":{"type":"string","description":"source of the 
asset","enum":["3d23d","3d23d:texture","3d:texture","3d:texture:albedo","3d:texture:metallic","3d:texture:mtl","3d:texture:normal","3d:texture:roughness","audio2audio","background-removal","canvas","canvas-drawing","canvas-export","detection","generative-fill","image-prompt-editing","img23d","img2img","img2video","inference-control-net","inference-control-net-img","inference-control-net-inpainting","inference-control-net-inpainting-ip-adapter","inference-control-net-ip-adapter","inference-control-net-reference","inference-control-net-texture","inference-img","inference-img-ip-adapter","inference-img-texture","inference-in-paint","inference-in-paint-ip-adapter","inference-reference","inference-reference-texture","inference-txt","inference-txt-ip-adapter","inference-txt-texture","patch","pixelization","reframe","restyle","segment","segmentation-image","segmentation-mask","skybox-3d","skybox-base-360","skybox-hdri","texture","texture:albedo","texture:ao","texture:edge","texture:height","texture:metallic","texture:normal","texture:smoothness","txt23d","txt2audio","txt2img","txt2video","unknown","uploaded","uploaded-3d","uploaded-audio","uploaded-avatar","uploaded-video","upscale","upscale-skybox","upscale-texture","upscale-video","vectorization","video2img","video2video"]},"ownerId":{"type":"string","description":"The owner (project) ID (example: \"proj_23tlk332lkht3kl2\" or \"team_dlkhgs23tlk3hlkth32lkht3kl2\" for old teams)"},"createdAt":{"type":"string","description":"The asset creation date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"firstFrame":{"type":"object","description":"The video asset's first frame.\n\nContains the assetId and the url of the first frame.","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"editCapabilities":{"type":"array","description":"List of edit 
capabilities","items":{"type":"string","enum":["DETECTION","GENERATIVE_FILL","PIXELATE","PROMPT_EDITING","REFINE","REFRAME","REMOVE_BACKGROUND","SEGMENTATION","UPSCALE","UPSCALE_360","VECTORIZATION"]}},"embedding":{"type":"array","description":"The embedding of the asset when requested.\n\nOnly available when an asset can be embedded (ie: not Detection maps)","items":{"type":"number"}},"id":{"type":"string","description":"The asset ID (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"updatedAt":{"type":"string","description":"The asset last update date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"thumbnail":{"type":"object","description":"The asset's thumbnail.\n\nContains the assetId and the url of the thumbnail.","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"nsfw":{"type":"array","description":"The NSFW labels","items":{"type":"string"}},"kind":{"type":"string","description":"The kind of asset","enum":["3d","audio","document","image","image-hdr","json","video"]},"lastFrame":{"type":"object","description":"The video asset's last frame.\n\nContains the assetId and the url of the last frame.","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"originalFileUrl":{"type":"string","description":"The original file url.\n\nContains the url of the original file. without any conversion. 
Only available for some specific video, audio and threeD assets.\nIs only specified if the given asset data has been replaced with a new file during the creation of the asset."},"authorId":{"type":"string","description":"The author user ID (example: \"dcf121faaa1a0a0bbbd9ca1b73d62aea\")"},"url":{"type":"string","description":"Signed URL to get the asset content"},"isHidden":{"type":"boolean","description":"Whether the asset is hidden."},"tags":{"type":"array","description":"The associated tags (example: [\"sci-fi\", \"landscape\"])","items":{"type":"string"}},"outputIndex":{"type":"number","description":"The output index of the asset within a job\nThis index is a non-negative integer that starts at 0\nIt is used to differentiate between multiple outputs of the same job\nIf the job has only one output, this index is 0"},"collectionIds":{"type":"array","description":"A list of CollectionId this asset belongs to","items":{"type":"string"}},"properties":{"type":"object","description":"The properties of the asset, content may depend on the kind of asset returned","properties":{"transcription":{"type":"object","description":"Transcription of the audio","properties":{"text":{"type":"string"}},"required":["text"]},"hasNormals":{"type":"boolean","description":"Whether the mesh has normal vectors"},"format":{"type":"string","description":"Format of the mesh file (e.g. 
'glb', etc.)"},"description":{"type":"string","description":"Description of the audio"},"bitrate":{"type":"number","description":"Bitrate of the media in bits per second"},"boneCount":{"type":"number","description":"Number of bones if skeleton exists"},"classification":{"type":"string","description":"Classification of the audio","enum":["effect","interview","music","other","sound","speech","text","unknown"]},"sampleRate":{"type":"number","description":"Sample rate of the media in Hz"},"faceCount":{"type":"number","description":"Number of faces/triangles in the mesh"},"vertexCount":{"type":"number","description":"Number of vertices in the mesh"},"animationFrameCount":{"type":"number","description":"Number of animation frames if animations exist"},"duration":{"type":"number","description":"Duration of the media in seconds"},"frameRate":{"type":"number","description":"Frame rate of the video in frames per second"},"nbFrames":{"type":"number","description":"Number of frames in the video"},"channels":{"type":"number","description":"Number of channels of the audio"},"size":{"type":"number"},"hasSkeleton":{"type":"boolean","description":"Whether the mesh has bones/skeleton"},"width":{"type":"number"},"hasAnimations":{"type":"boolean","description":"Whether the mesh has animations"},"codecName":{"type":"string","description":"Codec name of the media"},"hasUVs":{"type":"boolean","description":"Whether the mesh has UV coordinates"},"dimensions":{"type":"array","description":"Bounding box dimensions [width, height, depth]","items":{}},"height":{"type":"number"}},"required":["size"]},"status":{"type":"string","description":"The actual 
status","enum":["error","pending","success"]}},"required":["authorId","collectionIds","createdAt","editCapabilities","id","kind","metadata","mimeType","ownerId","privacy","properties","source","status","tags","updatedAt","url"]},"segments":{"type":"array","items":{"type":"object","properties":{"checkpoint":{"type":"string","enum":["fastsam_x","sam_b","sam_h"]},"nbMasks":{"type":"number"},"images":{"type":"array","items":{"type":"string"}},"bbox":{"type":"array","items":{}},"masks":{"type":"array","items":{"type":"string"}},"message":{"type":"string"},"contours":{"type":"array","items":{"type":"array","items":{"type":"array","items":{"type":"array","items":{"type":"number"}}}}}},"required":["bbox","checkpoint","message","nbMasks"]}}}},"PostTxt2imgInferencesRequest":{"type":"object","required":["modelId","prompt"],"properties":{"seed":{"type":"string","description":"Used to reproduce previous results. Default: randomly generated number."},"modelId":{"type":"string","description":"The model id to use for the inference"},"modelEpoch":{"type":"string","description":"The epoch of the model to use for the inference. Only available for Flux Lora Trained models."},"hideResults":{"type":"boolean","description":"If set, generated assets will be hidden and not returned in the list of images of the inference\nor when listing assets (default: false)","default":false},"aspectRatio":{"type":"string","description":"The aspect ratio of the generated images. 
Only used for the model flux.1.1-pro-ultra.\nThe aspect ratio is a string formatted as \"width:height\" (example: \"16:9\").","enum":["16:9","1:1","21:9","2:3","3:2","3:4","4:3","4:5","5:4","9:16","9:21"],"default":"1:1"},"negativePrompt":{"type":"string","description":"The prompt not to guide the image generation, ignored when guidance < 1 (example: \"((ugly face))\")\nFor Flux based model (not Fast-Flux): requires negativePromptStrength > 0 and active only for inference types txt2img / img2img / controlnet."},"scheduler":{"type":"string","description":"The scheduler to use to override the default configured for the model. See detailed documentation for more details.","enum":["DDIMScheduler","DDPMScheduler","DEISMultistepScheduler","DPMSolverMultistepScheduler","DPMSolverSinglestepScheduler","EulerAncestralDiscreteScheduler","EulerDiscreteScheduler","HeunDiscreteScheduler","KDPM2AncestralDiscreteScheduler","KDPM2DiscreteScheduler","LCMScheduler","LMSDiscreteScheduler","PNDMScheduler","TCDScheduler","UniPCMultistepScheduler"]},"intermediateImages":{"type":"boolean","description":"Enable or disable the intermediate images generation (default: false)","default":false},"concepts":{"type":"array","items":{"type":"object","properties":{"modelId":{"type":"string","description":"The model ID (example: \"model_eyVcnFJcR92BxBkz7N6g5w\")"},"modelEpoch":{"type":"string","description":"The epoch of the model (example: \"000001\")\nOnly available for Flux Lora Trained models"},"scale":{"type":"number","description":"The scale of the model (example: 1.0)\nFor Flux Kontext Prompt Editing, the scale is between 0 and 2.","minimum":-2,"maximum":2}},"required":["modelId","scale"]}},"guidance":{"type":"number","description":"Controls how closely the generated image follows the prompt. Higher values result in stronger adherence to the prompt. 
Default and allowed values depend on the model type:\n- For Flux dev models, the default is 3.5 and allowed values are within [0, 10]\n- For Flux pro models, the default is 3 and allowed values are within [2, 5]\n- For SDXL models, the default is 6 and allowed values are within [0, 20]\n- For SD1.5 models, the default is 7.5 and allowed values are within [0, 20]","minimum":0,"maximum":20},"numInferenceSteps":{"type":"number","description":"The number of denoising steps for each image generation (within [1, 150], default: 30)","minimum":1,"maximum":150},"numSamples":{"type":"number","description":"The number of images to generate (within [1, 128], default: 4)","minimum":1,"maximum":128},"width":{"type":"number","description":"The width of the generated images, must be a 8 multiple (within [64, 2048], default: 512)\nIf model.type is `sd-xl`, `sd-xl-lora`, `sd-xl-composition` the width must be within [512, 2048]\nIf model.type is `sd-1_5`, the width must be within [64, 1024]\nIf model.type is `flux.1.1-pro-ultra`, you can use the aspectRatio parameter instead","minimum":64,"maximum":2048},"negativePromptStrength":{"type":"number","description":"Only applicable for flux-dev based models for `txt2img`, `img2img`, and `controlnet` inference types.\n\nControls the influence of the negative prompt. Default 0 means the negative prompt has no effect. Higher values increase negative prompt influence.\nMust be > 0 if negativePrompt is provided.","minimum":0,"maximum":10},"baseModelId":{"type":"string","description":"The base model to use for the inference. Only Flux LoRA models can use this parameter.\nAllowed values are available in the model's attribute: `compliantModelIds`"},"prompt":{"type":"string","description":"Full text prompt including the model placeholder. 
(example: \"an illustration of phoenix in a fantasy world, flying over a mountain, 8k, bokeh effect\")"},"height":{"type":"number","description":"The height of the generated images, must be a 8 multiple (within [64, 2048], default: 512)\nIf model.type is `sd-xl`, `sd-xl-lora`, `sd-xl-composition` the height must be within [512, 2048]\nIf model.type is `sd-1_5`, the height must be within [64, 1024]\nIf model.type is `flux.1.1-pro-ultra`, you can use the aspectRatio parameter instead","minimum":64,"maximum":2048}}},"PostControlnetImg2imgInferencesRequest":{"type":"object","required":["modality","modelId","prompt"],"properties":{"controlEnd":{"type":"number","description":"Specifies how long the ControlNet guidance should be applied during the inference process.\n\nOnly available for Flux.1-dev based models.\n\nThe value represents the percentage of total inference steps where the ControlNet guidance is active.\nFor example:\n- 1.0: ControlNet guidance is applied during all inference steps\n- 0.5: ControlNet guidance is only applied during the first half of inference steps\n\nDefault values:\n- 0.5 for Canny modality\n- 0.6 for all other modalities","minimum":0.1,"maximum":1},"modality":{"type":"string","description":"The modality associated with the control image used for the generation: it can either be an object with a combination of maximum\n\nFor models of SD1.5 family:\n - up to 3 modalities from `canny`, `pose`, `depth`, `lines`, `seg`, `scribble`, `lineart`, `normal-map`, `illusion`\n - or one of the following presets: `character`, `landscape`, `city`, `interior`.\n\nFor models of the SDXL family:\n - up to 3 modalities from `canny`, `pose`, `depth`, `seg`, `illusion`, `scribble`\n - or one of the following presets: `character`, `landscape`.\n\nFor models of the FLUX schnell or dev families:\n- one modality from: `canny`, `tile`, `depth`, `blur`, `pose`, `gray`, `low-quality`\n\nOptionally, you can associate a value to these modalities or presets. 
The value must be within `]0.0, 1.0]`.\n\nExamples:\n- `canny`\n- `depth:0.5,pose:1.0`\n- `canny:0.5,depth:0.5,lines:0.3`\n- `landscape`\n- `character:0.5`\n- `illusion:1`\n\nNote: if you use a value that is not supported by the model family, this will result in an error.","enum":["blur","canny","depth","gray","illusion","lineart","lines","low-quality","normal-map","pose","scribble","seg","sketch","tile"]},"seed":{"type":"string","description":"Used to reproduce previous results. Default: randomly generated number."},"strength":{"type":"number","description":"Controls the noise intensity introduced to the input image, where a value of 1.0 completely erases the original image's details. Available for img2img and inpainting. (within [0.01, 1.0], default: 0.75)","minimum":0.01,"maximum":1},"modelId":{"type":"string","description":"The model id to use for the inference"},"scheduler":{"type":"string","description":"The scheduler to use to override the default configured for the model. See detailed documentation for more details.","enum":["DDIMScheduler","DDPMScheduler","DEISMultistepScheduler","DPMSolverMultistepScheduler","DPMSolverSinglestepScheduler","EulerAncestralDiscreteScheduler","EulerDiscreteScheduler","HeunDiscreteScheduler","KDPM2AncestralDiscreteScheduler","KDPM2DiscreteScheduler","LCMScheduler","LMSDiscreteScheduler","PNDMScheduler","TCDScheduler","UniPCMultistepScheduler"]},"disableModalityDetection":{"type":"boolean","description":"If false, the process uses the given image to detect the modality.\nIf true (default), the process will not try to detect the modality of the given image.\n\nFor example:\nwith `pose` modality and `false` value, the process will detect the pose of people in the given image\nwith `depth` modality and `false` value, the process will detect the depth of the given image\nwith `scribble` modality and `true`value, the process will use the given image as a scribble\n\n⚠️ For models of the FLUX schnell or dev families, this parameter 
is ignored. The modality detection is always disabled. ⚠️","default":true},"imageParentId":{"type":"string","description":"Specifies the parent asset Id for the image when provided as a dataurl."},"baseModelId":{"type":"string","description":"The base model to use for the inference. Only Flux LoRA models can use this parameter.\nAllowed values are available in the model's attribute: `compliantModelIds`"},"controlStart":{"type":"number","description":"Specifies the starting point of the ControlNet guidance during the inference process.\n\nOnly available for Flux.1-dev based models.\n\nThe value represents the percentage of total inference steps where the ControlNet guidance starts.\nFor example:\n- 0.0: ControlNet guidance starts at the beginning of the inference steps\n- 0.5: ControlNet guidance starts at the middle of the inference steps","minimum":0,"maximum":0.9},"height":{"type":"number","description":"The height of the generated images, must be a 8 multiple (within [64, 2048], default: 512)\nIf model.type is `sd-xl`, `sd-xl-lora`, `sd-xl-composition` the height must be within [512, 2048]\nIf model.type is `sd-1_5`, the height must be within [64, 1024]\nIf model.type is `flux.1.1-pro-ultra`, you can use the aspectRatio parameter instead","minimum":64,"maximum":2048},"imageHide":{"type":"boolean","description":"Toggles the hidden status of the image when provided as a dataurl.","default":false},"controlImageId":{"type":"string","description":"The controlnet input image as an AssetId. Will be ignored if the `controlnet` parameter is provided"},"image":{"type":"string","description":"The input image as a data URL (example: \"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVQYV2NgYAAAAAMAAWgmWQ0AAAAASUVORK5CYII=\") or the asset ID (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"imageId":{"type":"string","description":"Deprecated: The input image as an AssetId. 
Prefer to use image with the asset ID instead."},"modelEpoch":{"type":"string","description":"The epoch of the model to use for the inference. Only available for Flux Lora Trained models."},"hideResults":{"type":"boolean","description":"If set, generated assets will be hidden and not returned in the list of images of the inference\nor when listing assets (default: false)","default":false},"negativePrompt":{"type":"string","description":"The prompt not to guide the image generation, ignored when guidance < 1 (example: \"((ugly face))\")\nFor Flux based model (not Fast-Flux): requires negativePromptStrength > 0 and active only for inference types txt2img / img2img / controlnet."},"intermediateImages":{"type":"boolean","description":"Enable or disable the intermediate images generation (default: false)","default":false},"concepts":{"type":"array","items":{"type":"object","properties":{"modelId":{"type":"string","description":"The model ID (example: \"model_eyVcnFJcR92BxBkz7N6g5w\")"},"modelEpoch":{"type":"string","description":"The epoch of the model (example: \"000001\")\nOnly available for Flux Lora Trained models"},"scale":{"type":"number","description":"The scale of the model (example: 1.0)\nFor Flux Kontext Prompt Editing, the scale is between 0 and 2.","minimum":-2,"maximum":2}},"required":["modelId","scale"]}},"controlImage":{"type":"string","description":"The controlnet input image as a data URL (example: \"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVQYV2NgYAAAAAMAAWgmWQ0AAAAASUVORK5CYII=\")"},"guidance":{"type":"number","description":"Controls how closely the generated image follows the prompt. Higher values result in stronger adherence to the prompt. 
Default and allowed values depend on the model type:\n- For Flux dev models, the default is 3.5 and allowed values are within [0, 10]\n- For Flux pro models, the default is 3 and allowed values are within [2, 5]\n- For SDXL models, the default is 6 and allowed values are within [0, 20]\n- For SD1.5 models, the default is 7.5 and allowed values are within [0, 20]","minimum":0,"maximum":20},"numInferenceSteps":{"type":"number","description":"The number of denoising steps for each image generation (within [1, 150], default: 30)","minimum":1,"maximum":150},"numSamples":{"type":"number","description":"The number of images to generate (within [1, 128], default: 4)","minimum":1,"maximum":128},"width":{"type":"number","description":"The width of the generated images, must be a 8 multiple (within [64, 2048], default: 512)\nIf model.type is `sd-xl`, `sd-xl-lora`, `sd-xl-composition` the width must be within [512, 2048]\nIf model.type is `sd-1_5`, the width must be within [64, 1024]\nIf model.type is `flux.1.1-pro-ultra`, you can use the aspectRatio parameter instead","minimum":64,"maximum":2048},"negativePromptStrength":{"type":"number","description":"Only applicable for flux-dev based models for `txt2img`, `img2img`, and `controlnet` inference types.\n\nControls the influence of the negative prompt. Default 0 means the negative prompt has no effect. Higher values increase negative prompt influence.\nMust be > 0 if negativePrompt is provided.","minimum":0,"maximum":10},"prompt":{"type":"string","description":"Full text prompt including the model placeholder. (example: \"an illustration of phoenix in a fantasy world, flying over a mountain, 8k, bokeh effect\")"}}},"PostInpaintIpAdapterInferencesRequest":{"type":"object","required":["modelId","prompt"],"properties":{"ipAdapterImageIds":{"type":"array","description":"The IpAdapter images as an AssetId. 
Will be ignored if the `ipAdapterImages` parameter is provided","items":{"type":"string"}},"seed":{"type":"string","description":"Used to reproduce previous results. Default: randomly generated number."},"strength":{"type":"number","description":"Controls the noise intensity introduced to the input image, where a value of 1.0 completely erases the original image's details. Available for img2img and inpainting. (within [0.01, 1.0], default: 0.75)","minimum":0.01,"maximum":1},"modelId":{"type":"string","description":"The model id to use for the inference"},"ipAdapterType":{"type":"string","description":"The type of IP Adapter model to use. Must be one of [`style`, `character`], defaults to `style`","enum":["character","style"],"default":"style"},"ipAdapterImage":{"type":"string","description":"Deprecated for type txt2img-ip-adapter and img2img-ip-adapter, use `ipAdapterImages` instead.\nThe IpAdapter image as a data url. Will be ignored if the `ipAdapterImages` parameter is provided."},"scheduler":{"type":"string","description":"The scheduler to use to override the default configured for the model. See detailed documentation for more details.","enum":["DDIMScheduler","DDPMScheduler","DEISMultistepScheduler","DPMSolverMultistepScheduler","DPMSolverSinglestepScheduler","EulerAncestralDiscreteScheduler","EulerDiscreteScheduler","HeunDiscreteScheduler","KDPM2AncestralDiscreteScheduler","KDPM2DiscreteScheduler","LCMScheduler","LMSDiscreteScheduler","PNDMScheduler","TCDScheduler","UniPCMultistepScheduler"]},"disableMerging":{"type":"boolean","description":"If set to true, the entire input image will likely change during inpainting. 
This results in faster inferences, but the output image will be harder to integrate if the input is just a small part of a larger image.","default":false},"ipAdapterImages":{"type":"array","description":"The IpAdapter images as a data url.","items":{"type":"string"}},"imageParentId":{"type":"string","description":"Specifies the parent asset Id for the image when provided as a dataurl."},"baseModelId":{"type":"string","description":"The base model to use for the inference. Only Flux LoRA models can use this parameter.\nAllowed values are available in the model's attribute: `compliantModelIds`"},"height":{"type":"number","description":"The height of the generated images, must be a 8 multiple (within [64, 2048], default: 512)\nIf model.type is `sd-xl`, `sd-xl-lora`, `sd-xl-composition` the height must be within [512, 2048]\nIf model.type is `sd-1_5`, the height must be within [64, 1024]\nIf model.type is `flux.1.1-pro-ultra`, you can use the aspectRatio parameter instead","minimum":64,"maximum":2048},"imageHide":{"type":"boolean","description":"Toggles the hidden status of the image when provided as a dataurl.","default":false},"mask":{"type":"string","description":"The mask as a data URL, used to determine the area of change. The mask is a binary mask made out of white and black pixels. The white area is the one that will be replaced. 
(example: \"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAgAAAAIACAMAAADDpiTIAAABiVBMVEUAAADw8PDCwsLExMT5+fn19fX8/Pz////+/v79/f2hoaH6+vrc3NxnZ2dDQ0P4+PhkZGTs7OzOzs6Ojo709PRiYmLd3d1paWkoKCji4uI9PT3n5+fe3t7z8/NISEiysrLg4ODk5OSYmJh/f3/u7u5lZWVRUVHS0tKIiIg+Pj7p6emXl5dUVFQYGBjKysqtra1TU1PT09M8PDwcHBzR0dHq6uoEBAQmJiZ8fHzm5ub7+/swMDCrq6uKioqpqalHR0c3NzdOTk6BgYF7e3uwsLCAgIB3d3empqaNjY06OjrW1tZhYWG0tLQgICBxcXEICAhPT0/o6OgkJCRzc3N5eXnV1dXj4+NKSkobGxtaWlpfX1/a2trBwcF2dnYlJSV9fX3Hx8eSkpJNTU1sbGyWlpYRERGCgoIMDAzPz8+MjIy4uLiTk5PNzc3X19cxMTGDg4MpKSm8vLxGRkavr69QUFAKCgoqKiq2trbt7e329vaGhobl5eVra2tZWVk4ODgzMzNcXFyurq63t7dzhmTOAAAFeElEQVR4nO3dZXMUaRQF4EBIOgkQdFncFhZfZPF1d3d3d3f95TthC6iQTE9kuk+Y93m+MpW6t8+p7mGkZ2gIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACK8MDyR784eTw9BRkj1RU70rPQuvFqmjXpeWjVPdUMn6VnokUz86+qjemhaM3obAWoqvRYtGX2/DWgFA93K8DK9GS0Yku3AlTr06PRht+6FqB6Mj0bLVjfvQCeBpRgb00BRtLD0YKaAjgFlKCuAF4ULsAKp4CyHakrgLcGC1BXAKeAAihA4cbqCnAyPR2N211XgNH0dDRup2tA4RSgcLUFOJaejsbVFuC59HQ0bltdAW5KT0fjVtcV4GB6OppXV4C30sPRvLoCHE0PR/PqCvBEejiaV1eA9Gy0QAEKV5P/WHo2WlBTgHXp2WhBTQFuSM9GCzwFKNv3ngKU7WL3ApxOz0YLXAHKNqwAZavJf0V6NlpQU4Bd6dloXpd7BLkClKImfwUogO+Glu3VuvwVYPDV5q8AA2+lAhTtVH3+CjDoeuRf3ZgekEYt61WAi+kJaVLt98JdAwbe/b3z14ABdn4u+VfVY+k5acbNc8u/qjalJ6UJd881/47t6WHpu2PzyL/yCzID56755T/llvTM9M8H88+/Y5+XhQbE2QXlP2XVg5/cfnrPmvHLtxYcG3nhu+dXp/dhfmpvCLIw4+mdmLtn+59/xzPptZirRvL3AeLrRkP5uwxcJxrL3y8MLSVP/XHr7P/QYP5VtbbdJelm/7RYduza+ebXmzdfakSj+XvrcIn4tOGYu9uQXp2O92P5u5vAUjDnt3mbkF6exi/z9X5Mb89r0QLcl16faP7uLR9X+1XP5qXXJ5u/AqT1/KZHs35J71+8bP5OAGl7svn/nd6/eNn8t6TXJ5r/4fT27Evm/1N6e6InALeVXwKC+b+c3p2h8FOA29Lbc2+0ANXe9P7FO5MtQPVN+gCULpx/VQ2nj0Dh0vl7KTgsHb/vhoSl46+cArLS6XcsTx+Dkv2QTr/jxfRBKNnT6fQ7VqUPQsneS6dfuZ9Y1IZ0+h3b0gehZHU//9eWC+mDULLwB0Iv8b+AoLHe+TQufQyKlg6/8kpgVjr9ygkgK51+Vd2ZPgRlS8dffZ4+AoVL5+9u0mHh/Hem9y9eMv0xNxLPG0k2IL08Q0PLkwXwCkBe9s2gX9PrcyJaABeBvGwBjqfXJ1sAp4A4BShc+BZxbhaf9ki2AI+n9ydbgNfT65MtwD/p9fk5WoAP0+uTPQVsTm9PtgDp5cm+H3
QmvTxD0VNAenWmrIvlfzC9OpeE4h87ml6c/2XuF74svTZXBOIfOZVemqv29w6sv/79KL0y00y0m/+59L5ca0u7DUivywyrekQ2+vGhPj5VWJdelxkOdAtrbNvE5ceM960ByU2Z3UMzY1q56cj0x1xQgEG26WpA4wfWfjnrY/p0SxG/FbI0vTP1EcHDq7fWPOTtvhTgfGsr0Xf9uLHUV+klWIQ/F38deCO9A4uz2E+TH0ovwGIt7ovl6enpg4nRBefvi+EDYniBLw29lB6cvhlewNMBN4gfMOcmT9yxfe4XhInef5Hr0dmtk5NbJ799Ze36uvg3/pWek+btXdkl/jW/p0ejLbuufXYwtvvd9EwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAPfwHLuRXafg0SKQAAAAASUVORK5CYII=\")"},"image":{"type":"string","description":"The input image as a data URL (example: \"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVQYV2NgYAAAAAMAAWgmWQ0AAAAASUVORK5CYII=\") or the asset ID (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"imageId":{"type":"string","description":"Deprecated: The input image as an AssetId. Prefer to use image with the asset ID instead."},"ipAdapterImageId":{"type":"string","description":"Deprecated for type txt2img-ip-adapter and img2img-ip-adapter, use `ipAdapterImageIds` instead.\nThe IpAdapter image as an AssetId. Cannot be set if `ipAdapterImage` is provided. Will be ignored if the `ipAdapterImageIds` parameter is provided."},"ipAdapterScale":{"type":"number","description":"Deprecated for type txt2img-ip-adapter and img2img-ip-adapter, use `ipAdapterScales` instead.\nIpAdapter scale factor (within [0.0, 1.0], default: 0.9). Will be ignored if the `ipAdapterScales` parameter is provided","minimum":0,"maximum":1},"modelEpoch":{"type":"string","description":"The epoch of the model to use for the inference. 
Only available for Flux Lora Trained models."},"hideResults":{"type":"boolean","description":"If set, generated assets will be hidden and not returned in the list of images of the inference\nor when listing assets (default: false)","default":false},"ipAdapterScales":{"type":"array","description":"IpAdapter scale factors (within [0.0, 1.0], default: 0.9).","items":{"type":"number"}},"maskId":{"type":"string","description":"The mask as an AssetId. Will be ignored if the `image` parameter is provided"},"negativePrompt":{"type":"string","description":"The prompt not to guide the image generation, ignored when guidance < 1 (example: \"((ugly face))\")\nFor Flux based model (not Fast-Flux): requires negativePromptStrength > 0 and active only for inference types txt2img / img2img / controlnet."},"intermediateImages":{"type":"boolean","description":"Enable or disable the intermediate images generation (default: false)","default":false},"concepts":{"type":"array","items":{"type":"object","properties":{"modelId":{"type":"string","description":"The model ID (example: \"model_eyVcnFJcR92BxBkz7N6g5w\")"},"modelEpoch":{"type":"string","description":"The epoch of the model (example: \"000001\")\nOnly available for Flux Lora Trained models"},"scale":{"type":"number","description":"The scale of the model (example: 1.0)\nFor Flux Kontext Prompt Editing, the scale is between 0 and 2.","minimum":-2,"maximum":2}},"required":["modelId","scale"]}},"guidance":{"type":"number","description":"Controls how closely the generated image follows the prompt. Higher values result in stronger adherence to the prompt. 
Default and allowed values depend on the model type:\n- For Flux dev models, the default is 3.5 and allowed values are within [0, 10]\n- For Flux pro models, the default is 3 and allowed values are within [2, 5]\n- For SDXL models, the default is 6 and allowed values are within [0, 20]\n- For SD1.5 models, the default is 7.5 and allowed values are within [0, 20]","minimum":0,"maximum":20},"numInferenceSteps":{"type":"number","description":"The number of denoising steps for each image generation (within [1, 150], default: 30)","minimum":1,"maximum":150},"numSamples":{"type":"number","description":"The number of images to generate (within [1, 128], default: 4)","minimum":1,"maximum":128},"width":{"type":"number","description":"The width of the generated images, must be a 8 multiple (within [64, 2048], default: 512)\nIf model.type is `sd-xl`, `sd-xl-lora`, `sd-xl-composition` the width must be within [512, 2048]\nIf model.type is `sd-1_5`, the width must be within [64, 1024]\nIf model.type is `flux.1.1-pro-ultra`, you can use the aspectRatio parameter instead","minimum":64,"maximum":2048},"negativePromptStrength":{"type":"number","description":"Only applicable for flux-dev based models for `txt2img`, `img2img`, and `controlnet` inference types.\n\nControls the influence of the negative prompt. Default 0 means the negative prompt has no effect. Higher values increase negative prompt influence.\nMust be > 0 if negativePrompt is provided.","minimum":0,"maximum":10},"prompt":{"type":"string","description":"Full text prompt including the model placeholder. (example: \"an illustration of phoenix in a fantasy world, flying over a mountain, 8k, bokeh effect\")"}}},"PostProjectRequest":{"type":"object","required":["name"],"properties":{"privacyMode":{"type":"boolean","description":"Whether to create the project in privacyMode. 
Default is false."},"autoJoin":{"type":"boolean","description":"Whether to automatically join the project as admin."},"name":{"type":"string","description":"The name of the project."}}},"PutWorkflowUserApprovalByWorkflowIdResponse":{"type":"object","required":["job"],"properties":{"job":{"type":"object","properties":{"createdAt":{"type":"string","description":"The job creation date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"jobId":{"type":"string","description":"The job ID (example: \"job_ocZCnG1Df35XRL1QyCZSRxAG8\")"},"metadata":{"type":"object","description":"Metadata of the job with some additional information","properties":{"output":{"type":"object","description":"The output of the job","properties":{}},"input":{"type":"object","description":"The inputs for the job","properties":{}},"assetIds":{"type":"array","description":"List of produced assets for this job","items":{"type":"string"}},"flow":{"type":"array","description":"The flow of the job. Only available for workflow jobs.","items":{"type":"object","properties":{"jobId":{"type":"string","description":"If the flow is part of a WorkflowJob, this is the jobId for the node.\njobId is only available for nodes started. A node \"Pending\" for a running workflow job is not started."},"assets":{"type":"array","description":"List of produced assets for this node.","items":{"type":"object","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]}},"logicType":{"type":"string","description":"The type of the logic for the node.\nOnly available for logic nodes.","enum":["if-else"]},"dependsOn":{"type":"array","description":"The nodes that this node depends on.\nOnly available for nodes that have dependencies. Mainly used for user approval nodes.","items":{"type":"string"}},"modelId":{"type":"string","description":"The model id for the node. 
Mainly used for custom model tasks."},"inputs":{"type":"array","description":"The inputs of the node.","items":{"type":"object","description":"This is used to run the flow.","properties":{"parent":{"type":"boolean","description":"Whether this input represents a parent asset to assign to the produced assets.\nOnly available for \\`file\\` and \\`file_array\\` input types.\n\nFor \\`file_array\\`, the parent asset is the first item in the array."},"color":{"type":"boolean","description":"Whether the input is a color or not. Only available for \\`string\\` input type."},"inputs":{"type":"array","description":"The list of inputs which form an object within a container array.\nAll inputs are the same as the current object.\nThis is only available for type inputs_array inputs.","items":{"type":"object","properties":{}}},"minLength":{"type":"number","description":"The minimum allowed length for string inputs. Also applies to each item in \\`string_array\\`."},"backgroundBehavior":{"type":"string","description":"Specifies the background behavior for the input. 
Only available for \\`file\\` and \\`file_array\\`\ninput types with kind \\`image\\`.","enum":["opaque","transparent"]},"description":{"type":"string","description":"Help text displayed in the UI to provide additional information about the input"},"type":{"type":"string","description":"The data type of the input","enum":["boolean","file","file_array","inputs_array","model","model_array","number","number_array","string","string_array"]},"required":{"type":"object","description":"Set of rules that describes when this input is required:\n- \\`always\\`: Input is always required\n- \\`ifNotDefined\\`: Input is required when another specified input is not defined\n- \\`ifDefined\\`: Input is required when another specified input is defined\n- \\`conditionalValues\\`: Input is required when another input has a specific value\n\nBy default, the input is not required.","properties":{"always":{"type":"boolean","description":"Whether the input is always required"},"ifNotDefined":{"type":"object","description":"Makes this input required when another input is not defined:\n- Key: name of the input that must be undefined\n- Value: message to display when this input is required","properties":{}},"ifDefined":{"type":"object","description":"Makes this input required when another input is defined:\n- Key: name of the input that must be defined\n- Value: message to display when this input is required","properties":{}},"conditionalValues":{"type":"object","description":"Makes this input required when another input has a specific value:\n- Key: name of the input to check\n- Value: operation and allowed values that trigger the requirement","properties":{}}}},"modelTypes":{"type":"array","description":"The allowed model types for this input. 
Example: \\`[\"flux.1-lora\"]\\`.\nOnly available for \\`model_array\\` input type.","items":{"type":"string","enum":["custom","flux.1","flux.1-composition","flux.1-kontext-dev","flux.1-kontext-lora","flux.1-krea-dev","flux.1-krea-lora","flux.1-lora","flux.1-pro","flux.1.1-pro-ultra","flux1.1-pro","gpt-image-1","sd-1_5","sd-1_5-composition","sd-1_5-lora","sd-xl","sd-xl-composition","sd-xl-lora"]}},"ref":{"type":"object","description":"The reference to another input or output of the same workflow.\nMust have at least one of node or conditional.","properties":{"equal":{"type":"string","description":"This is the desired node output value if ref is an if/else node."},"node":{"type":"string","description":"The node id or 'workflow' if the source is a workflow input."},"conditional":{"type":"array","description":"The conditional nodes to reference.\nIf the conditional nodes are successful, the node will be successful.\nIf the conditional nodes are skipped, the node will be skipped.\nContains an array of node ids used to check the status of the nodes.","items":{"type":"string"}},"name":{"type":"string","description":"The name of the input or output to reference.\nIf the type is 'workflow', the name is the name of the input of the workflow is required\nIf the type is 'node', the name is not mandatory, except if you want all outputs of the node.\nTo get all outputs of a node, you can use the name 'all'."}}},"min":{"type":"number","description":"The minimum allowed value. Only available for \\`number\\` and array input types."},"promptSpark":{"type":"boolean","description":"Whether the input is used with prompt spark. Only available for \\`string\\` input type."},"placeholder":{"type":"string","description":"Placeholder text for the input. Only available for 'string' input type."},"group":{"type":"string","description":"Used to visually group inputs together in the UI. 
Inputs with the same group value appear\nconsecutively in the UI."},"allowedValues":{"type":"array","description":"The allowed values for the input. For \\`string\\` or \\`number\\` types, creates a single-select\ndropdown.\nFor \\`string_array\\` type, creates a multi-select dropdown.","items":{}},"costImpact":{"type":"boolean","description":"Whether this input affects the model's cost calculation"},"max":{"type":"number","description":"The maximum allowed value. Only available for \\`number\\` and \\`array\\` input types."},"kind":{"type":"string","description":"The asset kind of the input. Only taken into account for \\`file\\` and \\`file_array\\` input types.","enum":["3d","audio","document","image","image-hdr","json","video"]},"maskFrom":{"type":"string","description":"The name of the file input field to use as the mask source"},"label":{"type":"string","description":"The label displayed in the UI for this input"},"hint":{"type":"string","description":"Hint text displayed in the UI as a tooltip to guide the user"},"name":{"type":"string","description":"The name that must be used to call the model through the API"},"step":{"type":"number","description":"The step increment for numeric inputs. Only available for \\`number\\` input type.","minimum":1},"prompt":{"type":"boolean","description":"Whether the input is a prompt. When true, displays as a text area with prompt spark feature.\nOnly available for \\`string\\` input type."},"maxLength":{"type":"number","description":"The maximum allowed length for \\`string\\` inputs. 
Also applies to each item in \\`string_array\\`."}},"required":["name","type"]}},"id":{"type":"string","description":"The id of the node."},"logic":{"type":"object","description":"The logic of the node.\nOnly available for logic nodes.","properties":{"default":{"type":"string","description":"The default case of the logic.\nContains the id/output of the node to execute if no case is matched.\nOnly available for if/else nodes."},"transform":{"type":"string","description":"The transform of the logic.\nOnly available for transform nodes."},"cases":{"type":"array","description":"The cases of the logic.\nOnly available for if/else nodes.","items":{"type":"object","properties":{"condition":{"type":"string"},"value":{"type":"string"}},"required":["condition","value"]}}}},"type":{"type":"string","description":"The type of the job for the node.","enum":["custom-model","generate-prompt","logic","remove-background","transform","user-approval","workflow"]},"workflowId":{"type":"string","description":"The workflow id for the node. Mainly used for workflow tasks."},"status":{"type":"string","description":"The status of the node. 
Only available for WorkflowJob nodes.","enum":["failure","pending","processing","skipped","success"]}},"required":["id","status","type"]}},"workflowId":{"type":"string","description":"The workflow ID of the job if job is part of a workflow."},"workflowJobId":{"type":"string","description":"The workflow job ID of the job if job is part of a workflow job."}}},"statusHistory":{"type":"array","description":"The history of the different statuses the job went through with the ISO string date\nof when the job reached each statuses.","items":{"type":"object","properties":{"date":{"type":"string"},"status":{"type":"string","enum":["canceled","failure","finalizing","in-progress","pending","queued","success","warming-up"]}},"required":["date","status"]}},"progress":{"type":"number","description":"Progress of the job (between 0 and 1)"},"authorId":{"type":"string","description":"The author user ID (example: \"dcf121faaa1a0a0bbbd9ca1b73d62aea\")"},"jobType":{"type":"string","description":"The type of job","enum":["assets-download","canvas-export","caption","caption-llava","custom","describe-style","detection","embed","flux","flux-model-training","generate-prompt","image-generation","image-prompt-editing","inference","mesh-preview-rendering","model-download","model-import","model-training","openai-image-generation","patch-image","pixelate","reframe","remove-background","repaint","restyle","segment","skybox-3d","skybox-base-360","skybox-hdri","skybox-upscale-360","texture","translate","upload","upscale","upscale-skybox","upscale-texture","vectorize","workflow"]},"ownerId":{"type":"string","description":"The owner ID (example: \"team_U3Qmc8PCdWXwAQJ4Dvw4tV6D\")"},"billing":{"type":"object","description":"The billing of the job","properties":{"cuDiscount":{"type":"number"},"cuCost":{"type":"number"}},"required":["cuCost","cuDiscount"]},"status":{"type":"string","description":"The current status of the 
job","enum":["canceled","failure","finalizing","in-progress","pending","queued","success","warming-up"]},"updatedAt":{"type":"string","description":"The job last update date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"}},"required":["createdAt","jobId","jobType","metadata","progress","status","statusHistory","updatedAt"]}}},"PostAssetPrivacyByAssetIdRequest":{"type":"object","properties":{"privacy":{"type":"string","description":"The privacy of the asset","enum":["public","private"]}}},"PostUploadsActionResponse":{"type":"object","required":["upload"],"properties":{"upload":{"type":"object","properties":{"originalFileName":{"type":"string"},"fileName":{"type":"string"},"partsCount":{"type":"number"},"kind":{"type":"string","description":"The kind of the file once validated (example: \"model\")","enum":["3d","asset","audio","avatar","image","model","video"]},"errorMessage":{"type":"string"},"entityId":{"type":"string"},"source":{"type":"string","enum":["civitai","huggingface","multipart","other","url"]},"authorId":{"type":"string"},"ownerId":{"type":"string"},"url":{"type":"string"},"createdAt":{"type":"string"},"jobId":{"type":"string"},"fileSize":{"type":"number"},"provider":{"type":"string","enum":["civitai","huggingface","other"]},"parts":{"type":"array","items":{"type":"object","properties":{"number":{"type":"number"},"expires":{"type":"string"},"url":{"type":"string"}},"required":["expires","number","url"]}},"id":{"type":"string"},"config":{"type":"object","properties":{}},"contentType":{"type":"string"},"assetOptions":{"type":"object","properties":{"hide":{"type":"boolean","description":"Specify if the asset should be hidden from the user."},"collectionIds":{"type":"array","description":"The collection ids to add the asset to.","items":{"type":"string"}},"parentId":{"type":"string","description":"The parentId of the 
asset."}}},"status":{"type":"string","enum":["complete","failed","imported","pending","validated","validating"]},"updatedAt":{"type":"string"}},"required":["authorId","createdAt","fileName","id","kind","ownerId","source","status","updatedAt"]}}},"PostCaptionInferencesRequest":{"type":"object","required":["images"],"properties":{"ensureIPCleared":{"type":"boolean","description":"Whether we try to ensure IP removal for new prompt generation."},"images":{"type":"array","description":"List of images used to generate captions. Results are returned in the same order as the given\nimages.\n\nImages are set as data URLs (example: \\\"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVQYV2NgYAAAAAMAAWgmWQ0AAAAASUVORK5CYII=\\\") or the asset IDs (example: \\\"asset_GTrL3mq4SXWyMxkOHRxlpw\\\").\n\nNotes:\n- if both `modelId` and `images` are provided, `modelId`'s examples and training images will be\n  used to influence the caption structure of the images\n- if only `images` are provided, the captions will be conditioned by the `detailsLevel` parameter\n- Replaces `assetIds` parameter\n- if you want to caption multiple images at a time, please prefer using asset ids instead of data url","items":{"type":"string"}},"seed":{"type":"number","description":"If specified, the API will make a best effort to produce the same results, such that repeated requests with the same `seed` and parameters should return the same outputs. Must be used along with the same parameters including prompt, model's state, etc."},"unwantedSequences":{"type":"array","description":"Optional list of word sequences that should not be present in the generated prompts.","items":{"type":"string"}},"modelId":{"type":"string","description":"When provided, the model will follow the model's training images and examples' prompt to generate the captions."},"temperature":{"type":"number","description":"The sampling temperature to use. 
Higher values like `0.8` will make the output more random, while lower values like `0.2` will make it more focused and deterministic.\n\nWe generally recommend altering this or `topP` but not both.","minimum":0,"maximum":2},"assetIds":{"type":"array","description":"The assetIds to generate captions. Results are returned in the same order as the given\nassetIds. Deprecated, use `images` parameter instead.","items":{"type":"string"}},"topP":{"type":"number","description":"An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So `0.1` means only the tokens comprising the top `10%` probability mass are considered.\n\nWe generally recommend altering this or `temperature` but not both.","minimum":0,"maximum":1},"detailsLevel":{"type":"string","description":"The details level used to generate the captions.\n\nWhen a modelId is provided and examples are available, the details level is ignored.","enum":["action","action+style"]}}},"PutImageVectorizationResponse":{"type":"object","required":["asset"],"properties":{"image":{"type":"string","description":"The vectorized image in base64 format string only if returnImage is true."},"asset":{"type":"object","properties":{"preview":{"type":"object","description":"The asset's preview.\n\nContains the assetId and the url of the preview.","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"metadata":{"type":"object","description":"Metadata of the asset with some additional information","properties":{"maxIterations":{"type":"number"},"modality":{"type":"string","description":"Modality to detect","enum":["canny","depth","grayscale","lineart_anime","mlsd","normal","pose","scribble","segmentation","sketch"],"default":"canny"},"tileStyle":{"type":"boolean","description":"If set to true, during the upscaling process, the model will match tiles of the source image with tiles of the style image(s). 
This will result in a more coherent restyle. Works best with style images that have a similar composition.","default":false},"modelId":{"type":"string","description":"The modelId used to generate this asset"},"sourceProjectId":{"type":"string"},"bbox":{"type":"array","description":"A bounding box around the object of interest, in the format [x1, y1, x2, y2].","items":{}},"keypointThreshold":{"type":"number","description":"How polished is the surface? 0 is like a rough surface, 1 is like a mirror","minimum":0,"maximum":1},"type":{"type":"string","description":"The type of the asset. Ex: 'inference-txt2img' will represent an asset generated from a text to image model","enum":["3d-texture","3d-texture-albedo","3d-texture-metallic","3d-texture-mtl","3d-texture-normal","3d-texture-roughness","3d23d","3d23d-texture","audio2audio","background-removal","canvas","canvas-drawing","canvas-export","detection","generative-fill","image-prompt-editing","img23d","img2img","img2video","inference-controlnet","inference-controlnet-img2img","inference-controlnet-inpaint","inference-controlnet-inpaint-ip-adapter","inference-controlnet-ip-adapter","inference-controlnet-reference","inference-controlnet-texture","inference-img2img","inference-img2img-ip-adapter","inference-img2img-texture","inference-inpaint","inference-inpaint-ip-adapter","inference-reference","inference-reference-texture","inference-txt2img","inference-txt2img-ip-adapter","inference-txt2img-texture","patch","pixelization","reframe","restyle","segment","segmentation-image","segmentation-mask","skybox-3d","skybox-base-360","skybox-hdri","texture","texture-albedo","texture-ao","texture-edge","texture-height","texture-metallic","texture-normal","texture-smoothness","txt23d","txt2audio","txt2img","txt2video","unknown","uploaded","uploaded-3d","uploaded-audio","uploaded-avatar","uploaded-video","upscale","upscale-skybox","upscale-texture","upscale-video","vectorization","video2img","video2video"]},"text":{"type":"string","desc
ription":"A textual description / keywords describing the object of interest.","maxLength":100},"height":{"type":"number"},"nbMasks":{"type":"number"},"creativityDecay":{"type":"number","description":"Amount of decay in creativity over the upscale process. The lowest the value, the less the creativity will be preserved over the upscale process.","minimum":0,"maximum":100},"layerDifference":{"type":"number"},"scalingFactor":{"type":"number","description":"Scaling factor (when `targetWidth` not specified)","minimum":1,"maximum":16},"inputFidelity":{"type":"string","description":"When set to `high`, allows to better preserve details from the input images in the output.\nThis is especially useful when using images that contain elements like faces or logos that\nrequire accurate preservation in the generated image.\n\nYou can provide multiple input images that will all be preserved with high fidelity, but keep\nin mind that the first image will be preserved with richer textures and finer details, so if\nyou include elements such as faces, consider placing them in the first image.\n\nOnly available for the `gpt-image-1` model.","enum":["high","low"],"default":"low"},"modelType":{"type":"string","description":"The type of the generator used","enum":["custom","flux.1","flux.1-composition","flux.1-kontext-dev","flux.1-kontext-lora","flux.1-krea-dev","flux.1-krea-lora","flux.1-lora","flux.1-pro","flux.1.1-pro-ultra","flux1.1-pro","gpt-image-1","sd-1_5","sd-1_5-composition","sd-1_5-lora","sd-xl","sd-xl-composition","sd-xl-lora"]},"numOutputs":{"type":"number","description":"The number of outputs to generate.","minimum":1,"maximum":8},"angular":{"type":"number","description":"How angular is the surface? 
0 is like a sphere, 1 is like a mechanical object","minimum":0,"maximum":1},"targetHeight":{"type":"number","description":"The target height of the output image.","minimum":0,"maximum":2048},"concepts":{"type":"array","description":"Flux Kontext LoRA to style the image.\nFor Flux Kontext Prompt Editing.","items":{"type":"object","properties":{"modelId":{"type":"string","description":"The model ID (example: \"model_eyVcnFJcR92BxBkz7N6g5w\")"},"modelEpoch":{"type":"string","description":"The epoch of the model (example: \"000001\")\nOnly available for Flux Lora Trained models"},"scale":{"type":"number","description":"The scale of the model (example: 1.0)\nFor Flux Kontext Prompt Editing, the scale is between 0 and 2.","minimum":-2,"maximum":2}},"required":["modelId","scale"]}},"size":{"type":"number"},"spliceThreshold":{"type":"number"},"dilate":{"type":"number","description":"The number of pixels to dilate the result masks.","minimum":0,"maximum":30},"style":{"type":"string","enum":["3d-cartoon","3d-rendered","anime","cartoon","cinematic","claymation","cloud-skydome","comic","cyberpunk","enchanted","fantasy","ink","manga","manga-color","minimalist","neon-tron","oil-painting","pastel","photo","photography","psychedelic","retro-fantasy","scifi-concept-art","space","standard","whimsical"]},"imageFidelity":{"type":"number","description":"Strengthen the similarity to the original image during the upscale. 
Default: optimized for your preset and style.","minimum":0,"maximum":100},"inputLocation":{"type":"string","description":"Location of the input image in the output.","enum":["bottom","left","middle","right","top"],"default":"middle"},"seed":{"type":"string","description":"The seed used to generate this asset"},"preset":{"type":"string"},"trainingImage":{"type":"boolean"},"resultMask":{"type":"boolean","description":"Boolean to able return the masks (binary image) in the response.","default":true},"lockExpiresAt":{"type":"string","description":"The ISO timestamp when the lock on the canvas will expire"},"baseModelId":{"type":"string","description":"The baseModelId that maybe changed at inference time"},"pathPrecision":{"type":"number"},"rootParentId":{"type":"string"},"sharpen":{"type":"boolean","description":"Sharpen tiles.","default":false},"cornerThreshold":{"type":"number"},"styleImages":{"type":"array","description":"List of style images. Most of the time, only one image is enough. It must be existing AssetIds.","items":{"type":"string"}},"texture":{"type":"string","description":"The asset to convert in texture maps. Must reference an existing AssetId."},"filterSpeckle":{"type":"number"},"minThreshold":{"type":"number","description":"Minimum threshold for Grayscale conversion"},"maxThreshold":{"type":"number","description":"Maximum threshold for Grayscale conversion"},"styleImagesFidelity":{"type":"number","description":"Condition the influence of the style image(s). The higher the value, the more the style images will influence the upscaled image.","minimum":0,"maximum":100},"depthImage":{"type":"string","description":"The control image processed by depth estimator. 
Must reference an existing AssetId."},"parentId":{"type":"string"},"parentJobId":{"type":"string"},"width":{"type":"number","description":"The width of the rendered image.","minimum":1024,"maximum":2048},"strength":{"type":"number","description":"The strength\n\nOnly available for the `flux-kontext` LoRA model."},"inferenceId":{"type":"string","description":"The id of the Inference describing how this image was generated"},"lengthThreshold":{"type":"number"},"originalAssetId":{"type":"string"},"backgroundOpacity":{"type":"number","description":"Int to set between 0 and 255 for the opacity of the background in the result images.","minimum":0,"maximum":255},"aspectRatio":{"type":"string","description":"The optional aspect ratio given for the generation, only applicable for some models"},"detailsLevel":{"type":"number","description":"Amount of details to remove or add","minimum":-50,"maximum":50},"fractality":{"type":"number","description":"Determine the scale at which the upscale process works.\n- With a small value, the upscale works at the largest scale, resulting in fewer added details and more coherent images. Ideal for portraits, for example.\n- With a large value, the upscale works at the smallest scale, resulting in more added details and more hallucinations. Ideal for landscapes, for example.\n\n(info): A small value is slower and more expensive to run.","minimum":0,"maximum":100},"horizontalExpansionRatio":{"type":"number","description":"(deprecated) Horizontal expansion ratio.","minimum":1,"maximum":2},"points":{"type":"array","description":"List of points (label, x, y) in the image where label = 0 for background and 1 for object.","items":{"type":"array","items":{}}},"mode":{"type":"string"},"colorPrecision":{"type":"number"},"promptFidelity":{"type":"number","description":"Increase the fidelity to the prompt during upscale. 
Default: optimized for your preset and style.","minimum":0,"maximum":100},"betterQuality":{"type":"boolean","description":"Remove small dark spots (i.e. “pepper”) and connect small bright cracks.","default":false},"removeBackground":{"type":"boolean","description":"Remove background for Grayscale detector","default":true},"creativity":{"type":"number","description":"Allow the generation of \"hallucinations\" during the upscale process, which adds additional details and deviates from the original image. Default: optimized for your preset and style.","minimum":0,"maximum":100},"structureFidelity":{"type":"number","description":"Strength for the input image structure preservation","minimum":0,"maximum":100},"imageType":{"type":"string","description":"Preserve the seamless properties of skybox or texture images. Input has to be of same type (seamless).","enum":["seamfull","skybox","texture"],"default":"seamfull"},"image":{"type":"string","description":"The input image to process. Must reference an existing AssetId or be a data URL."},"thumbnail":{"type":"object","description":"The thumbnail of the canvas","properties":{"assetId":{"type":"string","description":"The AssetId of the image used as a thumbnail for the canvas (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"url":{"type":"string","description":"The url of the image used as a thumbnail for the canvas"}},"required":["assetId","url"]},"structureImage":{"type":"string","description":"The control image for structure. A canny detector will be applied to this image. 
Must reference an existing AssetId."},"invert":{"type":"boolean","description":"To invert the relief","default":false},"kind":{"type":"string","enum":["3d","audio","document","image","image-hdr","json","video"]},"colorMode":{"type":"string"},"highThreshold":{"type":"number","description":"High threshold for Canny detector"},"verticalExpansionRatio":{"type":"number","description":"(deprecated) Vertical expansion ratio.","minimum":1,"maximum":2},"halfMode":{"type":"boolean"},"name":{"type":"string"},"negativePromptStrength":{"type":"number","description":"Controls the influence of the negative prompt. Default 0 means the negative prompt has no effect. Higher values increase negative prompt influence.\nMust be > 0 if negativePrompt is provided.","minimum":0,"maximum":10},"refinementSteps":{"type":"number","description":"Additional refinement steps before scaling.\n\nIf scalingFactor == 1, the refinement process will be applied (1 + refinementSteps) times.\nIf scalingFactor > 1, the refinement process will be applied refinementSteps times.","minimum":0,"maximum":4},"referenceImages":{"type":"array","description":"The reference images used for the asset generation or editing","items":{"type":"string"}},"styleFidelity":{"type":"number","description":"The higher the value the more it will look like the style image(s)","minimum":0,"maximum":100},"resultContours":{"type":"boolean","description":"Boolean to output the contours.","default":false},"controlEnd":{"type":"number","description":"End step for control."},"polished":{"type":"number","description":"How polished is the surface? 0 is like a rough surface, 1 is like a mirror","minimum":0,"maximum":1},"scheduler":{"type":"string","description":"The scheduler used to generate this asset"},"overrideEmbeddings":{"type":"boolean","description":"Override the embeddings of the model. Only your prompt and negativePrompt will be used. 
Use with caution."},"progressPercent":{"type":"number"},"factor":{"type":"number","description":"Contrast factor for Grayscale detector"},"mask":{"type":"string","description":"The mask used for the asset generation or editing"},"lowThreshold":{"type":"number","description":"Low threshold for Canny detector"},"depthFidelity":{"type":"number","description":"The depth fidelity if a depth image provided","minimum":0,"maximum":100},"raised":{"type":"number","description":"How raised is the surface? 0 is flat like water, 1 is like a very rough rock","minimum":0,"maximum":1},"copiedAt":{"type":"string","description":"The date when the asset was copied to a project"},"contours":{"type":"array","items":{"type":"array","items":{"type":"array","items":{"type":"array","items":{"type":"number"}}}}},"negativePrompt":{"type":"string","description":"The negative prompt used to generate this asset"},"defaultParameters":{"type":"boolean","description":"If true, use the default parameters","default":false},"resizeOption":{"type":"number","description":"Size proportion of the input image in the output.","minimum":0.1,"maximum":1},"outputIndex":{"type":"number"},"saveFlipbook":{"type":"boolean","description":"Save a flipbook of the texture. Deactivated when the input texture is larger than 2048x2048px","default":true},"guidance":{"type":"number","description":"The guidance used to generate this asset"},"numInferenceSteps":{"type":"number","description":"The number of denoising steps for each image generation.","minimum":5,"maximum":50},"hdr":{"type":"number"},"clustering":{"type":"boolean","description":"Activate clustering.","default":false},"shiny":{"type":"number","description":"How shiny is the surface? 
0 is like a matte surface, 1 is like a diamond","minimum":0,"maximum":1},"colorCorrection":{"type":"boolean","description":"Ensure upscaled tile have the same color histogram as original tile.","default":true},"sketch":{"type":"boolean","description":"Activate sketch detection instead of canny.","default":false},"overlapPercentage":{"type":"number","description":"Overlap percentage for the output image.","minimum":0,"maximum":0.5},"prompt":{"type":"string","description":"The prompt that guided the asset generation or editing"},"resultImage":{"type":"boolean","description":"Boolean to able output the cut out object.","default":false},"cannyStructureImage":{"type":"string","description":"The control image already processed by canny detector. Must reference an existing AssetId."},"geometryEnforcement":{"type":"number","description":"Apply extra control to the Skybox 360 geometry.\nThe higher the value, the more the 360 geometry will influence the generated skybox image.\n\nUse with caution. 
Default is adapted to the other parameters.","minimum":0,"maximum":100},"targetWidth":{"type":"number","description":"Target width for the upscaled image, take priority over scaling factor","minimum":1024,"maximum":16000}},"required":["kind","type"]},"automaticCaptioning":{"type":"string","description":"Automatic captioning of the asset"},"description":{"type":"string","description":"The description, it will contain in priority:\n- the manual description\n- the advanced captioning when the asset is used in training flow\n- the automatic captioning"},"privacy":{"type":"string","description":"The privacy of the asset","enum":["private","public","unlisted"]},"mimeType":{"type":"string","description":"The mime type of the asset (example: \"image/png\")"},"source":{"type":"string","description":"source of the asset","enum":["3d23d","3d23d:texture","3d:texture","3d:texture:albedo","3d:texture:metallic","3d:texture:mtl","3d:texture:normal","3d:texture:roughness","audio2audio","background-removal","canvas","canvas-drawing","canvas-export","detection","generative-fill","image-prompt-editing","img23d","img2img","img2video","inference-control-net","inference-control-net-img","inference-control-net-inpainting","inference-control-net-inpainting-ip-adapter","inference-control-net-ip-adapter","inference-control-net-reference","inference-control-net-texture","inference-img","inference-img-ip-adapter","inference-img-texture","inference-in-paint","inference-in-paint-ip-adapter","inference-reference","inference-reference-texture","inference-txt","inference-txt-ip-adapter","inference-txt-texture","patch","pixelization","reframe","restyle","segment","segmentation-image","segmentation-mask","skybox-3d","skybox-base-360","skybox-hdri","texture","texture:albedo","texture:ao","texture:edge","texture:height","texture:metallic","texture:normal","texture:smoothness","txt23d","txt2audio","txt2img","txt2video","unknown","uploaded","uploaded-3d","uploaded-audio","uploaded-avatar","uploaded-video"
,"upscale","upscale-skybox","upscale-texture","upscale-video","vectorization","video2img","video2video"]},"ownerId":{"type":"string","description":"The owner (project) ID (example: \"proj_23tlk332lkht3kl2\" or \"team_dlkhgs23tlk3hlkth32lkht3kl2\" for old teams)"},"createdAt":{"type":"string","description":"The asset creation date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"firstFrame":{"type":"object","description":"The video asset's first frame.\n\nContains the assetId and the url of the first frame.","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"editCapabilities":{"type":"array","description":"List of edit capabilities","items":{"type":"string","enum":["DETECTION","GENERATIVE_FILL","PIXELATE","PROMPT_EDITING","REFINE","REFRAME","REMOVE_BACKGROUND","SEGMENTATION","UPSCALE","UPSCALE_360","VECTORIZATION"]}},"embedding":{"type":"array","description":"The embedding of the asset when requested.\n\nOnly available when an asset can be embedded (ie: not Detection maps)","items":{"type":"number"}},"id":{"type":"string","description":"The asset ID (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"updatedAt":{"type":"string","description":"The asset last update date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"thumbnail":{"type":"object","description":"The asset's thumbnail.\n\nContains the assetId and the url of the thumbnail.","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"nsfw":{"type":"array","description":"The NSFW labels","items":{"type":"string"}},"kind":{"type":"string","description":"The kind of asset","enum":["3d","audio","document","image","image-hdr","json","video"]},"lastFrame":{"type":"object","description":"The video asset's last frame.\n\nContains the assetId and the url of the last 
frame.","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"originalFileUrl":{"type":"string","description":"The original file url.\n\nContains the url of the original file, without any conversion. Only available for some specific video, audio and threeD assets.\nIs only specified if the given asset data has been replaced with a new file during the creation of the asset."},"authorId":{"type":"string","description":"The author user ID (example: \"dcf121faaa1a0a0bbbd9ca1b73d62aea\")"},"url":{"type":"string","description":"Signed URL to get the asset content"},"isHidden":{"type":"boolean","description":"Whether the asset is hidden."},"tags":{"type":"array","description":"The associated tags (example: [\"sci-fi\", \"landscape\"])","items":{"type":"string"}},"outputIndex":{"type":"number","description":"The output index of the asset within a job\nThis index is a positive integer that starts at 0\nIt is used to differentiate between multiple outputs of the same job\nIf the job has only one output, this index is 0"},"collectionIds":{"type":"array","description":"A list of CollectionId this asset belongs to","items":{"type":"string"}},"properties":{"type":"object","description":"The properties of the asset, content may depend on the kind of asset returned","properties":{"transcription":{"type":"object","description":"Transcription of the audio","properties":{"text":{"type":"string"}},"required":["text"]},"hasNormals":{"type":"boolean","description":"Whether the mesh has normal vectors"},"format":{"type":"string","description":"Format of the mesh file (e.g. 
'glb', etc.)"},"description":{"type":"string","description":"Description of the audio"},"bitrate":{"type":"number","description":"Bitrate of the media in bits per second"},"boneCount":{"type":"number","description":"Number of bones if skeleton exists"},"classification":{"type":"string","description":"Classification of the audio","enum":["effect","interview","music","other","sound","speech","text","unknown"]},"sampleRate":{"type":"number","description":"Sample rate of the media in Hz"},"faceCount":{"type":"number","description":"Number of faces/triangles in the mesh"},"vertexCount":{"type":"number","description":"Number of vertices in the mesh"},"animationFrameCount":{"type":"number","description":"Number of animation frames if animations exist"},"duration":{"type":"number","description":"Duration of the media in seconds"},"frameRate":{"type":"number","description":"Frame rate of the video in frames per second"},"nbFrames":{"type":"number","description":"Number of frames in the video"},"channels":{"type":"number","description":"Number of channels of the audio"},"size":{"type":"number"},"hasSkeleton":{"type":"boolean","description":"Whether the mesh has bones/skeleton"},"width":{"type":"number"},"hasAnimations":{"type":"boolean","description":"Whether the mesh has animations"},"codecName":{"type":"string","description":"Codec name of the media"},"hasUVs":{"type":"boolean","description":"Whether the mesh has UV coordinates"},"dimensions":{"type":"array","description":"Bounding box dimensions [width, height, depth]","items":{}},"height":{"type":"number"}},"required":["size"]},"status":{"type":"string","description":"The actual 
status","enum":["error","pending","success"]}},"required":["authorId","collectionIds","createdAt","editCapabilities","id","kind","metadata","mimeType","ownerId","privacy","properties","source","status","tags","updatedAt","url"]}}},"PostEmbedInferencesRequest":{"type":"object","required":["text"],"properties":{"text":{"type":"string","description":"The text to embed. Must be a non-empty string."}}},"PutModelPresetByModelIdAndPresetIdRequest":{"type":"object","required":["isDefault"],"properties":{"isDefault":{"type":"boolean","description":"Whether this preset should be the default preset for the model"}}},"PostTranslateInferencesRequest":{"type":"object","required":["prompt"],"properties":{"ensureIPCleared":{"type":"boolean","description":"Whether we try to ensure IP removal for new prompt generation."},"images":{"type":"array","description":"List of images used to condition the generation.\n\nImages are set a data URLs (example: \\\"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVQYV2NgYAAAAAMAAWgmWQ0AAAAASUVORK5CYII=\\\") or the asset IDs (example: \\\"asset_GTrL3mq4SXWyMxkOHRxlpw\\\").\n\nNotes:\n- in `contextual` mode, images condition prompt generation by using their actual descriptions as context\n- in all other modes, it supersedes the `modelId` parameter when provided.","items":{"type":"string"}},"seed":{"type":"number","description":"If specified, the API will make a best effort to produce the same results, such that repeated requests with the same `seed` and parameters should return the same outputs. Must be used along with the same parameters including prompt, model's state, etc.."},"temperature":{"type":"number","description":"The sampling temperature to use. 
Higher values like `0.8` will make the output more random, while lower values like `0.2` will make it more focused and deterministic.\n\nWe generally recommend altering this or `topP` but not both.","minimum":0,"maximum":2},"prompt":{"type":"string","description":"The prompt to translate.","default":"translation"},"topP":{"type":"number","description":"An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So `0.1` means only the tokens comprising the top `10%` probability mass are considered.\n\nWe generally recommend altering this or `temperature` but not both.","minimum":0,"maximum":1}}},"PostGenerateCustomRequest":{"description":"The request body for the custom generation must be retrieve from GET /models/{modelId} inputs fields"},"PostControlnetIpAdapterInferencesResponse":{"type":"object","required":["inference","job"],"properties":{"creativeUnitsCost":{"type":"number","description":"The Creative Units cost for the request billed"},"creativeUnitsDiscount":{"type":"number","description":"The Creative Units discount for the request billed"},"inference":{"type":"object","properties":{"ipAdapterImageIds":{"type":"array","description":"Asset id of the input IpAdapter images","items":{"type":"string"}},"controlEnd":{"type":"number","description":"Specifies how long the ControlNet guidance should be applied during the inference process.\n\nOnly available for Flux.1-dev based models.\n\nThe value represents the percentage of total inference steps where the ControlNet guidance is active.\nFor example:\n- 1.0: ControlNet guidance is applied during all inference steps\n- 0.5: ControlNet guidance is only applied during the first half of inference steps\n\nDefault values:\n- 0.5 for Canny modality\n- 0.6 for all other modalities","minimum":0.1,"maximum":1},"modality":{"type":"string","description":"The modality associated with the control image used for the generation: it can 
either be an object with a combination of maximum\n\nFor models of SD1.5 family:\n - up to 3 modalities from `canny`, `pose`, `depth`, `lines`, `seg`, `scribble`, `lineart`, `normal-map`, `illusion`\n - or one of the following presets: `character`, `landscape`, `city`, `interior`.\n\nFor models of the SDXL family:\n - up to 3 modalities from `canny`, `pose`, `depth`, `seg`, `illusion`, `scribble`\n - or one of the following presets: `character`, `landscape`.\n\nFor models of the FLUX schnell or dev families:\n- one modality from: `canny`, `tile`, `depth`, `blur`, `pose`, `gray`, `low-quality`\n\nOptionally, you can associate a value to these modalities or presets. The value must be within `]0.0, 1.0]`.\n\nExamples:\n- `canny`\n- `depth:0.5,pose:1.0`\n- `canny:0.5,depth:0.5,lines:0.3`\n- `landscape`\n- `character:0.5`\n- `illusion:1`\n\nNote: if you use a value that is not supported by the model family, this will result in an error.","enum":["blur","canny","depth","gray","illusion","lineart","lines","low-quality","normal-map","pose","scribble","seg","sketch","tile"]},"seed":{"type":"string","description":"Used to reproduce previous results. Default: randomly generated number."},"ipAdapterType":{"type":"string","description":"The type of IP Adapter model to use. Must be one of [`style`, `character`], default to `style``","enum":["character","style"],"default":"style"},"type":{"type":"string","description":"The type of inference to use. 
Example: txt2img, img2img, etc.\n\nSelecting the right type will condition the expected parameters.\n\nNote: if model.type is `sd-xl*` or `sd-1_5*`, when using the `\"inpaint\"` inference type, Scenario determines the best available `baseModel` for a given `modelId`: one of `[\"stable-diffusion-inpainting\", \"stable-diffusion-xl-1.0-inpainting-0.1\"] will be used.","enum":["txt2img","txt2img_ip_adapter","txt2img_texture","img2img","img2img_ip_adapter","img2img_texture","inpaint","outpaint","inpaint_ip_adapter","controlnet","controlnet_ip_adapter","reference","reference_texture","controlnet_reference","controlnet_img2img","controlnet_inpaint","controlnet_inpaint_ip_adapter","controlnet_texture"]},"ipAdapterImage":{"type":"string","description":"Signed URL to display the IpAdapter image"},"scheduler":{"type":"string","description":"The scheduler to use to override the default configured for the model. See detailed documentation for more details.","enum":["DDIMScheduler","DDPMScheduler","DEISMultistepScheduler","DPMSolverMultistepScheduler","DPMSolverSinglestepScheduler","EulerAncestralDiscreteScheduler","EulerDiscreteScheduler","HeunDiscreteScheduler","KDPM2AncestralDiscreteScheduler","KDPM2DiscreteScheduler","LCMScheduler","LMSDiscreteScheduler","PNDMScheduler","TCDScheduler","UniPCMultistepScheduler"]},"disableModalityDetection":{"type":"boolean","description":"If false, the process uses the given image to detect the modality.\nIf true (default), the process will not try to detect the modality of the given image.\n\nFor example:\nwith `pose` modality and `false` value, the process will detect the pose of people in the given image\nwith `depth` modality and `false` value, the process will detect the depth of the given image\nwith `scribble` modality and `true`value, the process will use the given image as a scribble\n\n⚠️ For models of the FLUX schnell or dev families, this parameter is ignored. The modality detection is always disabled. 
⚠️","default":true},"ipAdapterImages":{"type":"array","description":"Signed URL to display the IpAdapter images","items":{"type":"string"}},"baseModelId":{"type":"string","description":"The base model to use for the inference. Only Flux LoRA models can use this parameter.\nAllowed values are available in the model's attribute: `compliantModelIds`"},"controlStart":{"type":"number","description":"Specifies the starting point of the ControlNet guidance during the inference process.\n\nOnly available for Flux.1-dev based models.\n\nThe value represents the percentage of total inference steps where the ControlNet guidance starts.\nFor example:\n- 0.0: ControlNet guidance starts at the beginning of the inference steps\n- 0.5: ControlNet guidance starts at the middle of the inference steps","minimum":0,"maximum":0.9},"height":{"type":"number","description":"The height of the generated images, must be a 8 multiple (within [64, 2048], default: 512)\nIf model.type is `sd-xl`, `sd-xl-lora`, `sd-xl-composition` the height must be within [512, 2048]\nIf model.type is `sd-1_5`, the height must be within [64, 1024]\nIf model.type is `flux.1.1-pro-ultra`, you can use the aspectRatio parameter instead","minimum":64,"maximum":2048},"image":{"type":"string","description":"Signed URL to display the input image"},"imageId":{"type":"string","description":"Asset id of the input image"},"ipAdapterImageId":{"type":"string","description":"Asset id of the input IpAdapter image"},"ipAdapterScale":{"type":"number","description":"IpAdapter scale factor (within [0.0, 1.0], default: 0.9).","minimum":0,"maximum":1},"modelEpoch":{"type":"string","description":"The epoch of the model to use for the inference. 
Only available for Flux Lora Trained models."},"hideResults":{"type":"boolean","description":"If set, generated assets will be hidden and not returned in the list of images of the inference\nor when listing assets (default: false)","default":false},"ipAdapterScales":{"type":"array","description":"IpAdapter scale factors (within [0.0, 1.0], default: 0.9).","items":{"type":"number"}},"negativePrompt":{"type":"string","description":"The prompt not to guide the image generation, ignored when guidance < 1 (example: \"((ugly face))\")\nFor Flux based model (not Fast-Flux): requires negativePromptStrength > 0 and active only for inference types txt2img / img2img / controlnet."},"intermediateImages":{"type":"boolean","description":"Enable or disable the intermediate images generation (default: false)","default":false},"concepts":{"type":"array","items":{"type":"object","properties":{"modelId":{"type":"string","description":"The model ID (example: \"model_eyVcnFJcR92BxBkz7N6g5w\")"},"modelEpoch":{"type":"string","description":"The epoch of the model (example: \"000001\")\nOnly available for Flux Lora Trained models"},"scale":{"type":"number","description":"The scale of the model (example: 1.0)\nFor Flux Kontext Prompt Editing, the scale is between 0 and 2.","minimum":-2,"maximum":2}},"required":["modelId","scale"]}},"guidance":{"type":"number","description":"Controls how closely the generated image follows the prompt. Higher values result in stronger adherence to the prompt. 
Default and allowed values depend on the model type:\n- For Flux dev models, the default is 3.5 and allowed values are within [0, 10]\n- For Flux pro models, the default is 3 and allowed values are within [2, 5]\n- For SDXL models, the default is 6 and allowed values are within [0, 20]\n- For SD1.5 models, the default is 7.5 and allowed values are within [0, 20]","minimum":0,"maximum":20},"numInferenceSteps":{"type":"number","description":"The number of denoising steps for each image generation (within [1, 150], default: 30)","minimum":1,"maximum":150},"numSamples":{"type":"number","description":"The number of images to generate (within [1, 128], default: 4)","minimum":1,"maximum":128},"width":{"type":"number","description":"The width of the generated images, must be a 8 multiple (within [64, 2048], default: 512)\nIf model.type is `sd-xl`, `sd-xl-lora`, `sd-xl-composition` the width must be within [512, 2048]\nIf model.type is `sd-1_5`, the width must be within [64, 1024]\nIf model.type is `flux.1.1-pro-ultra`, you can use the aspectRatio parameter instead","minimum":64,"maximum":2048},"negativePromptStrength":{"type":"number","description":"Only applicable for flux-dev based models for `txt2img`, `img2img`, and `controlnet` inference types.\n\nControls the influence of the negative prompt. Default 0 means the negative prompt has no effect. Higher values increase negative prompt influence.\nMust be > 0 if negativePrompt is provided.","minimum":0,"maximum":10},"prompt":{"type":"string","description":"Full text prompt including the model placeholder. 
(example: \"an illustration of phoenix in a fantasy world, flying over a mountain, 8k, bokeh effect\")"}},"required":["image","imageId","modality","prompt","type"]},"job":{"type":"object","properties":{"createdAt":{"type":"string","description":"The job creation date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"jobId":{"type":"string","description":"The job ID (example: \"job_ocZCnG1Df35XRL1QyCZSRxAG8\")"},"metadata":{"type":"object","description":"Metadata of the job with some additional information","properties":{"output":{"type":"object","description":"The output of the job","properties":{}},"input":{"type":"object","description":"The inputs for the job","properties":{}},"assetIds":{"type":"array","description":"List of produced assets for this job","items":{"type":"string"}},"flow":{"type":"array","description":"The flow of the job. Only available for workflow jobs.","items":{"type":"object","properties":{"jobId":{"type":"string","description":"If the flow is part of a WorkflowJob, this is the jobId for the node.\njobId is only available for nodes started. A node \"Pending\" for a running workflow job is not started."},"assets":{"type":"array","description":"List of produced assets for this node.","items":{"type":"object","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]}},"logicType":{"type":"string","description":"The type of the logic for the node.\nOnly available for logic nodes.","enum":["if-else"]},"dependsOn":{"type":"array","description":"The nodes that this node depends on.\nOnly available for nodes that have dependencies. Mainly used for user approval nodes.","items":{"type":"string"}},"modelId":{"type":"string","description":"The model id for the node. 
Mainly used for custom model tasks."},"inputs":{"type":"array","description":"The inputs of the node.","items":{"type":"object","description":"This is used to run the flow.","properties":{"parent":{"type":"boolean","description":"Whether this input represents a parent asset to assign to the produced assets.\nOnly available for \\`file\\` and \\`file_array\\` input types.\n\nFor \\`file_array\\`, the parent asset is the first item in the array."},"color":{"type":"boolean","description":"Whether the input is a color or not. Only available for \\`string\\` input type."},"inputs":{"type":"array","description":"The list of inputs which form an object within a container array.\nAll inputs are the same as the current object.\nThis is only available for type inputs_array inputs.","items":{"type":"object","properties":{}}},"minLength":{"type":"number","description":"The minimum allowed length for string inputs. Also applies to each item in \\`string_array\\`."},"backgroundBehavior":{"type":"string","description":"Specifies the background behavior for the input. 
Only available for \\`file\\` and \\`file_array\\`\ninput types with kind \\`image\\`.","enum":["opaque","transparent"]},"description":{"type":"string","description":"Help text displayed in the UI to provide additional information about the input"},"type":{"type":"string","description":"The data type of the input","enum":["boolean","file","file_array","inputs_array","model","model_array","number","number_array","string","string_array"]},"required":{"type":"object","description":"Set of rules that describes when this input is required:\n- \\`always\\`: Input is always required\n- \\`ifNotDefined\\`: Input is required when another specified input is not defined\n- \\`ifDefined\\`: Input is required when another specified input is defined\n- \\`conditionalValues\\`: Input is required when another input has a specific value\n\nBy default, the input is not required.","properties":{"always":{"type":"boolean","description":"Whether the input is always required"},"ifNotDefined":{"type":"object","description":"Makes this input required when another input is not defined:\n- Key: name of the input that must be undefined\n- Value: message to display when this input is required","properties":{}},"ifDefined":{"type":"object","description":"Makes this input required when another input is defined:\n- Key: name of the input that must be defined\n- Value: message to display when this input is required","properties":{}},"conditionalValues":{"type":"object","description":"Makes this input required when another input has a specific value:\n- Key: name of the input to check\n- Value: operation and allowed values that trigger the requirement","properties":{}}}},"modelTypes":{"type":"array","description":"The allowed model types for this input. 
Example: \\`[\"flux.1-lora\"]\\`.\nOnly available for \\`model_array\\` input type.","items":{"type":"string","enum":["custom","flux.1","flux.1-composition","flux.1-kontext-dev","flux.1-kontext-lora","flux.1-krea-dev","flux.1-krea-lora","flux.1-lora","flux.1-pro","flux.1.1-pro-ultra","flux1.1-pro","gpt-image-1","sd-1_5","sd-1_5-composition","sd-1_5-lora","sd-xl","sd-xl-composition","sd-xl-lora"]}},"ref":{"type":"object","description":"The reference to another input or output of the same workflow.\nMust have at least one of node or conditional.","properties":{"equal":{"type":"string","description":"This is the desired node output value if ref is an if/else node."},"node":{"type":"string","description":"The node id or 'workflow' if the source is a workflow input."},"conditional":{"type":"array","description":"The conditional nodes to reference.\nIf the conditional nodes are successful, the node will be successful.\nIf the conditional nodes are skipped, the node will be skipped.\nContains an array of node ids used to check the status of the nodes.","items":{"type":"string"}},"name":{"type":"string","description":"The name of the input or output to reference.\nIf the type is 'workflow', the name is the name of the input of the workflow is required\nIf the type is 'node', the name is not mandatory, except if you want all outputs of the node.\nTo get all outputs of a node, you can use the name 'all'."}}},"min":{"type":"number","description":"The minimum allowed value. Only available for \\`number\\` and array input types."},"promptSpark":{"type":"boolean","description":"Whether the input is used with prompt spark. Only available for \\`string\\` input type."},"placeholder":{"type":"string","description":"Placeholder text for the input. Only available for 'string' input type."},"group":{"type":"string","description":"Used to visually group inputs together in the UI. 
Inputs with the same group value appear\nconsecutively in the UI."},"allowedValues":{"type":"array","description":"The allowed values for the input. For \\`string\\` or \\`number\\` types, creates a single-select\ndropdown.\nFor \\`string_array\\` type, creates a multi-select dropdown.","items":{}},"costImpact":{"type":"boolean","description":"Whether this input affects the model's cost calculation"},"max":{"type":"number","description":"The maximum allowed value. Only available for \\`number\\` and \\`array\\` input types."},"kind":{"type":"string","description":"The asset kind of the input. Only taken into account for \\`file\\` and \\`file_array\\` input types.","enum":["3d","audio","document","image","image-hdr","json","video"]},"maskFrom":{"type":"string","description":"The name of the file input field to use as the mask source"},"label":{"type":"string","description":"The label displayed in the UI for this input"},"hint":{"type":"string","description":"Hint text displayed in the UI as a tooltip to guide the user"},"name":{"type":"string","description":"The name that must be used to call the model through the API"},"step":{"type":"number","description":"The step increment for numeric inputs. Only available for \\`number\\` input type.","minimum":1},"prompt":{"type":"boolean","description":"Whether the input is a prompt. When true, displays as a text area with prompt spark feature.\nOnly available for \\`string\\` input type."},"maxLength":{"type":"number","description":"The maximum allowed length for \\`string\\` inputs. 
Also applies to each item in \\`string_array\\`."}},"required":["name","type"]}},"id":{"type":"string","description":"The id of the node."},"logic":{"type":"object","description":"The logic of the node.\nOnly available for logic nodes.","properties":{"default":{"type":"string","description":"The default case of the logic.\nContains the id/output of the node to execute if no case is matched.\nOnly available for if/else nodes."},"transform":{"type":"string","description":"The transform of the logic.\nOnly available for transform nodes."},"cases":{"type":"array","description":"The cases of the logic.\nOnly available for if/else nodes.","items":{"type":"object","properties":{"condition":{"type":"string"},"value":{"type":"string"}},"required":["condition","value"]}}}},"type":{"type":"string","description":"The type of the job for the node.","enum":["custom-model","generate-prompt","logic","remove-background","transform","user-approval","workflow"]},"workflowId":{"type":"string","description":"The workflow id for the node. Mainly used for workflow tasks."},"status":{"type":"string","description":"The status of the node. 
Only available for WorkflowJob nodes.","enum":["failure","pending","processing","skipped","success"]}},"required":["id","status","type"]}},"workflowId":{"type":"string","description":"The workflow ID of the job if job is part of a workflow."},"workflowJobId":{"type":"string","description":"The workflow job ID of the job if job is part of a workflow job."}}},"statusHistory":{"type":"array","description":"The history of the different statuses the job went through with the ISO string date\nof when the job reached each statuses.","items":{"type":"object","properties":{"date":{"type":"string"},"status":{"type":"string","enum":["canceled","failure","finalizing","in-progress","pending","queued","success","warming-up"]}},"required":["date","status"]}},"progress":{"type":"number","description":"Progress of the job (between 0 and 1)"},"authorId":{"type":"string","description":"The author user ID (example: \"dcf121faaa1a0a0bbbd9ca1b73d62aea\")"},"jobType":{"type":"string","description":"The type of job","enum":["assets-download","canvas-export","caption","caption-llava","custom","describe-style","detection","embed","flux","flux-model-training","generate-prompt","image-generation","image-prompt-editing","inference","mesh-preview-rendering","model-download","model-import","model-training","openai-image-generation","patch-image","pixelate","reframe","remove-background","repaint","restyle","segment","skybox-3d","skybox-base-360","skybox-hdri","skybox-upscale-360","texture","translate","upload","upscale","upscale-skybox","upscale-texture","vectorize","workflow"]},"ownerId":{"type":"string","description":"The owner ID (example: \"team_U3Qmc8PCdWXwAQJ4Dvw4tV6D\")"},"billing":{"type":"object","description":"The billing of the job","properties":{"cuDiscount":{"type":"number"},"cuCost":{"type":"number"}},"required":["cuCost","cuDiscount"]},"status":{"type":"string","description":"The current status of the 
job","enum":["canceled","failure","finalizing","in-progress","pending","queued","success","warming-up"]},"updatedAt":{"type":"string","description":"The job last update date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"}},"required":["createdAt","jobId","jobType","metadata","progress","status","statusHistory","updatedAt"]}}},"PostWebhooksWorkflowJobIdResponse":{"type":"object","properties":{"message":{"type":"string"}}},"PutModelsTagsByModelIdResponse":{"type":"object","required":["added","deleted"],"properties":{"deleted":{"type":"array","description":"The list of deleted tags","items":{"type":"string"}},"added":{"type":"array","description":"The list of added tags","items":{"type":"string"}}}},"JobAndAssetAndBillingGenerateResponse":{"type":"object","required":["asset","job"],"properties":{"creativeUnitsCost":{"type":"number","description":"The Creative Units cost for the request billed"},"creativeUnitsDiscount":{"type":"number","description":"The Creative Units discount for the request billed"},"job":{"type":"object","properties":{"createdAt":{"type":"string","description":"The job creation date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"jobId":{"type":"string","description":"The job ID (example: \"job_ocZCnG1Df35XRL1QyCZSRxAG8\")"},"metadata":{"type":"object","description":"Metadata of the job with some additional information","properties":{"output":{"type":"object","description":"The output of the job","properties":{}},"input":{"type":"object","description":"The inputs for the job","properties":{}},"assetIds":{"type":"array","description":"List of produced assets for this job","items":{"type":"string"}},"flow":{"type":"array","description":"The flow of the job. Only available for workflow jobs.","items":{"type":"object","properties":{"jobId":{"type":"string","description":"If the flow is part of a WorkflowJob, this is the jobId for the node.\njobId is only available for nodes started. 
A node \"Pending\" for a running workflow job is not started."},"assets":{"type":"array","description":"List of produced assets for this node.","items":{"type":"object","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]}},"logicType":{"type":"string","description":"The type of the logic for the node.\nOnly available for logic nodes.","enum":["if-else"]},"dependsOn":{"type":"array","description":"The nodes that this node depends on.\nOnly available for nodes that have dependencies. Mainly used for user approval nodes.","items":{"type":"string"}},"modelId":{"type":"string","description":"The model id for the node. Mainly used for custom model tasks."},"inputs":{"type":"array","description":"The inputs of the node.","items":{"type":"object","description":"This is used to run the flow.","properties":{"parent":{"type":"boolean","description":"Whether this input represents a parent asset to assign to the produced assets.\nOnly available for \\`file\\` and \\`file_array\\` input types.\n\nFor \\`file_array\\`, the parent asset is the first item in the array."},"color":{"type":"boolean","description":"Whether the input is a color or not. Only available for \\`string\\` input type."},"inputs":{"type":"array","description":"The list of inputs which form an object within a container array.\nAll inputs are the same as the current object.\nThis is only available for type inputs_array inputs.","items":{"type":"object","properties":{}}},"minLength":{"type":"number","description":"The minimum allowed length for string inputs. Also applies to each item in \\`string_array\\`."},"backgroundBehavior":{"type":"string","description":"Specifies the background behavior for the input. 
Only available for \\`file\\` and \\`file_array\\`\ninput types with kind \\`image\\`.","enum":["opaque","transparent"]},"description":{"type":"string","description":"Help text displayed in the UI to provide additional information about the input"},"type":{"type":"string","description":"The data type of the input","enum":["boolean","file","file_array","inputs_array","model","model_array","number","number_array","string","string_array"]},"required":{"type":"object","description":"Set of rules that describes when this input is required:\n- \\`always\\`: Input is always required\n- \\`ifNotDefined\\`: Input is required when another specified input is not defined\n- \\`ifDefined\\`: Input is required when another specified input is defined\n- \\`conditionalValues\\`: Input is required when another input has a specific value\n\nBy default, the input is not required.","properties":{"always":{"type":"boolean","description":"Whether the input is always required"},"ifNotDefined":{"type":"object","description":"Makes this input required when another input is not defined:\n- Key: name of the input that must be undefined\n- Value: message to display when this input is required","properties":{}},"ifDefined":{"type":"object","description":"Makes this input required when another input is defined:\n- Key: name of the input that must be defined\n- Value: message to display when this input is required","properties":{}},"conditionalValues":{"type":"object","description":"Makes this input required when another input has a specific value:\n- Key: name of the input to check\n- Value: operation and allowed values that trigger the requirement","properties":{}}}},"modelTypes":{"type":"array","description":"The allowed model types for this input. 
Example: \\`[\"flux.1-lora\"]\\`.\nOnly available for \\`model_array\\` input type.","items":{"type":"string","enum":["custom","flux.1","flux.1-composition","flux.1-kontext-dev","flux.1-kontext-lora","flux.1-krea-dev","flux.1-krea-lora","flux.1-lora","flux.1-pro","flux.1.1-pro-ultra","flux1.1-pro","gpt-image-1","sd-1_5","sd-1_5-composition","sd-1_5-lora","sd-xl","sd-xl-composition","sd-xl-lora"]}},"ref":{"type":"object","description":"The reference to another input or output of the same workflow.\nMust have at least one of node or conditional.","properties":{"equal":{"type":"string","description":"This is the desired node output value if ref is an if/else node."},"node":{"type":"string","description":"The node id or 'workflow' if the source is a workflow input."},"conditional":{"type":"array","description":"The conditional nodes to reference.\nIf the conditional nodes are successful, the node will be successful.\nIf the conditional nodes are skipped, the node will be skipped.\nContains an array of node ids used to check the status of the nodes.","items":{"type":"string"}},"name":{"type":"string","description":"The name of the input or output to reference.\nIf the type is 'workflow', the name is the name of the input of the workflow is required\nIf the type is 'node', the name is not mandatory, except if you want all outputs of the node.\nTo get all outputs of a node, you can use the name 'all'."}}},"min":{"type":"number","description":"The minimum allowed value. Only available for \\`number\\` and array input types."},"promptSpark":{"type":"boolean","description":"Whether the input is used with prompt spark. Only available for \\`string\\` input type."},"placeholder":{"type":"string","description":"Placeholder text for the input. Only available for 'string' input type."},"group":{"type":"string","description":"Used to visually group inputs together in the UI. 
Inputs with the same group value appear\nconsecutively in the UI."},"allowedValues":{"type":"array","description":"The allowed values for the input. For \\`string\\` or \\`number\\` types, creates a single-select\ndropdown.\nFor \\`string_array\\` type, creates a multi-select dropdown.","items":{}},"costImpact":{"type":"boolean","description":"Whether this input affects the model's cost calculation"},"max":{"type":"number","description":"The maximum allowed value. Only available for \\`number\\` and \\`array\\` input types."},"kind":{"type":"string","description":"The asset kind of the input. Only taken into account for \\`file\\` and \\`file_array\\` input types.","enum":["3d","audio","document","image","image-hdr","json","video"]},"maskFrom":{"type":"string","description":"The name of the file input field to use as the mask source"},"label":{"type":"string","description":"The label displayed in the UI for this input"},"hint":{"type":"string","description":"Hint text displayed in the UI as a tooltip to guide the user"},"name":{"type":"string","description":"The name that must be used to call the model through the API"},"step":{"type":"number","description":"The step increment for numeric inputs. Only available for \\`number\\` input type.","minimum":1},"prompt":{"type":"boolean","description":"Whether the input is a prompt. When true, displays as a text area with prompt spark feature.\nOnly available for \\`string\\` input type."},"maxLength":{"type":"number","description":"The maximum allowed length for \\`string\\` inputs. 
Also applies to each item in \\`string_array\\`."}},"required":["name","type"]}},"id":{"type":"string","description":"The id of the node."},"logic":{"type":"object","description":"The logic of the node.\nOnly available for logic nodes.","properties":{"default":{"type":"string","description":"The default case of the logic.\nContains the id/output of the node to execute if no case is matched.\nOnly available for if/else nodes."},"transform":{"type":"string","description":"The transform of the logic.\nOnly available for transform nodes."},"cases":{"type":"array","description":"The cases of the logic.\nOnly available for if/else nodes.","items":{"type":"object","properties":{"condition":{"type":"string"},"value":{"type":"string"}},"required":["condition","value"]}}}},"type":{"type":"string","description":"The type of the job for the node.","enum":["custom-model","generate-prompt","logic","remove-background","transform","user-approval","workflow"]},"workflowId":{"type":"string","description":"The workflow id for the node. Mainly used for workflow tasks."},"status":{"type":"string","description":"The status of the node. 
Only available for WorkflowJob nodes.","enum":["failure","pending","processing","skipped","success"]}},"required":["id","status","type"]}},"workflowId":{"type":"string","description":"The workflow ID of the job if job is part of a workflow."},"workflowJobId":{"type":"string","description":"The workflow job ID of the job if job is part of a workflow job."}}},"statusHistory":{"type":"array","description":"The history of the different statuses the job went through with the ISO string date\nof when the job reached each status.","items":{"type":"object","properties":{"date":{"type":"string"},"status":{"type":"string","enum":["canceled","failure","finalizing","in-progress","pending","queued","success","warming-up"]}},"required":["date","status"]}},"progress":{"type":"number","description":"Progress of the job (between 0 and 1)"},"authorId":{"type":"string","description":"The author user ID (example: \"dcf121faaa1a0a0bbbd9ca1b73d62aea\")"},"jobType":{"type":"string","description":"The type of job","enum":["assets-download","canvas-export","caption","caption-llava","custom","describe-style","detection","embed","flux","flux-model-training","generate-prompt","image-generation","image-prompt-editing","inference","mesh-preview-rendering","model-download","model-import","model-training","openai-image-generation","patch-image","pixelate","reframe","remove-background","repaint","restyle","segment","skybox-3d","skybox-base-360","skybox-hdri","skybox-upscale-360","texture","translate","upload","upscale","upscale-skybox","upscale-texture","vectorize","workflow"]},"ownerId":{"type":"string","description":"The owner ID (example: \"team_U3Qmc8PCdWXwAQJ4Dvw4tV6D\")"},"billing":{"type":"object","description":"The billing of the job","properties":{"cuDiscount":{"type":"number"},"cuCost":{"type":"number"}},"required":["cuCost","cuDiscount"]},"status":{"type":"string","description":"The current status of the 
job","enum":["canceled","failure","finalizing","in-progress","pending","queued","success","warming-up"]},"updatedAt":{"type":"string","description":"The job last update date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"}},"required":["createdAt","jobId","jobType","metadata","progress","status","statusHistory","updatedAt"]},"asset":{"type":"object","properties":{"preview":{"type":"object","description":"The asset's preview.\n\nContains the assetId and the url of the preview.","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"metadata":{"type":"object","description":"Metadata of the asset with some additional information","properties":{"maxIterations":{"type":"number"},"modality":{"type":"string","description":"Modality to detect","enum":["canny","depth","grayscale","lineart_anime","mlsd","normal","pose","scribble","segmentation","sketch"],"default":"canny"},"tileStyle":{"type":"boolean","description":"If set to true, during the upscaling process, the model will match tiles of the source image with tiles of the style image(s). This will result in a more coherent restyle. Works best with style images that have a similar composition.","default":false},"modelId":{"type":"string","description":"The modelId used to generate this asset"},"sourceProjectId":{"type":"string"},"bbox":{"type":"array","description":"A bounding box around the object of interest, in the format [x1, y1, x2, y2].","items":{}},"keypointThreshold":{"type":"number","description":"Threshold used for keypoint detection, between 0 and 1","minimum":0,"maximum":1},"type":{"type":"string","description":"The type of the asset. 
Ex: 'inference-txt2img' will represent an asset generated from a text to image model","enum":["3d-texture","3d-texture-albedo","3d-texture-metallic","3d-texture-mtl","3d-texture-normal","3d-texture-roughness","3d23d","3d23d-texture","audio2audio","background-removal","canvas","canvas-drawing","canvas-export","detection","generative-fill","image-prompt-editing","img23d","img2img","img2video","inference-controlnet","inference-controlnet-img2img","inference-controlnet-inpaint","inference-controlnet-inpaint-ip-adapter","inference-controlnet-ip-adapter","inference-controlnet-reference","inference-controlnet-texture","inference-img2img","inference-img2img-ip-adapter","inference-img2img-texture","inference-inpaint","inference-inpaint-ip-adapter","inference-reference","inference-reference-texture","inference-txt2img","inference-txt2img-ip-adapter","inference-txt2img-texture","patch","pixelization","reframe","restyle","segment","segmentation-image","segmentation-mask","skybox-3d","skybox-base-360","skybox-hdri","texture","texture-albedo","texture-ao","texture-edge","texture-height","texture-metallic","texture-normal","texture-smoothness","txt23d","txt2audio","txt2img","txt2video","unknown","uploaded","uploaded-3d","uploaded-audio","uploaded-avatar","uploaded-video","upscale","upscale-skybox","upscale-texture","upscale-video","vectorization","video2img","video2video"]},"text":{"type":"string","description":"A textual description / keywords describing the object of interest.","maxLength":100},"height":{"type":"number"},"nbMasks":{"type":"number"},"creativityDecay":{"type":"number","description":"Amount of decay in creativity over the upscale process. 
The lower the value, the less the creativity will be preserved over the upscale process.","minimum":0,"maximum":100},"layerDifference":{"type":"number"},"scalingFactor":{"type":"number","description":"Scaling factor (when `targetWidth` not specified)","minimum":1,"maximum":16},"inputFidelity":{"type":"string","description":"When set to `high`, allows to better preserve details from the input images in the output.\nThis is especially useful when using images that contain elements like faces or logos that\nrequire accurate preservation in the generated image.\n\nYou can provide multiple input images that will all be preserved with high fidelity, but keep\nin mind that the first image will be preserved with richer textures and finer details, so if\nyou include elements such as faces, consider placing them in the first image.\n\nOnly available for the `gpt-image-1` model.","enum":["high","low"],"default":"low"},"modelType":{"type":"string","description":"The type of the generator used","enum":["custom","flux.1","flux.1-composition","flux.1-kontext-dev","flux.1-kontext-lora","flux.1-krea-dev","flux.1-krea-lora","flux.1-lora","flux.1-pro","flux.1.1-pro-ultra","flux1.1-pro","gpt-image-1","sd-1_5","sd-1_5-composition","sd-1_5-lora","sd-xl","sd-xl-composition","sd-xl-lora"]},"numOutputs":{"type":"number","description":"The number of outputs to generate.","minimum":1,"maximum":8},"angular":{"type":"number","description":"How angular is the surface? 
0 is like a sphere, 1 is like a mechanical object","minimum":0,"maximum":1},"targetHeight":{"type":"number","description":"The target height of the output image.","minimum":0,"maximum":2048},"concepts":{"type":"array","description":"Flux Kontext LoRA to style the image.\nFor Flux Kontext Prompt Editing.","items":{"type":"object","properties":{"modelId":{"type":"string","description":"The model ID (example: \"model_eyVcnFJcR92BxBkz7N6g5w\")"},"modelEpoch":{"type":"string","description":"The epoch of the model (example: \"000001\")\nOnly available for Flux Lora Trained models"},"scale":{"type":"number","description":"The scale of the model (example: 1.0)\nFor Flux Kontext Prompt Editing, the scale is between 0 and 2.","minimum":-2,"maximum":2}},"required":["modelId","scale"]}},"size":{"type":"number"},"spliceThreshold":{"type":"number"},"dilate":{"type":"number","description":"The number of pixels to dilate the result masks.","minimum":0,"maximum":30},"style":{"type":"string","enum":["3d-cartoon","3d-rendered","anime","cartoon","cinematic","claymation","cloud-skydome","comic","cyberpunk","enchanted","fantasy","ink","manga","manga-color","minimalist","neon-tron","oil-painting","pastel","photo","photography","psychedelic","retro-fantasy","scifi-concept-art","space","standard","whimsical"]},"imageFidelity":{"type":"number","description":"Strengthen the similarity to the original image during the upscale. 
Default: optimized for your preset and style.","minimum":0,"maximum":100},"inputLocation":{"type":"string","description":"Location of the input image in the output.","enum":["bottom","left","middle","right","top"],"default":"middle"},"seed":{"type":"string","description":"The seed used to generate this asset"},"preset":{"type":"string"},"trainingImage":{"type":"boolean"},"resultMask":{"type":"boolean","description":"Boolean to enable returning the masks (binary image) in the response.","default":true},"lockExpiresAt":{"type":"string","description":"The ISO timestamp when the lock on the canvas will expire"},"baseModelId":{"type":"string","description":"The baseModelId that may be changed at inference time"},"pathPrecision":{"type":"number"},"rootParentId":{"type":"string"},"sharpen":{"type":"boolean","description":"Sharpen tiles.","default":false},"cornerThreshold":{"type":"number"},"styleImages":{"type":"array","description":"List of style images. Most of the time, only one image is enough. They must be existing AssetIds.","items":{"type":"string"}},"texture":{"type":"string","description":"The asset to convert into texture maps. Must reference an existing AssetId."},"filterSpeckle":{"type":"number"},"minThreshold":{"type":"number","description":"Minimum threshold for Grayscale conversion"},"maxThreshold":{"type":"number","description":"Maximum threshold for Grayscale conversion"},"styleImagesFidelity":{"type":"number","description":"Condition the influence of the style image(s). The higher the value, the more the style images will influence the upscaled image.","minimum":0,"maximum":100},"depthImage":{"type":"string","description":"The control image processed by depth estimator. 
Must reference an existing AssetId."},"parentId":{"type":"string"},"parentJobId":{"type":"string"},"width":{"type":"number","description":"The width of the rendered image.","minimum":1024,"maximum":2048},"strength":{"type":"number","description":"The strength\n\nOnly available for the `flux-kontext` LoRA model."},"inferenceId":{"type":"string","description":"The id of the Inference describing how this image was generated"},"lengthThreshold":{"type":"number"},"originalAssetId":{"type":"string"},"backgroundOpacity":{"type":"number","description":"Int to set between 0 and 255 for the opacity of the background in the result images.","minimum":0,"maximum":255},"aspectRatio":{"type":"string","description":"The optional aspect ratio given for the generation, only applicable for some models"},"detailsLevel":{"type":"number","description":"Amount of details to remove or add","minimum":-50,"maximum":50},"fractality":{"type":"number","description":"Determine the scale at which the upscale process works.\n- With a small value, the upscale works at the largest scale, resulting in fewer added details and more coherent images. Ideal for portraits, for example.\n- With a large value, the upscale works at the smallest scale, resulting in more added details and more hallucinations. Ideal for landscapes, for example.\n\n(info): A small value is slower and more expensive to run.","minimum":0,"maximum":100},"horizontalExpansionRatio":{"type":"number","description":"(deprecated) Horizontal expansion ratio.","minimum":1,"maximum":2},"points":{"type":"array","description":"List of points (label, x, y) in the image where label = 0 for background and 1 for object.","items":{"type":"array","items":{}}},"mode":{"type":"string"},"colorPrecision":{"type":"number"},"promptFidelity":{"type":"number","description":"Increase the fidelity to the prompt during upscale. 
Default: optimized for your preset and style.","minimum":0,"maximum":100},"betterQuality":{"type":"boolean","description":"Remove small dark spots (i.e. “pepper”) and connect small bright cracks.","default":false},"removeBackground":{"type":"boolean","description":"Remove background for Grayscale detector","default":true},"creativity":{"type":"number","description":"Allow the generation of \"hallucinations\" during the upscale process, which adds additional details and deviates from the original image. Default: optimized for your preset and style.","minimum":0,"maximum":100},"structureFidelity":{"type":"number","description":"Strength for the input image structure preservation","minimum":0,"maximum":100},"imageType":{"type":"string","description":"Preserve the seamless properties of skybox or texture images. Input has to be of same type (seamless).","enum":["seamfull","skybox","texture"],"default":"seamfull"},"image":{"type":"string","description":"The input image to process. Must reference an existing AssetId or be a data URL."},"thumbnail":{"type":"object","description":"The thumbnail of the canvas","properties":{"assetId":{"type":"string","description":"The AssetId of the image used as a thumbnail for the canvas (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"url":{"type":"string","description":"The url of the image used as a thumbnail for the canvas"}},"required":["assetId","url"]},"structureImage":{"type":"string","description":"The control image for structure. A canny detector will be applied to this image. 
Must reference an existing AssetId."},"invert":{"type":"boolean","description":"To invert the relief","default":false},"kind":{"type":"string","enum":["3d","audio","document","image","image-hdr","json","video"]},"colorMode":{"type":"string"},"highThreshold":{"type":"number","description":"High threshold for Canny detector"},"verticalExpansionRatio":{"type":"number","description":"(deprecated) Vertical expansion ratio.","minimum":1,"maximum":2},"halfMode":{"type":"boolean"},"name":{"type":"string"},"negativePromptStrength":{"type":"number","description":"Controls the influence of the negative prompt. Default 0 means the negative prompt has no effect. Higher values increase negative prompt influence.\nMust be > 0 if negativePrompt is provided.","minimum":0,"maximum":10},"refinementSteps":{"type":"number","description":"Additional refinement steps before scaling.\n\nIf scalingFactor == 1, the refinement process will be applied (1 + refinementSteps) times.\nIf scalingFactor > 1, the refinement process will be applied refinementSteps times.","minimum":0,"maximum":4},"referenceImages":{"type":"array","description":"The reference images used for the asset generation or editing","items":{"type":"string"}},"styleFidelity":{"type":"number","description":"The higher the value the more it will look like the style image(s)","minimum":0,"maximum":100},"resultContours":{"type":"boolean","description":"Boolean to output the contours.","default":false},"controlEnd":{"type":"number","description":"End step for control."},"polished":{"type":"number","description":"How polished is the surface? 0 is like a rough surface, 1 is like a mirror","minimum":0,"maximum":1},"scheduler":{"type":"string","description":"The scheduler used to generate this asset"},"overrideEmbeddings":{"type":"boolean","description":"Override the embeddings of the model. Only your prompt and negativePrompt will be used. 
Use with caution."},"progressPercent":{"type":"number"},"factor":{"type":"number","description":"Contrast factor for Grayscale detector"},"mask":{"type":"string","description":"The mask used for the asset generation or editing"},"lowThreshold":{"type":"number","description":"Low threshold for Canny detector"},"depthFidelity":{"type":"number","description":"The depth fidelity if a depth image provided","minimum":0,"maximum":100},"raised":{"type":"number","description":"How raised is the surface? 0 is flat like water, 1 is like a very rough rock","minimum":0,"maximum":1},"copiedAt":{"type":"string","description":"The date when the asset was copied to a project"},"contours":{"type":"array","items":{"type":"array","items":{"type":"array","items":{"type":"array","items":{"type":"number"}}}}},"negativePrompt":{"type":"string","description":"The negative prompt used to generate this asset"},"defaultParameters":{"type":"boolean","description":"If true, use the default parameters","default":false},"resizeOption":{"type":"number","description":"Size proportion of the input image in the output.","minimum":0.1,"maximum":1},"outputIndex":{"type":"number"},"saveFlipbook":{"type":"boolean","description":"Save a flipbook of the texture. Deactivated when the input texture is larger than 2048x2048px","default":true},"guidance":{"type":"number","description":"The guidance used to generate this asset"},"numInferenceSteps":{"type":"number","description":"The number of denoising steps for each image generation.","minimum":5,"maximum":50},"hdr":{"type":"number"},"clustering":{"type":"boolean","description":"Activate clustering.","default":false},"shiny":{"type":"number","description":"How shiny is the surface? 
0 is like a matte surface, 1 is like a diamond","minimum":0,"maximum":1},"colorCorrection":{"type":"boolean","description":"Ensure upscaled tiles have the same color histogram as the original tiles.","default":true},"sketch":{"type":"boolean","description":"Activate sketch detection instead of canny.","default":false},"overlapPercentage":{"type":"number","description":"Overlap percentage for the output image.","minimum":0,"maximum":0.5},"prompt":{"type":"string","description":"The prompt that guided the asset generation or editing"},"resultImage":{"type":"boolean","description":"Boolean to enable outputting the cut-out object.","default":false},"cannyStructureImage":{"type":"string","description":"The control image already processed by canny detector. Must reference an existing AssetId."},"geometryEnforcement":{"type":"number","description":"Apply extra control to the Skybox 360 geometry.\nThe higher the value, the more the 360 geometry will influence the generated skybox image.\n\nUse with caution. 
Default is adapted to the other parameters.","minimum":0,"maximum":100},"targetWidth":{"type":"number","description":"Target width for the upscaled image, take priority over scaling factor","minimum":1024,"maximum":16000}},"required":["kind","type"]},"automaticCaptioning":{"type":"string","description":"Automatic captioning of the asset"},"description":{"type":"string","description":"The description, it will contain in priority:\n- the manual description\n- the advanced captioning when the asset is used in training flow\n- the automatic captioning"},"privacy":{"type":"string","description":"The privacy of the asset","enum":["private","public","unlisted"]},"mimeType":{"type":"string","description":"The mime type of the asset (example: \"image/png\")"},"source":{"type":"string","description":"source of the asset","enum":["3d23d","3d23d:texture","3d:texture","3d:texture:albedo","3d:texture:metallic","3d:texture:mtl","3d:texture:normal","3d:texture:roughness","audio2audio","background-removal","canvas","canvas-drawing","canvas-export","detection","generative-fill","image-prompt-editing","img23d","img2img","img2video","inference-control-net","inference-control-net-img","inference-control-net-inpainting","inference-control-net-inpainting-ip-adapter","inference-control-net-ip-adapter","inference-control-net-reference","inference-control-net-texture","inference-img","inference-img-ip-adapter","inference-img-texture","inference-in-paint","inference-in-paint-ip-adapter","inference-reference","inference-reference-texture","inference-txt","inference-txt-ip-adapter","inference-txt-texture","patch","pixelization","reframe","restyle","segment","segmentation-image","segmentation-mask","skybox-3d","skybox-base-360","skybox-hdri","texture","texture:albedo","texture:ao","texture:edge","texture:height","texture:metallic","texture:normal","texture:smoothness","txt23d","txt2audio","txt2img","txt2video","unknown","uploaded","uploaded-3d","uploaded-audio","uploaded-avatar","uploaded-video"
,"upscale","upscale-skybox","upscale-texture","upscale-video","vectorization","video2img","video2video"]},"ownerId":{"type":"string","description":"The owner (project) ID (example: \"proj_23tlk332lkht3kl2\" or \"team_dlkhgs23tlk3hlkth32lkht3kl2\" for old teams)"},"createdAt":{"type":"string","description":"The asset creation date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"firstFrame":{"type":"object","description":"The video asset's first frame.\n\nContains the assetId and the url of the first frame.","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"editCapabilities":{"type":"array","description":"List of edit capabilities","items":{"type":"string","enum":["DETECTION","GENERATIVE_FILL","PIXELATE","PROMPT_EDITING","REFINE","REFRAME","REMOVE_BACKGROUND","SEGMENTATION","UPSCALE","UPSCALE_360","VECTORIZATION"]}},"embedding":{"type":"array","description":"The embedding of the asset when requested.\n\nOnly available when an asset can be embedded (ie: not Detection maps)","items":{"type":"number"}},"id":{"type":"string","description":"The asset ID (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"updatedAt":{"type":"string","description":"The asset last update date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"thumbnail":{"type":"object","description":"The asset's thumbnail.\n\nContains the assetId and the url of the thumbnail.","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"nsfw":{"type":"array","description":"The NSFW labels","items":{"type":"string"}},"kind":{"type":"string","description":"The kind of asset","enum":["3d","audio","document","image","image-hdr","json","video"]},"lastFrame":{"type":"object","description":"The video asset's last frame.\n\nContains the assetId and the url of the last 
frame.","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"originalFileUrl":{"type":"string","description":"The original file url.\n\nContains the url of the original file, without any conversion. Only available for some specific video, audio and threeD assets.\nIs only specified if the given asset data has been replaced with a new file during the creation of the asset."},"authorId":{"type":"string","description":"The author user ID (example: \"dcf121faaa1a0a0bbbd9ca1b73d62aea\")"},"url":{"type":"string","description":"Signed URL to get the asset content"},"isHidden":{"type":"boolean","description":"Whether the asset is hidden."},"tags":{"type":"array","description":"The associated tags (example: [\"sci-fi\", \"landscape\"])","items":{"type":"string"}},"outputIndex":{"type":"number","description":"The output index of the asset within a job\nThis index is a positive integer that starts at 0\nIt is used to differentiate between multiple outputs of the same job\nIf the job has only one output, this index is 0"},"collectionIds":{"type":"array","description":"A list of CollectionId this asset belongs to","items":{"type":"string"}},"properties":{"type":"object","description":"The properties of the asset, content may depend on the kind of asset returned","properties":{"transcription":{"type":"object","description":"Transcription of the audio","properties":{"text":{"type":"string"}},"required":["text"]},"hasNormals":{"type":"boolean","description":"Whether the mesh has normal vectors"},"format":{"type":"string","description":"Format of the mesh file (e.g. 
'glb', etc.)"},"description":{"type":"string","description":"Description of the audio"},"bitrate":{"type":"number","description":"Bitrate of the media in bits per second"},"boneCount":{"type":"number","description":"Number of bones if skeleton exists"},"classification":{"type":"string","description":"Classification of the audio","enum":["effect","interview","music","other","sound","speech","text","unknown"]},"sampleRate":{"type":"number","description":"Sample rate of the media in Hz"},"faceCount":{"type":"number","description":"Number of faces/triangles in the mesh"},"vertexCount":{"type":"number","description":"Number of vertices in the mesh"},"animationFrameCount":{"type":"number","description":"Number of animation frames if animations exist"},"duration":{"type":"number","description":"Duration of the media in seconds"},"frameRate":{"type":"number","description":"Frame rate of the video in frames per second"},"nbFrames":{"type":"number","description":"Number of frames in the video"},"channels":{"type":"number","description":"Number of channels of the audio"},"size":{"type":"number"},"hasSkeleton":{"type":"boolean","description":"Whether the mesh has bones/skeleton"},"width":{"type":"number"},"hasAnimations":{"type":"boolean","description":"Whether the mesh has animations"},"codecName":{"type":"string","description":"Codec name of the media"},"hasUVs":{"type":"boolean","description":"Whether the mesh has UV coordinates"},"dimensions":{"type":"array","description":"Bounding box dimensions [width, height, depth]","items":{}},"height":{"type":"number"}},"required":["size"]},"status":{"type":"string","description":"The actual 
status","enum":["error","pending","success"]}},"required":["authorId","collectionIds","createdAt","editCapabilities","id","kind","metadata","mimeType","ownerId","privacy","properties","source","status","tags","updatedAt","url"]}}},"GetJobsResponse":{"type":"object","required":["jobs"],"properties":{"nextPaginationToken":{"type":"string","description":"A token to query the next page of jobs"},"jobs":{"type":"array","items":{"type":"object","properties":{"createdAt":{"type":"string","description":"The job creation date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"jobId":{"type":"string","description":"The job ID (example: \"job_ocZCnG1Df35XRL1QyCZSRxAG8\")"},"metadata":{"type":"object","description":"Metadata of the job with some additional information","properties":{"output":{"type":"object","description":"The output of the job","properties":{}},"input":{"type":"object","description":"The inputs for the job","properties":{}},"assetIds":{"type":"array","description":"List of produced assets for this job","items":{"type":"string"}},"flow":{"type":"array","description":"The flow of the job. Only available for workflow jobs.","items":{"type":"object","properties":{"jobId":{"type":"string","description":"If the flow is part of a WorkflowJob, this is the jobId for the node.\njobId is only available for nodes started. A node \"Pending\" for a running workflow job is not started."},"assets":{"type":"array","description":"List of produced assets for this node.","items":{"type":"object","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]}},"logicType":{"type":"string","description":"The type of the logic for the node.\nOnly available for logic nodes.","enum":["if-else"]},"dependsOn":{"type":"array","description":"The nodes that this node depends on.\nOnly available for nodes that have dependencies. 
Mainly used for user approval nodes.","items":{"type":"string"}},"modelId":{"type":"string","description":"The model id for the node. Mainly used for custom model tasks."},"inputs":{"type":"array","description":"The inputs of the node.","items":{"type":"object","description":"This is used to run the flow.","properties":{"parent":{"type":"boolean","description":"Whether this input represents a parent asset to assign to the produced assets.\nOnly available for \\`file\\` and \\`file_array\\` input types.\n\nFor \\`file_array\\`, the parent asset is the first item in the array."},"color":{"type":"boolean","description":"Whether the input is a color or not. Only available for \\`string\\` input type."},"inputs":{"type":"array","description":"The list of inputs which form an object within a container array.\nAll inputs are the same as the current object.\nThis is only available for type inputs_array inputs.","items":{"type":"object","properties":{}}},"minLength":{"type":"number","description":"The minimum allowed length for string inputs. Also applies to each item in \\`string_array\\`."},"backgroundBehavior":{"type":"string","description":"Specifies the background behavior for the input. 
Only available for \\`file\\` and \\`file_array\\`\ninput types with kind \\`image\\`.","enum":["opaque","transparent"]},"description":{"type":"string","description":"Help text displayed in the UI to provide additional information about the input"},"type":{"type":"string","description":"The data type of the input","enum":["boolean","file","file_array","inputs_array","model","model_array","number","number_array","string","string_array"]},"required":{"type":"object","description":"Set of rules that describes when this input is required:\n- \\`always\\`: Input is always required\n- \\`ifNotDefined\\`: Input is required when another specified input is not defined\n- \\`ifDefined\\`: Input is required when another specified input is defined\n- \\`conditionalValues\\`: Input is required when another input has a specific value\n\nBy default, the input is not required.","properties":{"always":{"type":"boolean","description":"Whether the input is always required"},"ifNotDefined":{"type":"object","description":"Makes this input required when another input is not defined:\n- Key: name of the input that must be undefined\n- Value: message to display when this input is required","properties":{}},"ifDefined":{"type":"object","description":"Makes this input required when another input is defined:\n- Key: name of the input that must be defined\n- Value: message to display when this input is required","properties":{}},"conditionalValues":{"type":"object","description":"Makes this input required when another input has a specific value:\n- Key: name of the input to check\n- Value: operation and allowed values that trigger the requirement","properties":{}}}},"modelTypes":{"type":"array","description":"The allowed model types for this input. 
Example: \\`[\"flux.1-lora\"]\\`.\nOnly available for \\`model_array\\` input type.","items":{"type":"string","enum":["custom","flux.1","flux.1-composition","flux.1-kontext-dev","flux.1-kontext-lora","flux.1-krea-dev","flux.1-krea-lora","flux.1-lora","flux.1-pro","flux.1.1-pro-ultra","flux1.1-pro","gpt-image-1","sd-1_5","sd-1_5-composition","sd-1_5-lora","sd-xl","sd-xl-composition","sd-xl-lora"]}},"ref":{"type":"object","description":"The reference to another input or output of the same workflow.\nMust have at least one of node or conditional.","properties":{"equal":{"type":"string","description":"This is the desired node output value if ref is an if/else node."},"node":{"type":"string","description":"The node id or 'workflow' if the source is a workflow input."},"conditional":{"type":"array","description":"The conditional nodes to reference.\nIf the conditional nodes are successful, the node will be successful.\nIf the conditional nodes are skipped, the node will be skipped.\nContains an array of node ids used to check the status of the nodes.","items":{"type":"string"}},"name":{"type":"string","description":"The name of the input or output to reference.\nIf the type is 'workflow', the name is the name of the input of the workflow and is required\nIf the type is 'node', the name is not mandatory, except if you want all outputs of the node.\nTo get all outputs of a node, you can use the name 'all'."}}},"min":{"type":"number","description":"The minimum allowed value. Only available for \\`number\\` and array input types."},"promptSpark":{"type":"boolean","description":"Whether the input is used with prompt spark. Only available for \\`string\\` input type."},"placeholder":{"type":"string","description":"Placeholder text for the input. Only available for 'string' input type."},"group":{"type":"string","description":"Used to visually group inputs together in the UI. 
Inputs with the same group value appear\nconsecutively in the UI."},"allowedValues":{"type":"array","description":"The allowed values for the input. For \\`string\\` or \\`number\\` types, creates a single-select\ndropdown.\nFor \\`string_array\\` type, creates a multi-select dropdown.","items":{}},"costImpact":{"type":"boolean","description":"Whether this input affects the model's cost calculation"},"max":{"type":"number","description":"The maximum allowed value. Only available for \\`number\\` and \\`array\\` input types."},"kind":{"type":"string","description":"The asset kind of the input. Only taken into account for \\`file\\` and \\`file_array\\` input types.","enum":["3d","audio","document","image","image-hdr","json","video"]},"maskFrom":{"type":"string","description":"The name of the file input field to use as the mask source"},"label":{"type":"string","description":"The label displayed in the UI for this input"},"hint":{"type":"string","description":"Hint text displayed in the UI as a tooltip to guide the user"},"name":{"type":"string","description":"The name that must be used to call the model through the API"},"step":{"type":"number","description":"The step increment for numeric inputs. Only available for \\`number\\` input type.","minimum":1},"prompt":{"type":"boolean","description":"Whether the input is a prompt. When true, displays as a text area with prompt spark feature.\nOnly available for \\`string\\` input type."},"maxLength":{"type":"number","description":"The maximum allowed length for \\`string\\` inputs. 
Also applies to each item in \\`string_array\\`."}},"required":["name","type"]}},"id":{"type":"string","description":"The id of the node."},"logic":{"type":"object","description":"The logic of the node.\nOnly available for logic nodes.","properties":{"default":{"type":"string","description":"The default case of the logic.\nContains the id/output of the node to execute if no case is matched.\nOnly available for if/else nodes."},"transform":{"type":"string","description":"The transform of the logic.\nOnly available for transform nodes."},"cases":{"type":"array","description":"The cases of the logic.\nOnly available for if/else nodes.","items":{"type":"object","properties":{"condition":{"type":"string"},"value":{"type":"string"}},"required":["condition","value"]}}}},"type":{"type":"string","description":"The type of the job for the node.","enum":["custom-model","generate-prompt","logic","remove-background","transform","user-approval","workflow"]},"workflowId":{"type":"string","description":"The workflow id for the node. Mainly used for workflow tasks."},"status":{"type":"string","description":"The status of the node. 
Only available for WorkflowJob nodes.","enum":["failure","pending","processing","skipped","success"]}},"required":["id","status","type"]}},"workflowId":{"type":"string","description":"The workflow ID of the job if job is part of a workflow."},"workflowJobId":{"type":"string","description":"The workflow job ID of the job if job is part of a workflow job."}}},"statusHistory":{"type":"array","description":"The history of the different statuses the job went through with the ISO string date\nof when the job reached each statuses.","items":{"type":"object","properties":{"date":{"type":"string"},"status":{"type":"string","enum":["canceled","failure","finalizing","in-progress","pending","queued","success","warming-up"]}},"required":["date","status"]}},"progress":{"type":"number","description":"Progress of the job (between 0 and 1)"},"authorId":{"type":"string","description":"The author user ID (example: \"dcf121faaa1a0a0bbbd9ca1b73d62aea\")"},"jobType":{"type":"string","description":"The type of job","enum":["assets-download","canvas-export","caption","caption-llava","custom","describe-style","detection","embed","flux","flux-model-training","generate-prompt","image-generation","image-prompt-editing","inference","mesh-preview-rendering","model-download","model-import","model-training","openai-image-generation","patch-image","pixelate","reframe","remove-background","repaint","restyle","segment","skybox-3d","skybox-base-360","skybox-hdri","skybox-upscale-360","texture","translate","upload","upscale","upscale-skybox","upscale-texture","vectorize","workflow"]},"ownerId":{"type":"string","description":"The owner ID (example: \"team_U3Qmc8PCdWXwAQJ4Dvw4tV6D\")"},"billing":{"type":"object","description":"The billing of the job","properties":{"cuDiscount":{"type":"number"},"cuCost":{"type":"number"}},"required":["cuCost","cuDiscount"]},"status":{"type":"string","description":"The current status of the 
job","enum":["canceled","failure","finalizing","in-progress","pending","queued","success","warming-up"]},"updatedAt":{"type":"string","description":"The job last update date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"}},"required":["createdAt","jobId","jobType","metadata","progress","status","statusHistory","updatedAt"]}}}},"PutWorkflowsRunByWorkflowIdRequest":{"description":"The request body for the workflow run must be retrieve from GET /workflows/{workflowId} inputs fields"},"PostInpaintInferencesResponse":{"type":"object","required":["inference","job"],"properties":{"creativeUnitsCost":{"type":"number","description":"The Creative Units cost for the request billed"},"creativeUnitsDiscount":{"type":"number","description":"The Creative Units discount for the request billed"},"inference":{"type":"object","properties":{"image":{"type":"string","description":"Signed URL to display the input image"},"imageId":{"type":"string","description":"Asset id of the input image"},"seed":{"type":"string","description":"Used to reproduce previous results. Default: randomly generated number."},"strength":{"type":"number","description":"Controls the noise intensity introduced to the input image, where a value of 1.0 completely erases the original image's details. Available for img2img and inpainting. (within [0.01, 1.0], default: 0.75)","minimum":0.01,"maximum":1},"modelEpoch":{"type":"string","description":"The epoch of the model to use for the inference. Only available for Flux Lora Trained models."},"hideResults":{"type":"boolean","description":"If set, generated assets will be hidden and not returned in the list of images of the inference\nor when listing assets (default: false)","default":false},"maskId":{"type":"string","description":"Asset id of the mask image"},"type":{"type":"string","description":"The type of inference to use. 
Example: txt2img, img2img, etc.\n\nSelecting the right type will condition the expected parameters.\n\nNote: if model.type is `sd-xl*` or `sd-1_5*`, when using the `\"inpaint\"` inference type, Scenario determines the best available `baseModel` for a given `modelId`: one of `[\"stable-diffusion-inpainting\", \"stable-diffusion-xl-1.0-inpainting-0.1\"]` will be used.","enum":["txt2img","txt2img_ip_adapter","txt2img_texture","img2img","img2img_ip_adapter","img2img_texture","inpaint","outpaint","inpaint_ip_adapter","controlnet","controlnet_ip_adapter","reference","reference_texture","controlnet_reference","controlnet_img2img","controlnet_inpaint","controlnet_inpaint_ip_adapter","controlnet_texture"]},"negativePrompt":{"type":"string","description":"The prompt not to guide the image generation, ignored when guidance < 1 (example: \"((ugly face))\")\nFor Flux based model (not Fast-Flux): requires negativePromptStrength > 0 and active only for inference types txt2img / img2img / controlnet."},"scheduler":{"type":"string","description":"The scheduler to use to override the default configured for the model. 
See detailed documentation for more details.","enum":["DDIMScheduler","DDPMScheduler","DEISMultistepScheduler","DPMSolverMultistepScheduler","DPMSolverSinglestepScheduler","EulerAncestralDiscreteScheduler","EulerDiscreteScheduler","HeunDiscreteScheduler","KDPM2AncestralDiscreteScheduler","KDPM2DiscreteScheduler","LCMScheduler","LMSDiscreteScheduler","PNDMScheduler","TCDScheduler","UniPCMultistepScheduler"]},"intermediateImages":{"type":"boolean","description":"Enable or disable the intermediate images generation (default: false)","default":false},"concepts":{"type":"array","items":{"type":"object","properties":{"modelId":{"type":"string","description":"The model ID (example: \"model_eyVcnFJcR92BxBkz7N6g5w\")"},"modelEpoch":{"type":"string","description":"The epoch of the model (example: \"000001\")\nOnly available for Flux Lora Trained models"},"scale":{"type":"number","description":"The scale of the model (example: 1.0)\nFor Flux Kontext Prompt Editing, the scale is between 0 and 2.","minimum":-2,"maximum":2}},"required":["modelId","scale"]}},"disableMerging":{"type":"boolean","description":"If set to true, the entire input image will likely change during inpainting. This results in faster inferences, but the output image will be harder to integrate if the input is just a small part of a larger image.","default":false},"guidance":{"type":"number","description":"Controls how closely the generated image follows the prompt. Higher values result in stronger adherence to the prompt. 
Default and allowed values depend on the model type:\n- For Flux dev models, the default is 3.5 and allowed values are within [0, 10]\n- For Flux pro models, the default is 3 and allowed values are within [2, 5]\n- For SDXL models, the default is 6 and allowed values are within [0, 20]\n- For SD1.5 models, the default is 7.5 and allowed values are within [0, 20]","minimum":0,"maximum":20},"numInferenceSteps":{"type":"number","description":"The number of denoising steps for each image generation (within [1, 150], default: 30)","minimum":1,"maximum":150},"numSamples":{"type":"number","description":"The number of images to generate (within [1, 128], default: 4)","minimum":1,"maximum":128},"width":{"type":"number","description":"The width of the generated images, must be a 8 multiple (within [64, 2048], default: 512)\nIf model.type is `sd-xl`, `sd-xl-lora`, `sd-xl-composition` the width must be within [512, 2048]\nIf model.type is `sd-1_5`, the width must be within [64, 1024]\nIf model.type is `flux.1.1-pro-ultra`, you can use the aspectRatio parameter instead","minimum":64,"maximum":2048},"negativePromptStrength":{"type":"number","description":"Only applicable for flux-dev based models for `txt2img`, `img2img`, and `controlnet` inference types.\n\nControls the influence of the negative prompt. Default 0 means the negative prompt has no effect. Higher values increase negative prompt influence.\nMust be > 0 if negativePrompt is provided.","minimum":0,"maximum":10},"baseModelId":{"type":"string","description":"The base model to use for the inference. Only Flux LoRA models can use this parameter.\nAllowed values are available in the model's attribute: `compliantModelIds`"},"prompt":{"type":"string","description":"Full text prompt including the model placeholder. 
(example: \"an illustration of phoenix in a fantasy world, flying over a mountain, 8k, bokeh effect\")"},"height":{"type":"number","description":"The height of the generated images, must be a 8 multiple (within [64, 2048], default: 512)\nIf model.type is `sd-xl`, `sd-xl-lora`, `sd-xl-composition` the height must be within [512, 2048]\nIf model.type is `sd-1_5`, the height must be within [64, 1024]\nIf model.type is `flux.1.1-pro-ultra`, you can use the aspectRatio parameter instead","minimum":64,"maximum":2048},"mask":{"type":"string","description":"Signed URL to display the mask image"}},"required":["image","imageId","mask","maskId","prompt","type"]},"job":{"type":"object","properties":{"createdAt":{"type":"string","description":"The job creation date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"jobId":{"type":"string","description":"The job ID (example: \"job_ocZCnG1Df35XRL1QyCZSRxAG8\")"},"metadata":{"type":"object","description":"Metadata of the job with some additional information","properties":{"output":{"type":"object","description":"The output of the job","properties":{}},"input":{"type":"object","description":"The inputs for the job","properties":{}},"assetIds":{"type":"array","description":"List of produced assets for this job","items":{"type":"string"}},"flow":{"type":"array","description":"The flow of the job. Only available for workflow jobs.","items":{"type":"object","properties":{"jobId":{"type":"string","description":"If the flow is part of a WorkflowJob, this is the jobId for the node.\njobId is only available for nodes started. 
A node \"Pending\" for a running workflow job is not started."},"assets":{"type":"array","description":"List of produced assets for this node.","items":{"type":"object","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]}},"logicType":{"type":"string","description":"The type of the logic for the node.\nOnly available for logic nodes.","enum":["if-else"]},"dependsOn":{"type":"array","description":"The nodes that this node depends on.\nOnly available for nodes that have dependencies. Mainly used for user approval nodes.","items":{"type":"string"}},"modelId":{"type":"string","description":"The model id for the node. Mainly used for custom model tasks."},"inputs":{"type":"array","description":"The inputs of the node.","items":{"type":"object","description":"This is used to run the flow.","properties":{"parent":{"type":"boolean","description":"Whether this input represents a parent asset to assign to the produced assets.\nOnly available for \\`file\\` and \\`file_array\\` input types.\n\nFor \\`file_array\\`, the parent asset is the first item in the array."},"color":{"type":"boolean","description":"Whether the input is a color or not. Only available for \\`string\\` input type."},"inputs":{"type":"array","description":"The list of inputs which form an object within a container array.\nAll inputs are the same as the current object.\nThis is only available for type inputs_array inputs.","items":{"type":"object","properties":{}}},"minLength":{"type":"number","description":"The minimum allowed length for string inputs. Also applies to each item in \\`string_array\\`."},"backgroundBehavior":{"type":"string","description":"Specifies the background behavior for the input. 
Only available for \\`file\\` and \\`file_array\\`\ninput types with kind \\`image\\`.","enum":["opaque","transparent"]},"description":{"type":"string","description":"Help text displayed in the UI to provide additional information about the input"},"type":{"type":"string","description":"The data type of the input","enum":["boolean","file","file_array","inputs_array","model","model_array","number","number_array","string","string_array"]},"required":{"type":"object","description":"Set of rules that describes when this input is required:\n- \\`always\\`: Input is always required\n- \\`ifNotDefined\\`: Input is required when another specified input is not defined\n- \\`ifDefined\\`: Input is required when another specified input is defined\n- \\`conditionalValues\\`: Input is required when another input has a specific value\n\nBy default, the input is not required.","properties":{"always":{"type":"boolean","description":"Whether the input is always required"},"ifNotDefined":{"type":"object","description":"Makes this input required when another input is not defined:\n- Key: name of the input that must be undefined\n- Value: message to display when this input is required","properties":{}},"ifDefined":{"type":"object","description":"Makes this input required when another input is defined:\n- Key: name of the input that must be defined\n- Value: message to display when this input is required","properties":{}},"conditionalValues":{"type":"object","description":"Makes this input required when another input has a specific value:\n- Key: name of the input to check\n- Value: operation and allowed values that trigger the requirement","properties":{}}}},"modelTypes":{"type":"array","description":"The allowed model types for this input. 
Example: \\`[\"flux.1-lora\"]\\`.\nOnly available for \\`model_array\\` input type.","items":{"type":"string","enum":["custom","flux.1","flux.1-composition","flux.1-kontext-dev","flux.1-kontext-lora","flux.1-krea-dev","flux.1-krea-lora","flux.1-lora","flux.1-pro","flux.1.1-pro-ultra","flux1.1-pro","gpt-image-1","sd-1_5","sd-1_5-composition","sd-1_5-lora","sd-xl","sd-xl-composition","sd-xl-lora"]}},"ref":{"type":"object","description":"The reference to another input or output of the same workflow.\nMust have at least one of node or conditional.","properties":{"equal":{"type":"string","description":"This is the desired node output value if ref is an if/else node."},"node":{"type":"string","description":"The node id or 'workflow' if the source is a workflow input."},"conditional":{"type":"array","description":"The conditional nodes to reference.\nIf the conditional nodes are successful, the node will be successful.\nIf the conditional nodes are skipped, the node will be skipped.\nContains an array of node ids used to check the status of the nodes.","items":{"type":"string"}},"name":{"type":"string","description":"The name of the input or output to reference.\nIf the type is 'workflow', the name is the name of the input of the workflow and is required\nIf the type is 'node', the name is not mandatory, except if you want all outputs of the node.\nTo get all outputs of a node, you can use the name 'all'."}}},"min":{"type":"number","description":"The minimum allowed value. Only available for \\`number\\` and array input types."},"promptSpark":{"type":"boolean","description":"Whether the input is used with prompt spark. Only available for \\`string\\` input type."},"placeholder":{"type":"string","description":"Placeholder text for the input. Only available for 'string' input type."},"group":{"type":"string","description":"Used to visually group inputs together in the UI. 
Inputs with the same group value appear\nconsecutively in the UI."},"allowedValues":{"type":"array","description":"The allowed values for the input. For \\`string\\` or \\`number\\` types, creates a single-select\ndropdown.\nFor \\`string_array\\` type, creates a multi-select dropdown.","items":{}},"costImpact":{"type":"boolean","description":"Whether this input affects the model's cost calculation"},"max":{"type":"number","description":"The maximum allowed value. Only available for \\`number\\` and \\`array\\` input types."},"kind":{"type":"string","description":"The asset kind of the input. Only taken into account for \\`file\\` and \\`file_array\\` input types.","enum":["3d","audio","document","image","image-hdr","json","video"]},"maskFrom":{"type":"string","description":"The name of the file input field to use as the mask source"},"label":{"type":"string","description":"The label displayed in the UI for this input"},"hint":{"type":"string","description":"Hint text displayed in the UI as a tooltip to guide the user"},"name":{"type":"string","description":"The name that must be used to call the model through the API"},"step":{"type":"number","description":"The step increment for numeric inputs. Only available for \\`number\\` input type.","minimum":1},"prompt":{"type":"boolean","description":"Whether the input is a prompt. When true, displays as a text area with prompt spark feature.\nOnly available for \\`string\\` input type."},"maxLength":{"type":"number","description":"The maximum allowed length for \\`string\\` inputs. 
Also applies to each item in \\`string_array\\`."}},"required":["name","type"]}},"id":{"type":"string","description":"The id of the node."},"logic":{"type":"object","description":"The logic of the node.\nOnly available for logic nodes.","properties":{"default":{"type":"string","description":"The default case of the logic.\nContains the id/output of the node to execute if no case is matched.\nOnly available for if/else nodes."},"transform":{"type":"string","description":"The transform of the logic.\nOnly available for transform nodes."},"cases":{"type":"array","description":"The cases of the logic.\nOnly available for if/else nodes.","items":{"type":"object","properties":{"condition":{"type":"string"},"value":{"type":"string"}},"required":["condition","value"]}}}},"type":{"type":"string","description":"The type of the job for the node.","enum":["custom-model","generate-prompt","logic","remove-background","transform","user-approval","workflow"]},"workflowId":{"type":"string","description":"The workflow id for the node. Mainly used for workflow tasks."},"status":{"type":"string","description":"The status of the node. 
Only available for WorkflowJob nodes.","enum":["failure","pending","processing","skipped","success"]}},"required":["id","status","type"]}},"workflowId":{"type":"string","description":"The workflow ID of the job if job is part of a workflow."},"workflowJobId":{"type":"string","description":"The workflow job ID of the job if job is part of a workflow job."}}},"statusHistory":{"type":"array","description":"The history of the different statuses the job went through with the ISO string date\nof when the job reached each statuses.","items":{"type":"object","properties":{"date":{"type":"string"},"status":{"type":"string","enum":["canceled","failure","finalizing","in-progress","pending","queued","success","warming-up"]}},"required":["date","status"]}},"progress":{"type":"number","description":"Progress of the job (between 0 and 1)"},"authorId":{"type":"string","description":"The author user ID (example: \"dcf121faaa1a0a0bbbd9ca1b73d62aea\")"},"jobType":{"type":"string","description":"The type of job","enum":["assets-download","canvas-export","caption","caption-llava","custom","describe-style","detection","embed","flux","flux-model-training","generate-prompt","image-generation","image-prompt-editing","inference","mesh-preview-rendering","model-download","model-import","model-training","openai-image-generation","patch-image","pixelate","reframe","remove-background","repaint","restyle","segment","skybox-3d","skybox-base-360","skybox-hdri","skybox-upscale-360","texture","translate","upload","upscale","upscale-skybox","upscale-texture","vectorize","workflow"]},"ownerId":{"type":"string","description":"The owner ID (example: \"team_U3Qmc8PCdWXwAQJ4Dvw4tV6D\")"},"billing":{"type":"object","description":"The billing of the job","properties":{"cuDiscount":{"type":"number"},"cuCost":{"type":"number"}},"required":["cuCost","cuDiscount"]},"status":{"type":"string","description":"The current status of the 
job","enum":["canceled","failure","finalizing","in-progress","pending","queued","success","warming-up"]},"updatedAt":{"type":"string","description":"The job last update date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"}},"required":["createdAt","jobId","jobType","metadata","progress","status","statusHistory","updatedAt"]}}},"PutImagesSegmentationRequest":{"type":"object","properties":{"image":{"type":"string","description":"The input image as a data URL (example: \"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVQYV2NgYAAAAAMAAWgmWQ0AAAAASUVORK5CYII=\") or the asset ID (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"assetId":{"type":"string","description":"Deprecated: The asset ID (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\"). Prefer to use image with the asset ID instead."},"bbox":{"type":"array","description":"A bounding box around the object of interest, in the format [x1, y1, x2, y2]","items":{"type":"number"}},"name":{"type":"string","description":"The original file name of the image (example: \"low-res-image.jpg\"). It will be ignored if assetId is provided."},"text":{"type":"string","description":"A textual description / keywords describing the object of interest"},"parameters":{"type":"object","description":"*** LEGACY TYPES ****","properties":{"checkpoint":{"type":"string","description":"The checkpoint to use","enum":["fastsam_x","sam_b","sam_h"],"default":"fastsam_x"},"resultContours":{"type":"boolean","description":"Boolean to output the contours.","default":false},"dilate":{"type":"number","description":"The number of pixels to dilate the result masks.","minimum":0,"maximum":30},"backgroundOpacity":{"type":"number","description":"Int to set between 0 and 255 for the opacity of the background in the result images.","minimum":0,"maximum":255},"betterQuality":{"type":"boolean","description":"Remove small dark spots (i.e. 
“pepper”) and connect small bright cracks.","default":false},"resultImage":{"type":"boolean","description":"Boolean to enable output of the cut out object.","default":false},"resultMask":{"type":"boolean","description":"Boolean to enable returning the masks (binary image) in the response.","default":true}}},"points":{"type":"array","description":"List of points, either on the object, or on the background, in the format (label, x, y) in the image where label = 0 for a point on the background and 1 a point on the object of interest.","items":{"type":"array","items":{}}}}},"PostAssetResponse":{"type":"object","required":["asset"],"properties":{"asset":{"type":"object","properties":{"preview":{"type":"object","description":"The asset's preview.\n\nContains the assetId and the url of the preview.","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"metadata":{"type":"object","description":"Metadata of the asset with some additional information","properties":{"maxIterations":{"type":"number"},"modality":{"type":"string","description":"Modality to detect","enum":["canny","depth","grayscale","lineart_anime","mlsd","normal","pose","scribble","segmentation","sketch"],"default":"canny"},"tileStyle":{"type":"boolean","description":"If set to true, during the upscaling process, the model will match tiles of the source image with tiles of the style image(s). This will result in a more coherent restyle. Works best with style images that have a similar composition.","default":false},"modelId":{"type":"string","description":"The modelId used to generate this asset"},"sourceProjectId":{"type":"string"},"bbox":{"type":"array","description":"A bounding box around the object of interest, in the format [x1, y1, x2, y2].","items":{}},"keypointThreshold":{"type":"number","description":"How polished is the surface? 0 is like a rough surface, 1 is like a mirror","minimum":0,"maximum":1},"type":{"type":"string","description":"The type of the asset. 
Ex: 'inference-txt2img' will represent an asset generated from a text to image model","enum":["3d-texture","3d-texture-albedo","3d-texture-metallic","3d-texture-mtl","3d-texture-normal","3d-texture-roughness","3d23d","3d23d-texture","audio2audio","background-removal","canvas","canvas-drawing","canvas-export","detection","generative-fill","image-prompt-editing","img23d","img2img","img2video","inference-controlnet","inference-controlnet-img2img","inference-controlnet-inpaint","inference-controlnet-inpaint-ip-adapter","inference-controlnet-ip-adapter","inference-controlnet-reference","inference-controlnet-texture","inference-img2img","inference-img2img-ip-adapter","inference-img2img-texture","inference-inpaint","inference-inpaint-ip-adapter","inference-reference","inference-reference-texture","inference-txt2img","inference-txt2img-ip-adapter","inference-txt2img-texture","patch","pixelization","reframe","restyle","segment","segmentation-image","segmentation-mask","skybox-3d","skybox-base-360","skybox-hdri","texture","texture-albedo","texture-ao","texture-edge","texture-height","texture-metallic","texture-normal","texture-smoothness","txt23d","txt2audio","txt2img","txt2video","unknown","uploaded","uploaded-3d","uploaded-audio","uploaded-avatar","uploaded-video","upscale","upscale-skybox","upscale-texture","upscale-video","vectorization","video2img","video2video"]},"text":{"type":"string","description":"A textual description / keywords describing the object of interest.","maxLength":100},"height":{"type":"number"},"nbMasks":{"type":"number"},"creativityDecay":{"type":"number","description":"Amount of decay in creativity over the upscale process. 
The lower the value, the less the creativity will be preserved over the upscale process.","minimum":0,"maximum":100},"layerDifference":{"type":"number"},"scalingFactor":{"type":"number","description":"Scaling factor (when `targetWidth` not specified)","minimum":1,"maximum":16},"inputFidelity":{"type":"string","description":"When set to `high`, allows to better preserve details from the input images in the output.\nThis is especially useful when using images that contain elements like faces or logos that\nrequire accurate preservation in the generated image.\n\nYou can provide multiple input images that will all be preserved with high fidelity, but keep\nin mind that the first image will be preserved with richer textures and finer details, so if\nyou include elements such as faces, consider placing them in the first image.\n\nOnly available for the `gpt-image-1` model.","enum":["high","low"],"default":"low"},"modelType":{"type":"string","description":"The type of the generator used","enum":["custom","flux.1","flux.1-composition","flux.1-kontext-dev","flux.1-kontext-lora","flux.1-krea-dev","flux.1-krea-lora","flux.1-lora","flux.1-pro","flux.1.1-pro-ultra","flux1.1-pro","gpt-image-1","sd-1_5","sd-1_5-composition","sd-1_5-lora","sd-xl","sd-xl-composition","sd-xl-lora"]},"numOutputs":{"type":"number","description":"The number of outputs to generate.","minimum":1,"maximum":8},"angular":{"type":"number","description":"How angular is the surface? 
0 is like a sphere, 1 is like a mechanical object","minimum":0,"maximum":1},"targetHeight":{"type":"number","description":"The target height of the output image.","minimum":0,"maximum":2048},"concepts":{"type":"array","description":"Flux Kontext LoRA to style the image.\nFor Flux Kontext Prompt Editing.","items":{"type":"object","properties":{"modelId":{"type":"string","description":"The model ID (example: \"model_eyVcnFJcR92BxBkz7N6g5w\")"},"modelEpoch":{"type":"string","description":"The epoch of the model (example: \"000001\")\nOnly available for Flux Lora Trained models"},"scale":{"type":"number","description":"The scale of the model (example: 1.0)\nFor Flux Kontext Prompt Editing, the scale is between 0 and 2.","minimum":-2,"maximum":2}},"required":["modelId","scale"]}},"size":{"type":"number"},"spliceThreshold":{"type":"number"},"dilate":{"type":"number","description":"The number of pixels to dilate the result masks.","minimum":0,"maximum":30},"style":{"type":"string","enum":["3d-cartoon","3d-rendered","anime","cartoon","cinematic","claymation","cloud-skydome","comic","cyberpunk","enchanted","fantasy","ink","manga","manga-color","minimalist","neon-tron","oil-painting","pastel","photo","photography","psychedelic","retro-fantasy","scifi-concept-art","space","standard","whimsical"]},"imageFidelity":{"type":"number","description":"Strengthen the similarity to the original image during the upscale. 
Default: optimized for your preset and style.","minimum":0,"maximum":100},"inputLocation":{"type":"string","description":"Location of the input image in the output.","enum":["bottom","left","middle","right","top"],"default":"middle"},"seed":{"type":"string","description":"The seed used to generate this asset"},"preset":{"type":"string"},"trainingImage":{"type":"boolean"},"resultMask":{"type":"boolean","description":"Boolean to able return the masks (binary image) in the response.","default":true},"lockExpiresAt":{"type":"string","description":"The ISO timestamp when the lock on the canvas will expire"},"baseModelId":{"type":"string","description":"The baseModelId that maybe changed at inference time"},"pathPrecision":{"type":"number"},"rootParentId":{"type":"string"},"sharpen":{"type":"boolean","description":"Sharpen tiles.","default":false},"cornerThreshold":{"type":"number"},"styleImages":{"type":"array","description":"List of style images. Most of the time, only one image is enough. It must be existing AssetIds.","items":{"type":"string"}},"texture":{"type":"string","description":"The asset to convert in texture maps. Must reference an existing AssetId."},"filterSpeckle":{"type":"number"},"minThreshold":{"type":"number","description":"Minimum threshold for Grayscale conversion"},"maxThreshold":{"type":"number","description":"Maximum threshold for Grayscale conversion"},"styleImagesFidelity":{"type":"number","description":"Condition the influence of the style image(s). The higher the value, the more the style images will influence the upscaled image.","minimum":0,"maximum":100},"depthImage":{"type":"string","description":"The control image processed by depth estimator. 
Must reference an existing AssetId."},"parentId":{"type":"string"},"parentJobId":{"type":"string"},"width":{"type":"number","description":"The width of the rendered image.","minimum":1024,"maximum":2048},"strength":{"type":"number","description":"The strength\n\nOnly available for the `flux-kontext` LoRA model."},"inferenceId":{"type":"string","description":"The id of the Inference describing how this image was generated"},"lengthThreshold":{"type":"number"},"originalAssetId":{"type":"string"},"backgroundOpacity":{"type":"number","description":"Int to set between 0 and 255 for the opacity of the background in the result images.","minimum":0,"maximum":255},"aspectRatio":{"type":"string","description":"The optional aspect ratio given for the generation, only applicable for some models"},"detailsLevel":{"type":"number","description":"Amount of details to remove or add","minimum":-50,"maximum":50},"fractality":{"type":"number","description":"Determine the scale at which the upscale process works.\n- With a small value, the upscale works at the largest scale, resulting in fewer added details and more coherent images. Ideal for portraits, for example.\n- With a large value, the upscale works at the smallest scale, resulting in more added details and more hallucinations. Ideal for landscapes, for example.\n\n(info): A small value is slower and more expensive to run.","minimum":0,"maximum":100},"horizontalExpansionRatio":{"type":"number","description":"(deprecated) Horizontal expansion ratio.","minimum":1,"maximum":2},"points":{"type":"array","description":"List of points (label, x, y) in the image where label = 0 for background and 1 for object.","items":{"type":"array","items":{}}},"mode":{"type":"string"},"colorPrecision":{"type":"number"},"promptFidelity":{"type":"number","description":"Increase the fidelity to the prompt during upscale. 
Default: optimized for your preset and style.","minimum":0,"maximum":100},"betterQuality":{"type":"boolean","description":"Remove small dark spots (i.e. “pepper”) and connect small bright cracks.","default":false},"removeBackground":{"type":"boolean","description":"Remove background for Grayscale detector","default":true},"creativity":{"type":"number","description":"Allow the generation of \"hallucinations\" during the upscale process, which adds additional details and deviates from the original image. Default: optimized for your preset and style.","minimum":0,"maximum":100},"structureFidelity":{"type":"number","description":"Strength for the input image structure preservation","minimum":0,"maximum":100},"imageType":{"type":"string","description":"Preserve the seamless properties of skybox or texture images. Input has to be of same type (seamless).","enum":["seamfull","skybox","texture"],"default":"seamfull"},"image":{"type":"string","description":"The input image to process. Must reference an existing AssetId or be a data URL."},"thumbnail":{"type":"object","description":"The thumbnail of the canvas","properties":{"assetId":{"type":"string","description":"The AssetId of the image used as a thumbnail for the canvas (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"url":{"type":"string","description":"The url of the image used as a thumbnail for the canvas"}},"required":["assetId","url"]},"structureImage":{"type":"string","description":"The control image for structure. A canny detector will be applied to this image. 
Must reference an existing AssetId."},"invert":{"type":"boolean","description":"To invert the relief","default":false},"kind":{"type":"string","enum":["3d","audio","document","image","image-hdr","json","video"]},"colorMode":{"type":"string"},"highThreshold":{"type":"number","description":"High threshold for Canny detector"},"verticalExpansionRatio":{"type":"number","description":"(deprecated) Vertical expansion ratio.","minimum":1,"maximum":2},"halfMode":{"type":"boolean"},"name":{"type":"string"},"negativePromptStrength":{"type":"number","description":"Controls the influence of the negative prompt. Default 0 means the negative prompt has no effect. Higher values increase negative prompt influence.\nMust be > 0 if negativePrompt is provided.","minimum":0,"maximum":10},"refinementSteps":{"type":"number","description":"Additional refinement steps before scaling.\n\nIf scalingFactor == 1, the refinement process will be applied (1 + refinementSteps) times.\nIf scalingFactor > 1, the refinement process will be applied refinementSteps times.","minimum":0,"maximum":4},"referenceImages":{"type":"array","description":"The reference images used for the asset generation or editing","items":{"type":"string"}},"styleFidelity":{"type":"number","description":"The higher the value the more it will look like the style image(s)","minimum":0,"maximum":100},"resultContours":{"type":"boolean","description":"Boolean to output the contours.","default":false},"controlEnd":{"type":"number","description":"End step for control."},"polished":{"type":"number","description":"How polished is the surface? 0 is like a rough surface, 1 is like a mirror","minimum":0,"maximum":1},"scheduler":{"type":"string","description":"The scheduler used to generate this asset"},"overrideEmbeddings":{"type":"boolean","description":"Override the embeddings of the model. Only your prompt and negativePrompt will be used. 
Use with caution."},"progressPercent":{"type":"number"},"factor":{"type":"number","description":"Contrast factor for Grayscale detector"},"mask":{"type":"string","description":"The mask used for the asset generation or editing"},"lowThreshold":{"type":"number","description":"Low threshold for Canny detector"},"depthFidelity":{"type":"number","description":"The depth fidelity if a depth image provided","minimum":0,"maximum":100},"raised":{"type":"number","description":"How raised is the surface? 0 is flat like water, 1 is like a very rough rock","minimum":0,"maximum":1},"copiedAt":{"type":"string","description":"The date when the asset was copied to a project"},"contours":{"type":"array","items":{"type":"array","items":{"type":"array","items":{"type":"array","items":{"type":"number"}}}}},"negativePrompt":{"type":"string","description":"The negative prompt used to generate this asset"},"defaultParameters":{"type":"boolean","description":"If true, use the default parameters","default":false},"resizeOption":{"type":"number","description":"Size proportion of the input image in the output.","minimum":0.1,"maximum":1},"outputIndex":{"type":"number"},"saveFlipbook":{"type":"boolean","description":"Save a flipbook of the texture. Deactivated when the input texture is larger than 2048x2048px","default":true},"guidance":{"type":"number","description":"The guidance used to generate this asset"},"numInferenceSteps":{"type":"number","description":"The number of denoising steps for each image generation.","minimum":5,"maximum":50},"hdr":{"type":"number"},"clustering":{"type":"boolean","description":"Activate clustering.","default":false},"shiny":{"type":"number","description":"How shiny is the surface? 
0 is like a matte surface, 1 is like a diamond","minimum":0,"maximum":1},"colorCorrection":{"type":"boolean","description":"Ensure upscaled tile have the same color histogram as original tile.","default":true},"sketch":{"type":"boolean","description":"Activate sketch detection instead of canny.","default":false},"overlapPercentage":{"type":"number","description":"Overlap percentage for the output image.","minimum":0,"maximum":0.5},"prompt":{"type":"string","description":"The prompt that guided the asset generation or editing"},"resultImage":{"type":"boolean","description":"Boolean to able output the cut out object.","default":false},"cannyStructureImage":{"type":"string","description":"The control image already processed by canny detector. Must reference an existing AssetId."},"geometryEnforcement":{"type":"number","description":"Apply extra control to the Skybox 360 geometry.\nThe higher the value, the more the 360 geometry will influence the generated skybox image.\n\nUse with caution. 
Default is adapted to the other parameters.","minimum":0,"maximum":100},"targetWidth":{"type":"number","description":"Target width for the upscaled image, take priority over scaling factor","minimum":1024,"maximum":16000}},"required":["kind","type"]},"automaticCaptioning":{"type":"string","description":"Automatic captioning of the asset"},"description":{"type":"string","description":"The description, it will contain in priority:\n- the manual description\n- the advanced captioning when the asset is used in training flow\n- the automatic captioning"},"privacy":{"type":"string","description":"The privacy of the asset","enum":["private","public","unlisted"]},"mimeType":{"type":"string","description":"The mime type of the asset (example: \"image/png\")"},"source":{"type":"string","description":"source of the asset","enum":["3d23d","3d23d:texture","3d:texture","3d:texture:albedo","3d:texture:metallic","3d:texture:mtl","3d:texture:normal","3d:texture:roughness","audio2audio","background-removal","canvas","canvas-drawing","canvas-export","detection","generative-fill","image-prompt-editing","img23d","img2img","img2video","inference-control-net","inference-control-net-img","inference-control-net-inpainting","inference-control-net-inpainting-ip-adapter","inference-control-net-ip-adapter","inference-control-net-reference","inference-control-net-texture","inference-img","inference-img-ip-adapter","inference-img-texture","inference-in-paint","inference-in-paint-ip-adapter","inference-reference","inference-reference-texture","inference-txt","inference-txt-ip-adapter","inference-txt-texture","patch","pixelization","reframe","restyle","segment","segmentation-image","segmentation-mask","skybox-3d","skybox-base-360","skybox-hdri","texture","texture:albedo","texture:ao","texture:edge","texture:height","texture:metallic","texture:normal","texture:smoothness","txt23d","txt2audio","txt2img","txt2video","unknown","uploaded","uploaded-3d","uploaded-audio","uploaded-avatar","uploaded-video"
,"upscale","upscale-skybox","upscale-texture","upscale-video","vectorization","video2img","video2video"]},"ownerId":{"type":"string","description":"The owner (project) ID (example: \"proj_23tlk332lkht3kl2\" or \"team_dlkhgs23tlk3hlkth32lkht3kl2\" for old teams)"},"createdAt":{"type":"string","description":"The asset creation date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"firstFrame":{"type":"object","description":"The video asset's first frame.\n\nContains the assetId and the url of the first frame.","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"editCapabilities":{"type":"array","description":"List of edit capabilities","items":{"type":"string","enum":["DETECTION","GENERATIVE_FILL","PIXELATE","PROMPT_EDITING","REFINE","REFRAME","REMOVE_BACKGROUND","SEGMENTATION","UPSCALE","UPSCALE_360","VECTORIZATION"]}},"embedding":{"type":"array","description":"The embedding of the asset when requested.\n\nOnly available when an asset can be embedded (ie: not Detection maps)","items":{"type":"number"}},"id":{"type":"string","description":"The asset ID (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"updatedAt":{"type":"string","description":"The asset last update date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"thumbnail":{"type":"object","description":"The asset's thumbnail.\n\nContains the assetId and the url of the thumbnail.","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"nsfw":{"type":"array","description":"The NSFW labels","items":{"type":"string"}},"kind":{"type":"string","description":"The kind of asset","enum":["3d","audio","document","image","image-hdr","json","video"]},"lastFrame":{"type":"object","description":"The video asset's last frame.\n\nContains the assetId and the url of the last 
frame.","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"originalFileUrl":{"type":"string","description":"The original file url.\n\nContains the url of the original file. without any conversion. Only available for some specific video, audio and threeD assets.\nIs only specified if the given asset data has been replaced with a new file during the creation of the asset."},"authorId":{"type":"string","description":"The author user ID (example: \"dcf121faaa1a0a0bbbd9ca1b73d62aea\")"},"url":{"type":"string","description":"Signed URL to get the asset content"},"isHidden":{"type":"boolean","description":"Whether the asset is hidden."},"tags":{"type":"array","description":"The associated tags (example: [\"sci-fi\", \"landscape\"])","items":{"type":"string"}},"outputIndex":{"type":"number","description":"The output index of the asset within a job\nThis index is an positive integer that starts at 0\nIt is used to differentiate between multiple outputs of the same job\nIf the job has only one output, this index is 0"},"collectionIds":{"type":"array","description":"A list of CollectionId this asset belongs to","items":{"type":"string"}},"properties":{"type":"object","description":"The properties of the asset, content may depend on the kind of asset returned","properties":{"transcription":{"type":"object","description":"Transcription of the audio","properties":{"text":{"type":"string"}},"required":["text"]},"hasNormals":{"type":"boolean","description":"Whether the mesh has normal vectors"},"format":{"type":"string","description":"Format of the mesh file (e.g. 
'glb', etc.)"},"description":{"type":"string","description":"Description of the audio"},"bitrate":{"type":"number","description":"Bitrate of the media in bits per second"},"boneCount":{"type":"number","description":"Number of bones if skeleton exists"},"classification":{"type":"string","description":"Classification of the audio","enum":["effect","interview","music","other","sound","speech","text","unknown"]},"sampleRate":{"type":"number","description":"Sample rate of the media in Hz"},"faceCount":{"type":"number","description":"Number of faces/triangles in the mesh"},"vertexCount":{"type":"number","description":"Number of vertices in the mesh"},"animationFrameCount":{"type":"number","description":"Number of animation frames if animations exist"},"duration":{"type":"number","description":"Duration of the media in seconds"},"frameRate":{"type":"number","description":"Frame rate of the video in frames per second"},"nbFrames":{"type":"number","description":"Number of frames in the video"},"channels":{"type":"number","description":"Number of channels of the audio"},"size":{"type":"number"},"hasSkeleton":{"type":"boolean","description":"Whether the mesh has bones/skeleton"},"width":{"type":"number"},"hasAnimations":{"type":"boolean","description":"Whether the mesh has animations"},"codecName":{"type":"string","description":"Codec name of the media"},"hasUVs":{"type":"boolean","description":"Whether the mesh has UV coordinates"},"dimensions":{"type":"array","description":"Bounding box dimensions [width, height, depth]","items":{}},"height":{"type":"number"}},"required":["size"]},"status":{"type":"string","description":"The actual status","enum":["error","pending","success"]}},"required":["authorId","collectionIds","createdAt","editCapabilities","id","kind","metadata","mimeType","ownerId","privacy","properties","source","status","tags","updatedAt","url"]},"job":{"type":"object","properties":{"createdAt":{"type":"string","description":"The job creation date as an ISO string 
(example: \"2023-02-03T11:19:41.579Z\")"},"jobId":{"type":"string","description":"The job ID (example: \"job_ocZCnG1Df35XRL1QyCZSRxAG8\")"},"metadata":{"type":"object","description":"Metadata of the job with some additional information","properties":{"output":{"type":"object","description":"The output of the job","properties":{}},"input":{"type":"object","description":"The inputs for the job","properties":{}},"assetIds":{"type":"array","description":"List of produced assets for this job","items":{"type":"string"}},"flow":{"type":"array","description":"The flow of the job. Only available for workflow jobs.","items":{"type":"object","properties":{"jobId":{"type":"string","description":"If the flow is part of a WorkflowJob, this is the jobId for the node.\njobId is only available for nodes started. A node \"Pending\" for a running workflow job is not started."},"assets":{"type":"array","description":"List of produced assets for this node.","items":{"type":"object","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]}},"logicType":{"type":"string","description":"The type of the logic for the node.\nOnly available for logic nodes.","enum":["if-else"]},"dependsOn":{"type":"array","description":"The nodes that this node depends on.\nOnly available for nodes that have dependencies. Mainly used for user approval nodes.","items":{"type":"string"}},"modelId":{"type":"string","description":"The model id for the node. Mainly used for custom model tasks."},"inputs":{"type":"array","description":"The inputs of the node.","items":{"type":"object","description":"This is used to run the flow.","properties":{"parent":{"type":"boolean","description":"Whether this input represents a parent asset to assign to the produced assets.\nOnly available for \\`file\\` and \\`file_array\\` input types.\n\nFor \\`file_array\\`, the parent asset is the first item in the array."},"color":{"type":"boolean","description":"Whether the input is a color or not. 
Only available for \\`string\\` input type."},"inputs":{"type":"array","description":"The list of inputs which form an object within a container array.\nAll inputs are the same as the current object.\nThis is only available for type inputs_array inputs.","items":{"type":"object","properties":{}}},"minLength":{"type":"number","description":"The minimum allowed length for string inputs. Also applies to each item in \\`string_array\\`."},"backgroundBehavior":{"type":"string","description":"Specifies the background behavior for the input. Only available for \\`file\\` and \\`file_array\\`\ninput types with kind \\`image\\`.","enum":["opaque","transparent"]},"description":{"type":"string","description":"Help text displayed in the UI to provide additional information about the input"},"type":{"type":"string","description":"The data type of the input","enum":["boolean","file","file_array","inputs_array","model","model_array","number","number_array","string","string_array"]},"required":{"type":"object","description":"Set of rules that describes when this input is required:\n- \\`always\\`: Input is always required\n- \\`ifNotDefined\\`: Input is required when another specified input is not defined\n- \\`ifDefined\\`: Input is required when another specified input is defined\n- \\`conditionalValues\\`: Input is required when another input has a specific value\n\nBy default, the input is not required.","properties":{"always":{"type":"boolean","description":"Whether the input is always required"},"ifNotDefined":{"type":"object","description":"Makes this input required when another input is not defined:\n- Key: name of the input that must be undefined\n- Value: message to display when this input is required","properties":{}},"ifDefined":{"type":"object","description":"Makes this input required when another input is defined:\n- Key: name of the input that must be defined\n- Value: message to display when this input is 
required","properties":{}},"conditionalValues":{"type":"object","description":"Makes this input required when another input has a specific value:\n- Key: name of the input to check\n- Value: operation and allowed values that trigger the requirement","properties":{}}}},"modelTypes":{"type":"array","description":"The allowed model types for this input. Example: \\`[\"flux.1-lora\"]\\`.\nOnly available for \\`model_array\\` input type.","items":{"type":"string","enum":["custom","flux.1","flux.1-composition","flux.1-kontext-dev","flux.1-kontext-lora","flux.1-krea-dev","flux.1-krea-lora","flux.1-lora","flux.1-pro","flux.1.1-pro-ultra","flux1.1-pro","gpt-image-1","sd-1_5","sd-1_5-composition","sd-1_5-lora","sd-xl","sd-xl-composition","sd-xl-lora"]}},"ref":{"type":"object","description":"The reference to another input or output of the same workflow.\nMust have at least one of node or conditional.","properties":{"equal":{"type":"string","description":"This is the desired node output value if ref is an if/else node."},"node":{"type":"string","description":"The node id or 'workflow' if the source is a workflow input."},"conditional":{"type":"array","description":"The conditional nodes to reference.\nIf the conditional nodes are successful, the node will be successful.\nIf the conditional nodes are skipped, the node will be skipped.\nContains an array of node ids used to check the status of the nodes.","items":{"type":"string"}},"name":{"type":"string","description":"The name of the input or output to reference.\nIf the type is 'workflow', the name is the name of the input of the workflow is required\nIf the type is 'node', the name is not mandatory, except if you want all outputs of the node.\nTo get all outputs of a node, you can use the name 'all'."}}},"min":{"type":"number","description":"The minimum allowed value. Only available for \\`number\\` and array input types."},"promptSpark":{"type":"boolean","description":"Whether the input is used with prompt spark. 
Only available for \\`string\\` input type."},"placeholder":{"type":"string","description":"Placeholder text for the input. Only available for 'string' input type."},"group":{"type":"string","description":"Used to visually group inputs together in the UI. Inputs with the same group value appear\nconsecutively in the UI."},"allowedValues":{"type":"array","description":"The allowed values for the input. For \\`string\\` or \\`number\\` types, creates a single-select\ndropdown.\nFor \\`string_array\\` type, creates a multi-select dropdown.","items":{}},"costImpact":{"type":"boolean","description":"Whether this input affects the model's cost calculation"},"max":{"type":"number","description":"The maximum allowed value. Only available for \\`number\\` and \\`array\\` input types."},"kind":{"type":"string","description":"The asset kind of the input. Only taken into account for \\`file\\` and \\`file_array\\` input types.","enum":["3d","audio","document","image","image-hdr","json","video"]},"maskFrom":{"type":"string","description":"The name of the file input field to use as the mask source"},"label":{"type":"string","description":"The label displayed in the UI for this input"},"hint":{"type":"string","description":"Hint text displayed in the UI as a tooltip to guide the user"},"name":{"type":"string","description":"The name that must be user to call the model through the API"},"step":{"type":"number","description":"The step increment for numeric inputs. Only available for \\`number\\` input type.","minimum":1},"prompt":{"type":"boolean","description":"Whether the input is a prompt. When true, displays as a text area with prompt spark feature.\nOnly available for \\`string\\` input type."},"maxLength":{"type":"number","description":"The maximum allowed length for \\`string\\` inputs. 
Also applies to each item in \\`string_array\\`."}},"required":["name","type"]}},"id":{"type":"string","description":"The id of the node."},"logic":{"type":"object","description":"The logic of the node.\nOnly available for logic nodes.","properties":{"default":{"type":"string","description":"The default case of the logic.\nContains the id/output of the node to execute if no case is matched.\nOnly available for if/else nodes."},"transform":{"type":"string","description":"The transform of the logic.\nOnly available for transform nodes."},"cases":{"type":"array","description":"The cases of the logic.\nOnly available for if/else nodes.","items":{"type":"object","properties":{"condition":{"type":"string"},"value":{"type":"string"}},"required":["condition","value"]}}}},"type":{"type":"string","description":"The type of the job for the node.","enum":["custom-model","generate-prompt","logic","remove-background","transform","user-approval","workflow"]},"workflowId":{"type":"string","description":"The workflow id for the node. Mainly used for workflow tasks."},"status":{"type":"string","description":"The status of the node. 
Only available for WorkflowJob nodes.","enum":["failure","pending","processing","skipped","success"]}},"required":["id","status","type"]}},"workflowId":{"type":"string","description":"The workflow ID of the job if job is part of a workflow."},"workflowJobId":{"type":"string","description":"The workflow job ID of the job if job is part of a workflow job."}}},"statusHistory":{"type":"array","description":"The history of the different statuses the job went through with the ISO string date\nof when the job reached each statuses.","items":{"type":"object","properties":{"date":{"type":"string"},"status":{"type":"string","enum":["canceled","failure","finalizing","in-progress","pending","queued","success","warming-up"]}},"required":["date","status"]}},"progress":{"type":"number","description":"Progress of the job (between 0 and 1)"},"authorId":{"type":"string","description":"The author user ID (example: \"dcf121faaa1a0a0bbbd9ca1b73d62aea\")"},"jobType":{"type":"string","description":"The type of job","enum":["assets-download","canvas-export","caption","caption-llava","custom","describe-style","detection","embed","flux","flux-model-training","generate-prompt","image-generation","image-prompt-editing","inference","mesh-preview-rendering","model-download","model-import","model-training","openai-image-generation","patch-image","pixelate","reframe","remove-background","repaint","restyle","segment","skybox-3d","skybox-base-360","skybox-hdri","skybox-upscale-360","texture","translate","upload","upscale","upscale-skybox","upscale-texture","vectorize","workflow"]},"ownerId":{"type":"string","description":"The owner ID (example: \"team_U3Qmc8PCdWXwAQJ4Dvw4tV6D\")"},"billing":{"type":"object","description":"The billing of the job","properties":{"cuDiscount":{"type":"number"},"cuCost":{"type":"number"}},"required":["cuCost","cuDiscount"]},"status":{"type":"string","description":"The current status of the 
job","enum":["canceled","failure","finalizing","in-progress","pending","queued","success","warming-up"]},"updatedAt":{"type":"string","description":"The job last update date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"}},"required":["createdAt","jobId","jobType","metadata","progress","status","statusHistory","updatedAt"]}}},"PostEmbedInferencesResponse":{"type":"object","required":["embedding","job"],"properties":{"creativeUnitsCost":{"type":"number","description":"The Creative Units cost for the request billed"},"creativeUnitsDiscount":{"type":"number","description":"The Creative Units discount for the request billed"},"embedding":{"type":"array","description":"The embedding\n\nA 1_024 dimension vector","items":{"type":"number"}},"job":{"type":"object","properties":{"createdAt":{"type":"string","description":"The job creation date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"jobId":{"type":"string","description":"The job ID (example: \"job_ocZCnG1Df35XRL1QyCZSRxAG8\")"},"metadata":{"type":"object","description":"Metadata of the job with some additional information","properties":{"output":{"type":"object","description":"The output of the job","properties":{}},"input":{"type":"object","description":"The inputs for the job","properties":{}},"assetIds":{"type":"array","description":"List of produced assets for this job","items":{"type":"string"}},"flow":{"type":"array","description":"The flow of the job. Only available for workflow jobs.","items":{"type":"object","properties":{"jobId":{"type":"string","description":"If the flow is part of a WorkflowJob, this is the jobId for the node.\njobId is only available for nodes started. 
A node \"Pending\" for a running workflow job is not started."},"assets":{"type":"array","description":"List of produced assets for this node.","items":{"type":"object","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]}},"logicType":{"type":"string","description":"The type of the logic for the node.\nOnly available for logic nodes.","enum":["if-else"]},"dependsOn":{"type":"array","description":"The nodes that this node depends on.\nOnly available for nodes that have dependencies. Mainly used for user approval nodes.","items":{"type":"string"}},"modelId":{"type":"string","description":"The model id for the node. Mainly used for custom model tasks."},"inputs":{"type":"array","description":"The inputs of the node.","items":{"type":"object","description":"This is used to run the flow.","properties":{"parent":{"type":"boolean","description":"Whether this input represents a parent asset to assign to the produced assets.\nOnly available for \\`file\\` and \\`file_array\\` input types.\n\nFor \\`file_array\\`, the parent asset is the first item in the array."},"color":{"type":"boolean","description":"Whether the input is a color or not. Only available for \\`string\\` input type."},"inputs":{"type":"array","description":"The list of inputs which form an object within a container array.\nAll inputs are the same as the current object.\nThis is only available for type inputs_array inputs.","items":{"type":"object","properties":{}}},"minLength":{"type":"number","description":"The minimum allowed length for string inputs. Also applies to each item in \\`string_array\\`."},"backgroundBehavior":{"type":"string","description":"Specifies the background behavior for the input. 
Only available for \\`file\\` and \\`file_array\\`\ninput types with kind \\`image\\`.","enum":["opaque","transparent"]},"description":{"type":"string","description":"Help text displayed in the UI to provide additional information about the input"},"type":{"type":"string","description":"The data type of the input","enum":["boolean","file","file_array","inputs_array","model","model_array","number","number_array","string","string_array"]},"required":{"type":"object","description":"Set of rules that describes when this input is required:\n- \\`always\\`: Input is always required\n- \\`ifNotDefined\\`: Input is required when another specified input is not defined\n- \\`ifDefined\\`: Input is required when another specified input is defined\n- \\`conditionalValues\\`: Input is required when another input has a specific value\n\nBy default, the input is not required.","properties":{"always":{"type":"boolean","description":"Whether the input is always required"},"ifNotDefined":{"type":"object","description":"Makes this input required when another input is not defined:\n- Key: name of the input that must be undefined\n- Value: message to display when this input is required","properties":{}},"ifDefined":{"type":"object","description":"Makes this input required when another input is defined:\n- Key: name of the input that must be defined\n- Value: message to display when this input is required","properties":{}},"conditionalValues":{"type":"object","description":"Makes this input required when another input has a specific value:\n- Key: name of the input to check\n- Value: operation and allowed values that trigger the requirement","properties":{}}}},"modelTypes":{"type":"array","description":"The allowed model types for this input. 
Example: \\`[\"flux.1-lora\"]\\`.\nOnly available for \\`model_array\\` input type.","items":{"type":"string","enum":["custom","flux.1","flux.1-composition","flux.1-kontext-dev","flux.1-kontext-lora","flux.1-krea-dev","flux.1-krea-lora","flux.1-lora","flux.1-pro","flux.1.1-pro-ultra","flux1.1-pro","gpt-image-1","sd-1_5","sd-1_5-composition","sd-1_5-lora","sd-xl","sd-xl-composition","sd-xl-lora"]}},"ref":{"type":"object","description":"The reference to another input or output of the same workflow.\nMust have at least one of node or conditional.","properties":{"equal":{"type":"string","description":"This is the desired node output value if ref is an if/else node."},"node":{"type":"string","description":"The node id or 'workflow' if the source is a workflow input."},"conditional":{"type":"array","description":"The conditional nodes to reference.\nIf the conditional nodes are successful, the node will be successful.\nIf the conditional nodes are skipped, the node will be skipped.\nContains an array of node ids used to check the status of the nodes.","items":{"type":"string"}},"name":{"type":"string","description":"The name of the input or output to reference.\nIf the type is 'workflow', the name is the name of the input of the workflow and is required\nIf the type is 'node', the name is not mandatory, except if you want all outputs of the node.\nTo get all outputs of a node, you can use the name 'all'."}}},"min":{"type":"number","description":"The minimum allowed value. Only available for \\`number\\` and array input types."},"promptSpark":{"type":"boolean","description":"Whether the input is used with prompt spark. Only available for \\`string\\` input type."},"placeholder":{"type":"string","description":"Placeholder text for the input. Only available for 'string' input type."},"group":{"type":"string","description":"Used to visually group inputs together in the UI. 
Inputs with the same group value appear\nconsecutively in the UI."},"allowedValues":{"type":"array","description":"The allowed values for the input. For \\`string\\` or \\`number\\` types, creates a single-select\ndropdown.\nFor \\`string_array\\` type, creates a multi-select dropdown.","items":{}},"costImpact":{"type":"boolean","description":"Whether this input affects the model's cost calculation"},"max":{"type":"number","description":"The maximum allowed value. Only available for \\`number\\` and \\`array\\` input types."},"kind":{"type":"string","description":"The asset kind of the input. Only taken into account for \\`file\\` and \\`file_array\\` input types.","enum":["3d","audio","document","image","image-hdr","json","video"]},"maskFrom":{"type":"string","description":"The name of the file input field to use as the mask source"},"label":{"type":"string","description":"The label displayed in the UI for this input"},"hint":{"type":"string","description":"Hint text displayed in the UI as a tooltip to guide the user"},"name":{"type":"string","description":"The name that must be used to call the model through the API"},"step":{"type":"number","description":"The step increment for numeric inputs. Only available for \\`number\\` input type.","minimum":1},"prompt":{"type":"boolean","description":"Whether the input is a prompt. When true, displays as a text area with prompt spark feature.\nOnly available for \\`string\\` input type."},"maxLength":{"type":"number","description":"The maximum allowed length for \\`string\\` inputs. 
Also applies to each item in \\`string_array\\`."}},"required":["name","type"]}},"id":{"type":"string","description":"The id of the node."},"logic":{"type":"object","description":"The logic of the node.\nOnly available for logic nodes.","properties":{"default":{"type":"string","description":"The default case of the logic.\nContains the id/output of the node to execute if no case is matched.\nOnly available for if/else nodes."},"transform":{"type":"string","description":"The transform of the logic.\nOnly available for transform nodes."},"cases":{"type":"array","description":"The cases of the logic.\nOnly available for if/else nodes.","items":{"type":"object","properties":{"condition":{"type":"string"},"value":{"type":"string"}},"required":["condition","value"]}}}},"type":{"type":"string","description":"The type of the job for the node.","enum":["custom-model","generate-prompt","logic","remove-background","transform","user-approval","workflow"]},"workflowId":{"type":"string","description":"The workflow id for the node. Mainly used for workflow tasks."},"status":{"type":"string","description":"The status of the node. 
Only available for WorkflowJob nodes.","enum":["failure","pending","processing","skipped","success"]}},"required":["id","status","type"]}},"workflowId":{"type":"string","description":"The workflow ID of the job if job is part of a workflow."},"workflowJobId":{"type":"string","description":"The workflow job ID of the job if job is part of a workflow job."}}},"statusHistory":{"type":"array","description":"The history of the different statuses the job went through with the ISO string date\nof when the job reached each status.","items":{"type":"object","properties":{"date":{"type":"string"},"status":{"type":"string","enum":["canceled","failure","finalizing","in-progress","pending","queued","success","warming-up"]}},"required":["date","status"]}},"progress":{"type":"number","description":"Progress of the job (between 0 and 1)"},"authorId":{"type":"string","description":"The author user ID (example: \"dcf121faaa1a0a0bbbd9ca1b73d62aea\")"},"jobType":{"type":"string","description":"The type of job","enum":["assets-download","canvas-export","caption","caption-llava","custom","describe-style","detection","embed","flux","flux-model-training","generate-prompt","image-generation","image-prompt-editing","inference","mesh-preview-rendering","model-download","model-import","model-training","openai-image-generation","patch-image","pixelate","reframe","remove-background","repaint","restyle","segment","skybox-3d","skybox-base-360","skybox-hdri","skybox-upscale-360","texture","translate","upload","upscale","upscale-skybox","upscale-texture","vectorize","workflow"]},"ownerId":{"type":"string","description":"The owner ID (example: \"team_U3Qmc8PCdWXwAQJ4Dvw4tV6D\")"},"billing":{"type":"object","description":"The billing of the job","properties":{"cuDiscount":{"type":"number"},"cuCost":{"type":"number"}},"required":["cuCost","cuDiscount"]},"status":{"type":"string","description":"The current status of the 
job","enum":["canceled","failure","finalizing","in-progress","pending","queued","success","warming-up"]},"updatedAt":{"type":"string","description":"The job last update date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"}},"required":["createdAt","jobId","jobType","metadata","progress","status","statusHistory","updatedAt"]}}},"PostProjectWebhookEndpointsResponse":{"type":"object","required":["webhookEndpoint"],"properties":{"webhookEndpoint":{"type":"object","properties":{"createdAt":{"type":"string","description":"The date and time the webhook endpoint was created"},"nbTotalCalls":{"type":"number","description":"The number of calls to the webhook endpoint"},"enabledEvents":{"type":"array","description":"The events that trigger the webhook. ['*'] indicates that all events are enabled","items":{"type":"string","enum":["*","asset.download.completed","asset.download.created","asset.download.failed","generation.cancelled","generation.completed","generation.created","generation.failed","inference.cancelled","inference.completed","inference.created","inference.failed","model.download.completed","model.download.created","model.download.failed","model.training.cancelled","model.training.completed","model.training.failed","model.training.started"]}},"nbFailedCalls":{"type":"number","description":"The number of calls to the webhook endpoint that have failed"},"description":{"type":"string","description":"A description of the webhook endpoint"},"id":{"type":"string","description":"The ID of the webhook endpoint"},"secret":{"type":"string","description":"The endpoint's secret, used to generate webhook signatures. 
Only returned at creation"},"ownerId":{"type":"string","description":"The ID of the owner of the webhook endpoint"},"enabled":{"type":"boolean","description":"Whether the webhook is enabled"},"url":{"type":"string","description":"The URL of the webhook endpoint"},"updatedAt":{"type":"string","description":"The date and time the webhook endpoint was updated"}},"required":["createdAt","enabled","enabledEvents","id","nbFailedCalls","nbTotalCalls","ownerId","updatedAt","url"]}}},"PostWebhooksModalJobIdRequest":{"type":"object","required":["call_id","status"],"properties":{"payload":{"type":"object","properties":{}},"progress":{"type":"number"},"call_id":{"type":"string"},"status":{"type":"string","description":"Types for webhook response payloads","enum":["COMPLETED","FAILED","PENDING","RUNNING"]}}},"GetModelPresetsByModelIdResponse":{"type":"object","required":["presets"],"properties":{"presets":{"type":"array","items":{"type":"object","properties":{"createdAt":{"type":"string","description":"The preset creation date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"isDefault":{"type":"boolean","description":"Whether the preset is the default preset of the model (example: true)"},"modelId":{"type":"string","description":"The model ID (example: \"eyVcnFJcR92BxBkz7N6g5w\")"},"id":{"type":"string","description":"The preset ID (example: \"eyVcnFJcR92BxBkz7N6g5w\")"},"authorId":{"type":"string","description":"The author user ID (example: \"VFhihHKMRZyDDnZAJwLb2Q\")"},"ownerId":{"type":"string","description":"The owner ID (example: \"VFhihHKMRZyDDnZAJwLb2Q\")"},"parameters":{"type":"object","description":"The inference parameters","properties":{"ipAdapterImageIds":{"type":"array","description":"Asset id of the input IpAdapter images","items":{"type":"string"}},"styleFidelity":{"type":"number","description":"If style_fidelity=1.0, control more important, else if style_fidelity=0.0, prompt more important, else balanced\nOnly for \"reference\" inference 
type","minimum":0,"maximum":1},"controlEnd":{"type":"number","description":"Specifies how long the ControlNet guidance should be applied during the inference process.\n\nOnly available for Flux.1-dev based models.\n\nThe value represents the percentage of total inference steps where the ControlNet guidance is active.\nFor example:\n- 1.0: ControlNet guidance is applied during all inference steps\n- 0.5: ControlNet guidance is only applied during the first half of inference steps\n\nDefault values:\n- 0.5 for Canny modality\n- 0.6 for all other modalities","minimum":0.1,"maximum":1},"modality":{"type":"string","description":"The modality associated with the control image used for the generation: it can either be an object with a combination of maximum\n\nFor models of SD1.5 family:\n - up to 3 modalities from `canny`, `pose`, `depth`, `lines`, `seg`, `scribble`, `lineart`, `normal-map`, `illusion`\n - or one of the following presets: `character`, `landscape`, `city`, `interior`.\n\nFor models of the SDXL family:\n - up to 3 modalities from `canny`, `pose`, `depth`, `seg`, `illusion`, `scribble`\n - or one of the following presets: `character`, `landscape`.\n\nFor models of the FLUX schnell or dev families:\n- one modality from: `canny`, `tile`, `depth`, `blur`, `pose`, `gray`, `low-quality`\n\nOptionally, you can associate a value to these modalities or presets. The value must be within `]0.0, 1.0]`.\n\nExamples:\n- `canny`\n- `depth:0.5,pose:1.0`\n- `canny:0.5,depth:0.5,lines:0.3`\n- `landscape`\n- `character:0.5`\n- `illusion:1`\n\nNote: if you use a value that is not supported by the model family, this will result in an error.","enum":["blur","canny","depth","gray","illusion","lineart","lines","low-quality","normal-map","pose","scribble","seg","sketch","tile"]},"seed":{"type":"string","description":"Used to reproduce previous results. 
Default: randomly generated number."},"strength":{"type":"number","description":"Controls the noise intensity introduced to the input image, where a value of 1.0 completely erases the original image's details. Available for img2img and inpainting. (within [0.01, 1.0], default: 0.75)","minimum":0.01,"maximum":1},"ipAdapterType":{"type":"string","description":"The type of IP Adapter model to use. Must be one of [`style`, `character`], defaults to `style`","enum":["character","style"],"default":"style"},"aspectRatio":{"type":"string","description":"The aspect ratio of the generated images. Only used for the model flux.1.1-pro-ultra.\nThe aspect ratio is a string formatted as \"width:height\" (example: \"16:9\").","enum":["16:9","1:1","21:9","2:3","3:2","3:4","4:3","4:5","5:4","9:16","9:21"],"default":"1:1"},"type":{"type":"string","description":"The type of inference to use. Example: txt2img, img2img, etc.\n\nSelecting the right type will condition the expected parameters.\n\nNote: if model.type is `sd-xl*` or `sd-1_5*`, when using the `\"inpaint\"` inference type, Scenario determines the best available `baseModel` for a given `modelId`: one of `[\"stable-diffusion-inpainting\", \"stable-diffusion-xl-1.0-inpainting-0.1\"]` will be used.","enum":["controlnet","controlnet_img2img","controlnet_inpaint","controlnet_inpaint_ip_adapter","controlnet_ip_adapter","controlnet_reference","controlnet_texture","img2img","img2img_ip_adapter","img2img_texture","inpaint","inpaint_ip_adapter","outpaint","reference","reference_texture","txt2img","txt2img_ip_adapter","txt2img_texture"]},"ipAdapterImage":{"type":"string","description":"Signed URL to display the IpAdapter image"},"scheduler":{"type":"string","description":"The scheduler to use to override the default configured for the model. 
See detailed documentation for more details.","enum":["DDIMScheduler","DDPMScheduler","DEISMultistepScheduler","DPMSolverMultistepScheduler","DPMSolverSinglestepScheduler","EulerAncestralDiscreteScheduler","EulerDiscreteScheduler","HeunDiscreteScheduler","KDPM2AncestralDiscreteScheduler","KDPM2DiscreteScheduler","LCMScheduler","LMSDiscreteScheduler","PNDMScheduler","TCDScheduler","UniPCMultistepScheduler"]},"disableMerging":{"type":"boolean","description":"If set to true, the entire input image will likely change during inpainting. This results in faster inferences, but the output image will be harder to integrate if the input is just a small part of a larger image.","default":false},"disableModalityDetection":{"type":"boolean","description":"If false, the process uses the given image to detect the modality.\nIf true (default), the process will not try to detect the modality of the given image.\n\nFor example:\nwith `pose` modality and `false` value, the process will detect the pose of people in the given image\nwith `depth` modality and `false` value, the process will detect the depth of the given image\nwith `scribble` modality and `true` value, the process will use the given image as a scribble\n\n⚠️ For models of the FLUX schnell or dev families, this parameter is ignored. The modality detection is always disabled. ⚠️","default":true},"ipAdapterImages":{"type":"array","description":"Signed URL to display the IpAdapter images","items":{"type":"string"}},"baseModelId":{"type":"string","description":"The base model to use for the inference. 
Only Flux LoRA models can use this parameter.\nAllowed values are available in the model's attribute: `compliantModelIds`"},"controlStart":{"type":"number","description":"Specifies the starting point of the ControlNet guidance during the inference process.\n\nOnly available for Flux.1-dev based models.\n\nThe value represents the percentage of total inference steps where the ControlNet guidance starts.\nFor example:\n- 0.0: ControlNet guidance starts at the beginning of the inference steps\n- 0.5: ControlNet guidance starts at the middle of the inference steps","minimum":0,"maximum":0.9},"height":{"type":"number","description":"The height of the generated images, must be a 8 multiple (within [64, 2048], default: 512)\nIf model.type is `sd-xl`, `sd-xl-lora`, `sd-xl-composition` the height must be within [512, 2048]\nIf model.type is `sd-1_5`, the height must be within [64, 1024]\nIf model.type is `flux.1.1-pro-ultra`, you can use the aspectRatio parameter instead","minimum":64,"maximum":2048},"mask":{"type":"string","description":"Signed URL to display the mask image"},"controlImageId":{"type":"string","description":"Asset id of the controlnet input image"},"image":{"type":"string","description":"Signed URL to display the input image"},"imageId":{"type":"string","description":"Asset id of the input image"},"ipAdapterImageId":{"type":"string","description":"Asset id of the input IpAdapter image"},"ipAdapterScale":{"type":"number","description":"IpAdapter scale factor (within [0.0, 1.0], default: 0.9).","minimum":0,"maximum":1},"modelEpoch":{"type":"string","description":"The epoch of the model to use for the inference. 
Only available for Flux Lora Trained models."},"hideResults":{"type":"boolean","description":"If set, generated assets will be hidden and not returned in the list of images of the inference\nor when listing assets (default: false)","default":false},"ipAdapterScales":{"type":"array","description":"IpAdapter scale factors (within [0.0, 1.0], default: 0.9).","items":{"type":"number"}},"maskId":{"type":"string","description":"Asset id of the mask image"},"referenceAdain":{"type":"boolean","description":"Whether to use reference adain\nOnly for \"reference\" inference type","default":false},"negativePrompt":{"type":"string","description":"The prompt not to guide the image generation, ignored when guidance < 1 (example: \"((ugly face))\")\nFor Flux based model (not Fast-Flux): requires negativePromptStrength > 0 and active only for inference types txt2img / img2img / controlnet."},"intermediateImages":{"type":"boolean","description":"Enable or disable the intermediate images generation (default: false)","default":false},"concepts":{"type":"array","items":{"type":"object","properties":{"modelId":{"type":"string","description":"The model ID (example: \"model_eyVcnFJcR92BxBkz7N6g5w\")"},"modelEpoch":{"type":"string","description":"The epoch of the model (example: \"000001\")\nOnly available for Flux Lora Trained models"},"scale":{"type":"number","description":"The scale of the model (example: 1.0)\nFor Flux Kontext Prompt Editing, the scale is between 0 and 2.","minimum":-2,"maximum":2}},"required":["modelId","scale"]}},"controlImage":{"type":"string","description":"Signed URL to display the controlnet input image"},"guidance":{"type":"number","description":"Controls how closely the generated image follows the prompt. Higher values result in stronger adherence to the prompt. 
Default and allowed values depend on the model type:\n- For Flux dev models, the default is 3.5 and allowed values are within [0, 10]\n- For Flux pro models, the default is 3 and allowed values are within [2, 5]\n- For SDXL models, the default is 6 and allowed values are within [0, 20]\n- For SD1.5 models, the default is 7.5 and allowed values are within [0, 20]","minimum":0,"maximum":20},"numInferenceSteps":{"type":"number","description":"The number of denoising steps for each image generation (within [1, 150], default: 30)","minimum":1,"maximum":150},"numSamples":{"type":"number","description":"The number of images to generate (within [1, 128], default: 4)","minimum":1,"maximum":128},"width":{"type":"number","description":"The width of the generated images, must be a 8 multiple (within [64, 2048], default: 512)\nIf model.type is `sd-xl`, `sd-xl-lora`, `sd-xl-composition` the width must be within [512, 2048]\nIf model.type is `sd-1_5`, the width must be within [64, 1024]\nIf model.type is `flux.1.1-pro-ultra`, you can use the aspectRatio parameter instead","minimum":64,"maximum":2048},"negativePromptStrength":{"type":"number","description":"Only applicable for flux-dev based models for `txt2img`, `img2img`, and `controlnet` inference types.\n\nControls the influence of the negative prompt. Default 0 means the negative prompt has no effect. Higher values increase negative prompt influence.\nMust be > 0 if negativePrompt is provided.","minimum":0,"maximum":10},"referenceAttn":{"type":"boolean","description":"Whether to use reference query for self attention's context\nOnly for \"reference\" inference type","default":false},"prompt":{"type":"string","description":"Full text prompt including the model placeholder. 
(example: \"an illustration of phoenix in a fantasy world, flying over a mountain, 8k, bokeh effect\")"}},"required":["prompt","type"]},"updatedAt":{"type":"string","description":"The preset last update date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"}},"required":["authorId","createdAt","id","isDefault","modelId","ownerId","parameters","updatedAt"]}}}},"GetPublicOscuPricesResponse":{"type":"object","required":["prices"],"properties":{"prices":{"type":"array","items":{"type":"object","properties":{"expires":{"type":"number"},"unitAmount":{"type":"number"},"currency":{"type":"string"},"creativeUnits":{"type":"number"}}}}}},"PostWebhooksClerkEmailsRequest":{"type":"object","required":["data","event_attributes","object","timestamp","type"],"properties":{"data":{"type":"object","properties":{"delivered_by_clerk":{"type":"boolean"},"data":{"type":"object","properties":{"app":{"type":"object","properties":{"domain_name":{"type":"string"},"logo_image_url":{"type":"string"},"logo_url":{"type":"string"},"name":{"type":"string"},"url":{"type":"string"}},"required":["domain_name","logo_image_url","logo_url","name","url"]},"requested_by":{"type":"string"},"ttl_minutes":{"type":"number"},"theme":{"type":"object","properties":{"button_text_color":{"type":"string"},"show_clerk_branding":{"type":"boolean"},"primary_color":{"type":"string"}},"required":["button_text_color","primary_color","show_clerk_branding"]},"magic_link":{"type":"string"},"otp_code":{"type":"string"},"user":{"type":"object","properties":{"public_metadata":{"type":"object","properties":{}},"public_metadata_fallback":{"type":"string"}},"required":["public_metadata","public_metadata_fallback"]},"requested_at":{"type":"string"}},"required":["app","magic_link","otp_code","requested_at","requested_by","theme","ttl_minutes","user"]},"subject":{"type":"string"},"to_email_address":{"type":"string"},"body_plain":{"type":"string"},"body":{"type":"string"},"email_address_id":{"type":"string"},"from_email_name"
:{"type":"string"},"id":{"type":"string"},"slug":{"type":"string"},"object":{"type":"string"},"status":{"type":"string"}},"required":["body","body_plain","data","delivered_by_clerk","email_address_id","from_email_name","id","object","slug","status","subject","to_email_address"]},"event_attributes":{"type":"object","properties":{"http_request":{"type":"object","properties":{"client_ip":{"type":"string"},"user_agent":{"type":"string"}},"required":["client_ip","user_agent"]}},"required":["http_request"]},"type":{"type":"string","enum":["email.created"]},"object":{"type":"string"},"timestamp":{"type":"number"}}},"PutModelsTrainingImagesByModelIdAndTrainingImageIdRequest":{"type":"object","properties":{"data":{"type":"string","description":"The training image as a data URL (example: \"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVQYV2NgYAAAAAMAAWgmWQ0AAAAASUVORK5CYII=\")"},"assetId":{"type":"string","description":"The asset ID to use as a training image (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\"). If provided, \"data\" and \"name\" parameters will be ignored."},"name":{"type":"string","description":"The original file name of the image (example: \"my-training-image.jpg\")"},"assetIds":{"type":"array","description":"The asset IDs to use as training images (example: [\"asset_GTrL3mq4SXWyMxkOHRxlpw\", \"asset_GTrL3mq4SXWyMxkOHRxlpw\"])\nUsed in batch mode, up to 10 asset IDs are allowed. 
Cannot be used with \"assetId\" or \"data\" and \"name\" parameters.","items":{"type":"string"}},"preset":{"type":"string","description":"The preset to use for training images","enum":["default","style","subject"]}}},"PutAssetsByCollectionIdResponse":{"type":"object","required":["collection"],"properties":{"collection":{"type":"object","properties":{"createdAt":{"type":"string","description":"The collection creation date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"thumbnail":{"type":"object","description":"The thumbnail for the collection (if any)","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"name":{"type":"string","description":"The collection name"},"assetCount":{"type":"number"},"id":{"type":"string","description":"The collection ID (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"ownerId":{"type":"string","description":"The owner ID (example: \"dcf121faaa1a0a0bbbd9ca1b73d62aea\")"},"modelCount":{"type":"number"},"itemCount":{"type":"number"},"updatedAt":{"type":"string","description":"The collection last update date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"}},"required":["assetCount","createdAt","id","itemCount","modelCount","name","ownerId","updatedAt"]}}},"PutImageVectorizationRequest":{"type":"object","required":["returnImage"],"properties":{"image":{"type":"string","description":"The input image as a data URL (example: \"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVQYV2NgYAAAAAMAAWgmWQ0AAAAASUVORK5CYII=\") or the asset ID (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"layerDifference":{"type":"number","description":"Represents the color difference between gradient layers (higher value will reduce the number of layers)\n\nOnly applicable to `color` colorMode.","minimum":0,"maximum":255},"maxIterations":{"type":"number","description":"Max iterations for 
rendering","minimum":1,"maximum":100000},"cornerThreshold":{"type":"number","description":"Minimum momentary angle (degree) to be considered a corner (higher value will smooth corners)\n\nOnly applicable to `spline` mode.","minimum":0,"maximum":180},"lengthThreshold":{"type":"number","description":"Minimum length of a segment (higher value will generate more coarse output)\n\nOnly applicable to `spline` mode.","minimum":3.5,"maximum":10},"colorMode":{"type":"string","description":"Color mode `bw`, `color`. If `bw`, the image will be considered as black and white.","enum":["bw","color"],"default":"color"},"filterSpeckle":{"type":"number","description":"Discard patches smaller than X px in size (higher value will reduce the number of patches, cleaner output)","minimum":0,"maximum":128},"preset":{"type":"string","description":"If preset given, all other parameters will be ignored (mode, colorMode, filterSpeckle, ...), except for custom.","enum":["asset","bw","custom","photo","pixelart","poster"],"default":"custom"},"mode":{"type":"string","description":"Curve fitting mode `none`, `polygon`, `spline`","enum":["none","polygon","spline"],"default":"spline"},"colorPrecision":{"type":"number","description":"Number of significant bits to use in an RGB channel, min 1, max 16 (higher value will increase precision)\n\nOnly applicable to `color` colorMode.","minimum":1,"maximum":16},"spliceThreshold":{"type":"number","description":"Minimum angle displacement (degree) to splice a spline (higher value reduces accuracy)\n\nOnly applicable to `spline` mode.","minimum":0,"maximum":180},"assetId":{"type":"string","description":"Deprecated: The asset ID (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\"). Prefer to use image with the asset ID instead."},"name":{"type":"string","description":"The original file name of the image (example: \"low-res-image.jpg\"). 
It will be ignored if assetId is provided."},"returnImage":{"type":"boolean","description":"If true, the image will be returned in the response."},"pathPrecision":{"type":"number","description":"Number of decimal places to use in path string","minimum":0}}},"PutModelsTrainingImagesPairsByModelIdRequest":{"type":"array","description":"Array of training image pairs","items":{"type":"object","properties":{"sourceId":{"type":"string","description":"The source asset ID (must be a training asset)"},"targetId":{"type":"string","description":"The target asset ID (must be a training asset)"},"instruction":{"type":"string","description":"The instruction for the image pair, source to target"}}}},"PostImg2imgInferencesRequest":{"type":"object","required":["modelId","prompt"],"properties":{"image":{"type":"string","description":"The input image as a data URL (example: \"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVQYV2NgYAAAAAMAAWgmWQ0AAAAASUVORK5CYII=\") or the asset ID (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"imageId":{"type":"string","description":"Deprecated: The input image as an AssetId. Prefer to use image with the asset ID instead."},"seed":{"type":"string","description":"Used to reproduce previous results. Default: randomly generated number."},"strength":{"type":"number","description":"Controls the noise intensity introduced to the input image, where a value of 1.0 completely erases the original image's details. Available for img2img and inpainting. (within [0.01, 1.0], default: 0.75)","minimum":0.01,"maximum":1},"modelId":{"type":"string","description":"The model id to use for the inference"},"modelEpoch":{"type":"string","description":"The epoch of the model to use for the inference. 
Only available for Flux Lora Trained models."},"hideResults":{"type":"boolean","description":"If set, generated assets will be hidden and not returned in the list of images of the inference\nor when listing assets (default: false)","default":false},"negativePrompt":{"type":"string","description":"The prompt not to guide the image generation, ignored when guidance < 1 (example: \"((ugly face))\")\nFor Flux based model (not Fast-Flux): requires negativePromptStrength > 0 and active only for inference types txt2img / img2img / controlnet."},"scheduler":{"type":"string","description":"The scheduler to use to override the default configured for the model. See detailed documentation for more details.","enum":["DDIMScheduler","DDPMScheduler","DEISMultistepScheduler","DPMSolverMultistepScheduler","DPMSolverSinglestepScheduler","EulerAncestralDiscreteScheduler","EulerDiscreteScheduler","HeunDiscreteScheduler","KDPM2AncestralDiscreteScheduler","KDPM2DiscreteScheduler","LCMScheduler","LMSDiscreteScheduler","PNDMScheduler","TCDScheduler","UniPCMultistepScheduler"]},"intermediateImages":{"type":"boolean","description":"Enable or disable the intermediate images generation (default: false)","default":false},"concepts":{"type":"array","items":{"type":"object","properties":{"modelId":{"type":"string","description":"The model ID (example: \"model_eyVcnFJcR92BxBkz7N6g5w\")"},"modelEpoch":{"type":"string","description":"The epoch of the model (example: \"000001\")\nOnly available for Flux Lora Trained models"},"scale":{"type":"number","description":"The scale of the model (example: 1.0)\nFor Flux Kontext Prompt Editing, the scale is between 0 and 2.","minimum":-2,"maximum":2}},"required":["modelId","scale"]}},"guidance":{"type":"number","description":"Controls how closely the generated image follows the prompt. Higher values result in stronger adherence to the prompt. 
Default and allowed values depend on the model type:\n- For Flux dev models, the default is 3.5 and allowed values are within [0, 10]\n- For Flux pro models, the default is 3 and allowed values are within [2, 5]\n- For SDXL models, the default is 6 and allowed values are within [0, 20]\n- For SD1.5 models, the default is 7.5 and allowed values are within [0, 20]","minimum":0,"maximum":20},"numInferenceSteps":{"type":"number","description":"The number of denoising steps for each image generation (within [1, 150], default: 30)","minimum":1,"maximum":150},"numSamples":{"type":"number","description":"The number of images to generate (within [1, 128], default: 4)","minimum":1,"maximum":128},"width":{"type":"number","description":"The width of the generated images, must be a 8 multiple (within [64, 2048], default: 512)\nIf model.type is `sd-xl`, `sd-xl-lora`, `sd-xl-composition` the width must be within [512, 2048]\nIf model.type is `sd-1_5`, the width must be within [64, 1024]\nIf model.type is `flux.1.1-pro-ultra`, you can use the aspectRatio parameter instead","minimum":64,"maximum":2048},"imageParentId":{"type":"string","description":"Specifies the parent asset Id for the image when provided as a dataurl."},"negativePromptStrength":{"type":"number","description":"Only applicable for flux-dev based models for `txt2img`, `img2img`, and `controlnet` inference types.\n\nControls the influence of the negative prompt. Default 0 means the negative prompt has no effect. Higher values increase negative prompt influence.\nMust be > 0 if negativePrompt is provided.","minimum":0,"maximum":10},"baseModelId":{"type":"string","description":"The base model to use for the inference. Only Flux LoRA models can use this parameter.\nAllowed values are available in the model's attribute: `compliantModelIds`"},"prompt":{"type":"string","description":"Full text prompt including the model placeholder. 
(example: \"an illustration of phoenix in a fantasy world, flying over a mountain, 8k, bokeh effect\")"},"height":{"type":"number","description":"The height of the generated images, must be a 8 multiple (within [64, 2048], default: 512)\nIf model.type is `sd-xl`, `sd-xl-lora`, `sd-xl-composition` the height must be within [512, 2048]\nIf model.type is `sd-1_5`, the height must be within [64, 1024]\nIf model.type is `flux.1.1-pro-ultra`, you can use the aspectRatio parameter instead","minimum":64,"maximum":2048},"imageHide":{"type":"boolean","description":"Toggles the hidden status of the image when provided as a dataurl.","default":false}}},"PostSkyboxBase360InferencesRequest":{"type":"object","required":["prompt"],"properties":{"image":{"type":"string","description":"The image to use as a starting point for the skybox generation. Must reference an existing AssetId or be a data URL."},"styleFidelity":{"type":"number","description":"(deprecated) Condition the influence of the style image. The higher the value, the more the style image will influence the generated skybox image.","minimum":0,"maximum":100},"structureImage":{"type":"string","description":"The control image for structure. A canny detector will be applied to this image. Must reference an existing AssetId."},"seed":{"type":"number","description":"Used to reproduce previous results. Default: randomly generated number.","minimum":0,"maximum":2147483647},"strength":{"type":"number","description":"Controls the noise intensity introduced to the input image, where a value of 1.0 completely erases the original image's details. Available for img2img and inpainting. (within [0.01, 1.0], default: 0.75)","minimum":0.01,"maximum":1},"styleImages":{"type":"array","description":"(deprecated) List of style images. Most of the time, only one image is enough. 
It must be existing AssetIds.","items":{"type":"string"}},"depthFidelity":{"type":"number","description":"The depth fidelity if a depth image provided","minimum":0,"maximum":100},"numOutputs":{"type":"number","description":"The number of outputs to generate.","minimum":1,"maximum":8},"depthImage":{"type":"string","description":"The control image processed by depth estimator. Must reference an existing AssetId."},"negativePrompt":{"type":"string","description":"A negative full text prompt that discourages the skybox model from generating certain characteristics. It is recommended to test without using a negative prompt. Default: empty string. Example: \"Low resolution, blurry, pixelated, noisy.\""},"numInferenceSteps":{"type":"number","description":"The number of denoising steps for each image generation.","minimum":20,"maximum":50},"overrideEmbeddings":{"type":"boolean","description":"(deprecated) Override the embeddings of the model. Only your prompt and negativePrompt will be used. Use with caution."},"promptFidelity":{"type":"number","description":"Increase the fidelity to the prompt during upscale.","minimum":0,"maximum":100},"width":{"type":"number","description":"The width of the rendered image.","minimum":1024,"maximum":2048},"style":{"type":"string","description":"Style to apply for generation.","enum":["3d-cartoon","cartoon","cinematic","claymation","cloud-skydome","comic","cyberpunk","enchanted","fantasy","ink","manga","manga-color","neon-tron","oil-painting","pastel","photo","psychedelic","retro-fantasy","scifi-concept-art","space","standard","whimsical"],"default":"standard"},"negativePromptStrength":{"type":"number","description":"Controls the influence of the negative prompt. Default 0 means the negative prompt has no effect. 
Higher values increase negative prompt influence.\nMust be > 0 if negativePrompt is provided.","minimum":0,"maximum":10},"structureFidelity":{"type":"number","description":"The structure fidelity if a structureImage or a cannyStructureImage image is provided.","minimum":0,"maximum":100},"prompt":{"type":"string","description":"A full text prompt to guide the skybox generation process. Default: empty string. Example: \"a mountain landscape\""},"cannyStructureImage":{"type":"string","description":"The control image already processed by canny detector. Must reference an existing AssetId."},"geometryEnforcement":{"type":"number","description":"Apply extra control to the Skybox 360 geometry.\nThe higher the value, the more the 360 geometry will influence the generated skybox image.\n\nUse with caution. Default is adapted to the other parameters.","minimum":0,"maximum":100}}},"PostImg2imgIpAdapterInferencesResponse":{"type":"object","required":["inference","job"],"properties":{"creativeUnitsCost":{"type":"number","description":"The Creative Units cost for the request billed"},"creativeUnitsDiscount":{"type":"number","description":"The Creative Units discount for the request billed"},"inference":{"type":"object","properties":{"ipAdapterImageIds":{"type":"array","description":"Asset id of the input IpAdapter images","items":{"type":"string"}},"seed":{"type":"string","description":"Used to reproduce previous results. Default: randomly generated number."},"strength":{"type":"number","description":"Controls the noise intensity introduced to the input image, where a value of 1.0 completely erases the original image's details. Available for img2img and inpainting. (within [0.01, 1.0], default: 0.75)","minimum":0.01,"maximum":1},"ipAdapterType":{"type":"string","description":"The type of IP Adapter model to use. Must be one of [`style`, `character`], defaults to `style`","enum":["character","style"],"default":"style"},"type":{"type":"string","description":"The type of inference to use. Example: txt2img, img2img, etc.\n\nSelecting the right type will condition the expected parameters.\n\nNote: if model.type is `sd-xl*` or `sd-1_5*`, when using the `\"inpaint\"` inference type, Scenario determines the best available `baseModel` for a given `modelId`: one of `[\"stable-diffusion-inpainting\", \"stable-diffusion-xl-1.0-inpainting-0.1\"]` will be used.","enum":["txt2img","txt2img_ip_adapter","txt2img_texture","img2img","img2img_ip_adapter","img2img_texture","inpaint","outpaint","inpaint_ip_adapter","controlnet","controlnet_ip_adapter","reference","reference_texture","controlnet_reference","controlnet_img2img","controlnet_inpaint","controlnet_inpaint_ip_adapter","controlnet_texture"]},"ipAdapterImage":{"type":"string","description":"Signed URL to display the IpAdapter image"},"scheduler":{"type":"string","description":"The scheduler to use to override the default configured for the model. See detailed documentation for more details.","enum":["DDIMScheduler","DDPMScheduler","DEISMultistepScheduler","DPMSolverMultistepScheduler","DPMSolverSinglestepScheduler","EulerAncestralDiscreteScheduler","EulerDiscreteScheduler","HeunDiscreteScheduler","KDPM2AncestralDiscreteScheduler","KDPM2DiscreteScheduler","LCMScheduler","LMSDiscreteScheduler","PNDMScheduler","TCDScheduler","UniPCMultistepScheduler"]},"ipAdapterImages":{"type":"array","description":"Signed URL to display the IpAdapter images","items":{"type":"string"}},"baseModelId":{"type":"string","description":"The base model to use for the inference. 
Only Flux LoRA models can use this parameter.\nAllowed values are available in the model's attribute: `compliantModelIds`"},"height":{"type":"number","description":"The height of the generated images, must be a 8 multiple (within [64, 2048], default: 512)\nIf model.type is `sd-xl`, `sd-xl-lora`, `sd-xl-composition` the height must be within [512, 2048]\nIf model.type is `sd-1_5`, the height must be within [64, 1024]\nIf model.type is `flux.1.1-pro-ultra`, you can use the aspectRatio parameter instead","minimum":64,"maximum":2048},"mask":{"type":"string","description":"Signed URL to display the mask image"},"image":{"type":"string","description":"Signed URL to display the input image"},"imageId":{"type":"string","description":"Asset id of the input image"},"ipAdapterImageId":{"type":"string","description":"Asset id of the input IpAdapter image"},"ipAdapterScale":{"type":"number","description":"IpAdapter scale factor (within [0.0, 1.0], default: 0.9).","minimum":0,"maximum":1},"modelEpoch":{"type":"string","description":"The epoch of the model to use for the inference. 
Only available for Flux Lora Trained models."},"hideResults":{"type":"boolean","description":"If set, generated assets will be hidden and not returned in the list of images of the inference\nor when listing assets (default: false)","default":false},"ipAdapterScales":{"type":"array","description":"IpAdapter scale factors (within [0.0, 1.0], default: 0.9).","items":{"type":"number"}},"maskId":{"type":"string","description":"Asset id of the mask image"},"negativePrompt":{"type":"string","description":"The prompt not to guide the image generation, ignored when guidance < 1 (example: \"((ugly face))\")\nFor Flux based model (not Fast-Flux): requires negativePromptStrength > 0 and active only for inference types txt2img / img2img / controlnet."},"intermediateImages":{"type":"boolean","description":"Enable or disable the intermediate images generation (default: false)","default":false},"concepts":{"type":"array","items":{"type":"object","properties":{"modelId":{"type":"string","description":"The model ID (example: \"model_eyVcnFJcR92BxBkz7N6g5w\")"},"modelEpoch":{"type":"string","description":"The epoch of the model (example: \"000001\")\nOnly available for Flux Lora Trained models"},"scale":{"type":"number","description":"The scale of the model (example: 1.0)\nFor Flux Kontext Prompt Editing, the scale is between 0 and 2.","minimum":-2,"maximum":2}},"required":["modelId","scale"]}},"guidance":{"type":"number","description":"Controls how closely the generated image follows the prompt. Higher values result in stronger adherence to the prompt. 
Default and allowed values depend on the model type:\n- For Flux dev models, the default is 3.5 and allowed values are within [0, 10]\n- For Flux pro models, the default is 3 and allowed values are within [2, 5]\n- For SDXL models, the default is 6 and allowed values are within [0, 20]\n- For SD1.5 models, the default is 7.5 and allowed values are within [0, 20]","minimum":0,"maximum":20},"numInferenceSteps":{"type":"number","description":"The number of denoising steps for each image generation (within [1, 150], default: 30)","minimum":1,"maximum":150},"numSamples":{"type":"number","description":"The number of images to generate (within [1, 128], default: 4)","minimum":1,"maximum":128},"width":{"type":"number","description":"The width of the generated images, must be a 8 multiple (within [64, 2048], default: 512)\nIf model.type is `sd-xl`, `sd-xl-lora`, `sd-xl-composition` the width must be within [512, 2048]\nIf model.type is `sd-1_5`, the width must be within [64, 1024]\nIf model.type is `flux.1.1-pro-ultra`, you can use the aspectRatio parameter instead","minimum":64,"maximum":2048},"negativePromptStrength":{"type":"number","description":"Only applicable for flux-dev based models for `txt2img`, `img2img`, and `controlnet` inference types.\n\nControls the influence of the negative prompt. Default 0 means the negative prompt has no effect. Higher values increase negative prompt influence.\nMust be > 0 if negativePrompt is provided.","minimum":0,"maximum":10},"prompt":{"type":"string","description":"Full text prompt including the model placeholder. 
(example: \"an illustration of phoenix in a fantasy world, flying over a mountain, 8k, bokeh effect\")"}},"required":["image","imageId","prompt","type"]},"job":{"type":"object","properties":{"createdAt":{"type":"string","description":"The job creation date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"jobId":{"type":"string","description":"The job ID (example: \"job_ocZCnG1Df35XRL1QyCZSRxAG8\")"},"metadata":{"type":"object","description":"Metadata of the job with some additional information","properties":{"output":{"type":"object","description":"The output of the job","properties":{}},"input":{"type":"object","description":"The inputs for the job","properties":{}},"assetIds":{"type":"array","description":"List of produced assets for this job","items":{"type":"string"}},"flow":{"type":"array","description":"The flow of the job. Only available for workflow jobs.","items":{"type":"object","properties":{"jobId":{"type":"string","description":"If the flow is part of a WorkflowJob, this is the jobId for the node.\njobId is only available for nodes started. A node \"Pending\" for a running workflow job is not started."},"assets":{"type":"array","description":"List of produced assets for this node.","items":{"type":"object","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]}},"logicType":{"type":"string","description":"The type of the logic for the node.\nOnly available for logic nodes.","enum":["if-else"]},"dependsOn":{"type":"array","description":"The nodes that this node depends on.\nOnly available for nodes that have dependencies. Mainly used for user approval nodes.","items":{"type":"string"}},"modelId":{"type":"string","description":"The model id for the node. 
Mainly used for custom model tasks."},"inputs":{"type":"array","description":"The inputs of the node.","items":{"type":"object","description":"This is used to run the flow.","properties":{"parent":{"type":"boolean","description":"Whether this input represents a parent asset to assign to the produced assets.\nOnly available for \\`file\\` and \\`file_array\\` input types.\n\nFor \\`file_array\\`, the parent asset is the first item in the array."},"color":{"type":"boolean","description":"Whether the input is a color or not. Only available for \\`string\\` input type."},"inputs":{"type":"array","description":"The list of inputs which form an object within a container array.\nAll inputs are the same as the current object.\nThis is only available for type inputs_array inputs.","items":{"type":"object","properties":{}}},"minLength":{"type":"number","description":"The minimum allowed length for string inputs. Also applies to each item in \\`string_array\\`."},"backgroundBehavior":{"type":"string","description":"Specifies the background behavior for the input. 
Only available for \\`file\\` and \\`file_array\\`\ninput types with kind \\`image\\`.","enum":["opaque","transparent"]},"description":{"type":"string","description":"Help text displayed in the UI to provide additional information about the input"},"type":{"type":"string","description":"The data type of the input","enum":["boolean","file","file_array","inputs_array","model","model_array","number","number_array","string","string_array"]},"required":{"type":"object","description":"Set of rules that describes when this input is required:\n- \\`always\\`: Input is always required\n- \\`ifNotDefined\\`: Input is required when another specified input is not defined\n- \\`ifDefined\\`: Input is required when another specified input is defined\n- \\`conditionalValues\\`: Input is required when another input has a specific value\n\nBy default, the input is not required.","properties":{"always":{"type":"boolean","description":"Whether the input is always required"},"ifNotDefined":{"type":"object","description":"Makes this input required when another input is not defined:\n- Key: name of the input that must be undefined\n- Value: message to display when this input is required","properties":{}},"ifDefined":{"type":"object","description":"Makes this input required when another input is defined:\n- Key: name of the input that must be defined\n- Value: message to display when this input is required","properties":{}},"conditionalValues":{"type":"object","description":"Makes this input required when another input has a specific value:\n- Key: name of the input to check\n- Value: operation and allowed values that trigger the requirement","properties":{}}}},"modelTypes":{"type":"array","description":"The allowed model types for this input. 
Example: \\`[\"flux.1-lora\"]\\`.\nOnly available for \\`model_array\\` input type.","items":{"type":"string","enum":["custom","flux.1","flux.1-composition","flux.1-kontext-dev","flux.1-kontext-lora","flux.1-krea-dev","flux.1-krea-lora","flux.1-lora","flux.1-pro","flux.1.1-pro-ultra","flux1.1-pro","gpt-image-1","sd-1_5","sd-1_5-composition","sd-1_5-lora","sd-xl","sd-xl-composition","sd-xl-lora"]}},"ref":{"type":"object","description":"The reference to another input or output of the same workflow.\nMust have at least one of node or conditional.","properties":{"equal":{"type":"string","description":"This is the desired node output value if ref is an if/else node."},"node":{"type":"string","description":"The node id or 'workflow' if the source is a workflow input."},"conditional":{"type":"array","description":"The conditional nodes to reference.\nIf the conditional nodes are successful, the node will be successful.\nIf the conditional nodes are skipped, the node will be skipped.\nContains an array of node ids used to check the status of the nodes.","items":{"type":"string"}},"name":{"type":"string","description":"The name of the input or output to reference.\nIf the type is 'workflow', the name is the name of the input of the workflow is required\nIf the type is 'node', the name is not mandatory, except if you want all outputs of the node.\nTo get all outputs of a node, you can use the name 'all'."}}},"min":{"type":"number","description":"The minimum allowed value. Only available for \\`number\\` and array input types."},"promptSpark":{"type":"boolean","description":"Whether the input is used with prompt spark. Only available for \\`string\\` input type."},"placeholder":{"type":"string","description":"Placeholder text for the input. Only available for 'string' input type."},"group":{"type":"string","description":"Used to visually group inputs together in the UI. 
Inputs with the same group value appear\nconsecutively in the UI."},"allowedValues":{"type":"array","description":"The allowed values for the input. For \\`string\\` or \\`number\\` types, creates a single-select\ndropdown.\nFor \\`string_array\\` type, creates a multi-select dropdown.","items":{}},"costImpact":{"type":"boolean","description":"Whether this input affects the model's cost calculation"},"max":{"type":"number","description":"The maximum allowed value. Only available for \\`number\\` and \\`array\\` input types."},"kind":{"type":"string","description":"The asset kind of the input. Only taken into account for \\`file\\` and \\`file_array\\` input types.","enum":["3d","audio","document","image","image-hdr","json","video"]},"maskFrom":{"type":"string","description":"The name of the file input field to use as the mask source"},"label":{"type":"string","description":"The label displayed in the UI for this input"},"hint":{"type":"string","description":"Hint text displayed in the UI as a tooltip to guide the user"},"name":{"type":"string","description":"The name that must be used to call the model through the API"},"step":{"type":"number","description":"The step increment for numeric inputs. Only available for \\`number\\` input type.","minimum":1},"prompt":{"type":"boolean","description":"Whether the input is a prompt. When true, displays as a text area with prompt spark feature.\nOnly available for \\`string\\` input type."},"maxLength":{"type":"number","description":"The maximum allowed length for \\`string\\` inputs. 
Also applies to each item in \\`string_array\\`."}},"required":["name","type"]}},"id":{"type":"string","description":"The id of the node."},"logic":{"type":"object","description":"The logic of the node.\nOnly available for logic nodes.","properties":{"default":{"type":"string","description":"The default case of the logic.\nContains the id/output of the node to execute if no case is matched.\nOnly available for if/else nodes."},"transform":{"type":"string","description":"The transform of the logic.\nOnly available for transform nodes."},"cases":{"type":"array","description":"The cases of the logic.\nOnly available for if/else nodes.","items":{"type":"object","properties":{"condition":{"type":"string"},"value":{"type":"string"}},"required":["condition","value"]}}}},"type":{"type":"string","description":"The type of the job for the node.","enum":["custom-model","generate-prompt","logic","remove-background","transform","user-approval","workflow"]},"workflowId":{"type":"string","description":"The workflow id for the node. Mainly used for workflow tasks."},"status":{"type":"string","description":"The status of the node. 
Only available for WorkflowJob nodes.","enum":["failure","pending","processing","skipped","success"]}},"required":["id","status","type"]}},"workflowId":{"type":"string","description":"The workflow ID of the job if job is part of a workflow."},"workflowJobId":{"type":"string","description":"The workflow job ID of the job if job is part of a workflow job."}}},"statusHistory":{"type":"array","description":"The history of the different statuses the job went through with the ISO string date\nof when the job reached each statuses.","items":{"type":"object","properties":{"date":{"type":"string"},"status":{"type":"string","enum":["canceled","failure","finalizing","in-progress","pending","queued","success","warming-up"]}},"required":["date","status"]}},"progress":{"type":"number","description":"Progress of the job (between 0 and 1)"},"authorId":{"type":"string","description":"The author user ID (example: \"dcf121faaa1a0a0bbbd9ca1b73d62aea\")"},"jobType":{"type":"string","description":"The type of job","enum":["assets-download","canvas-export","caption","caption-llava","custom","describe-style","detection","embed","flux","flux-model-training","generate-prompt","image-generation","image-prompt-editing","inference","mesh-preview-rendering","model-download","model-import","model-training","openai-image-generation","patch-image","pixelate","reframe","remove-background","repaint","restyle","segment","skybox-3d","skybox-base-360","skybox-hdri","skybox-upscale-360","texture","translate","upload","upscale","upscale-skybox","upscale-texture","vectorize","workflow"]},"ownerId":{"type":"string","description":"The owner ID (example: \"team_U3Qmc8PCdWXwAQJ4Dvw4tV6D\")"},"billing":{"type":"object","description":"The billing of the job","properties":{"cuDiscount":{"type":"number"},"cuCost":{"type":"number"}},"required":["cuCost","cuDiscount"]},"status":{"type":"string","description":"The current status of the 
job","enum":["canceled","failure","finalizing","in-progress","pending","queued","success","warming-up"]},"updatedAt":{"type":"string","description":"The job last update date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"}},"required":["createdAt","jobId","jobType","metadata","progress","status","statusHistory","updatedAt"]}}},"PostModelsTransferRequest":{"type":"object","required":["destinationProjectId"],"properties":{"destinationProjectId":{"type":"string","description":"The id of the project to copy and transfer the model to"},"destinationTeamId":{"type":"string","description":"The id of the team to copy and transfer the model to"}}},"PutImageUpscaleResponse":{"type":"object","required":["asset"],"properties":{"image":{"type":"string","description":"The upscaled image in base64 format string only if returnImage is true."},"asset":{"type":"object","properties":{"preview":{"type":"object","description":"The asset's preview.\n\nContains the assetId and the url of the preview.","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"metadata":{"type":"object","description":"Metadata of the asset with some additional information","properties":{"maxIterations":{"type":"number"},"modality":{"type":"string","description":"Modality to detect","enum":["canny","depth","grayscale","lineart_anime","mlsd","normal","pose","scribble","segmentation","sketch"],"default":"canny"},"tileStyle":{"type":"boolean","description":"If set to true, during the upscaling process, the model will match tiles of the source image with tiles of the style image(s). This will result in a more coherent restyle. 
Works best with style images that have a similar composition.","default":false},"modelId":{"type":"string","description":"The modelId used to generate this asset"},"sourceProjectId":{"type":"string"},"bbox":{"type":"array","description":"A bounding box around the object of interest, in the format [x1, y1, x2, y2].","items":{}},"keypointThreshold":{"type":"number","description":"How polished is the surface? 0 is like a rough surface, 1 is like a mirror","minimum":0,"maximum":1},"type":{"type":"string","description":"The type of the asset. Ex: 'inference-txt2img' will represent an asset generated from a text to image model","enum":["3d-texture","3d-texture-albedo","3d-texture-metallic","3d-texture-mtl","3d-texture-normal","3d-texture-roughness","3d23d","3d23d-texture","audio2audio","background-removal","canvas","canvas-drawing","canvas-export","detection","generative-fill","image-prompt-editing","img23d","img2img","img2video","inference-controlnet","inference-controlnet-img2img","inference-controlnet-inpaint","inference-controlnet-inpaint-ip-adapter","inference-controlnet-ip-adapter","inference-controlnet-reference","inference-controlnet-texture","inference-img2img","inference-img2img-ip-adapter","inference-img2img-texture","inference-inpaint","inference-inpaint-ip-adapter","inference-reference","inference-reference-texture","inference-txt2img","inference-txt2img-ip-adapter","inference-txt2img-texture","patch","pixelization","reframe","restyle","segment","segmentation-image","segmentation-mask","skybox-3d","skybox-base-360","skybox-hdri","texture","texture-albedo","texture-ao","texture-edge","texture-height","texture-metallic","texture-normal","texture-smoothness","txt23d","txt2audio","txt2img","txt2video","unknown","uploaded","uploaded-3d","uploaded-audio","uploaded-avatar","uploaded-video","upscale","upscale-skybox","upscale-texture","upscale-video","vectorization","video2img","video2video"]},"text":{"type":"string","description":"A textual description / keywords 
describing the object of interest.","maxLength":100},"height":{"type":"number"},"nbMasks":{"type":"number"},"creativityDecay":{"type":"number","description":"Amount of decay in creativity over the upscale process. The lowest the value, the less the creativity will be preserved over the upscale process.","minimum":0,"maximum":100},"layerDifference":{"type":"number"},"scalingFactor":{"type":"number","description":"Scaling factor (when `targetWidth` not specified)","minimum":1,"maximum":16},"inputFidelity":{"type":"string","description":"When set to `high`, allows to better preserve details from the input images in the output.\nThis is especially useful when using images that contain elements like faces or logos that\nrequire accurate preservation in the generated image.\n\nYou can provide multiple input images that will all be preserved with high fidelity, but keep\nin mind that the first image will be preserved with richer textures and finer details, so if\nyou include elements such as faces, consider placing them in the first image.\n\nOnly available for the `gpt-image-1` model.","enum":["high","low"],"default":"low"},"modelType":{"type":"string","description":"The type of the generator used","enum":["custom","flux.1","flux.1-composition","flux.1-kontext-dev","flux.1-kontext-lora","flux.1-krea-dev","flux.1-krea-lora","flux.1-lora","flux.1-pro","flux.1.1-pro-ultra","flux1.1-pro","gpt-image-1","sd-1_5","sd-1_5-composition","sd-1_5-lora","sd-xl","sd-xl-composition","sd-xl-lora"]},"numOutputs":{"type":"number","description":"The number of outputs to generate.","minimum":1,"maximum":8},"angular":{"type":"number","description":"How angular is the surface? 
0 is like a sphere, 1 is like a mechanical object","minimum":0,"maximum":1},"targetHeight":{"type":"number","description":"The target height of the output image.","minimum":0,"maximum":2048},"concepts":{"type":"array","description":"Flux Kontext LoRA to style the image.\nFor Flux Kontext Prompt Editing.","items":{"type":"object","properties":{"modelId":{"type":"string","description":"The model ID (example: \"model_eyVcnFJcR92BxBkz7N6g5w\")"},"modelEpoch":{"type":"string","description":"The epoch of the model (example: \"000001\")\nOnly available for Flux Lora Trained models"},"scale":{"type":"number","description":"The scale of the model (example: 1.0)\nFor Flux Kontext Prompt Editing, the scale is between 0 and 2.","minimum":-2,"maximum":2}},"required":["modelId","scale"]}},"size":{"type":"number"},"spliceThreshold":{"type":"number"},"dilate":{"type":"number","description":"The number of pixels to dilate the result masks.","minimum":0,"maximum":30},"style":{"type":"string","enum":["3d-cartoon","3d-rendered","anime","cartoon","cinematic","claymation","cloud-skydome","comic","cyberpunk","enchanted","fantasy","ink","manga","manga-color","minimalist","neon-tron","oil-painting","pastel","photo","photography","psychedelic","retro-fantasy","scifi-concept-art","space","standard","whimsical"]},"imageFidelity":{"type":"number","description":"Strengthen the similarity to the original image during the upscale. 
Default: optimized for your preset and style.","minimum":0,"maximum":100},"inputLocation":{"type":"string","description":"Location of the input image in the output.","enum":["bottom","left","middle","right","top"],"default":"middle"},"seed":{"type":"string","description":"The seed used to generate this asset"},"preset":{"type":"string"},"trainingImage":{"type":"boolean"},"resultMask":{"type":"boolean","description":"Boolean to enable returning the masks (binary image) in the response.","default":true},"lockExpiresAt":{"type":"string","description":"The ISO timestamp when the lock on the canvas will expire"},"baseModelId":{"type":"string","description":"The baseModelId that may be changed at inference time"},"pathPrecision":{"type":"number"},"rootParentId":{"type":"string"},"sharpen":{"type":"boolean","description":"Sharpen tiles.","default":false},"cornerThreshold":{"type":"number"},"styleImages":{"type":"array","description":"List of style images. Most of the time, only one image is enough. It must be existing AssetIds.","items":{"type":"string"}},"texture":{"type":"string","description":"The asset to convert in texture maps. Must reference an existing AssetId."},"filterSpeckle":{"type":"number"},"minThreshold":{"type":"number","description":"Minimum threshold for Grayscale conversion"},"maxThreshold":{"type":"number","description":"Maximum threshold for Grayscale conversion"},"styleImagesFidelity":{"type":"number","description":"Condition the influence of the style image(s). The higher the value, the more the style images will influence the upscaled image.","minimum":0,"maximum":100},"depthImage":{"type":"string","description":"The control image processed by depth estimator. 
Must reference an existing AssetId."},"parentId":{"type":"string"},"parentJobId":{"type":"string"},"width":{"type":"number","description":"The width of the rendered image.","minimum":1024,"maximum":2048},"strength":{"type":"number","description":"The strength\n\nOnly available for the `flux-kontext` LoRA model."},"inferenceId":{"type":"string","description":"The id of the Inference describing how this image was generated"},"lengthThreshold":{"type":"number"},"originalAssetId":{"type":"string"},"backgroundOpacity":{"type":"number","description":"Int to set between 0 and 255 for the opacity of the background in the result images.","minimum":0,"maximum":255},"aspectRatio":{"type":"string","description":"The optional aspect ratio given for the generation, only applicable for some models"},"detailsLevel":{"type":"number","description":"Amount of details to remove or add","minimum":-50,"maximum":50},"fractality":{"type":"number","description":"Determine the scale at which the upscale process works.\n- With a small value, the upscale works at the largest scale, resulting in fewer added details and more coherent images. Ideal for portraits, for example.\n- With a large value, the upscale works at the smallest scale, resulting in more added details and more hallucinations. Ideal for landscapes, for example.\n\n(info): A small value is slower and more expensive to run.","minimum":0,"maximum":100},"horizontalExpansionRatio":{"type":"number","description":"(deprecated) Horizontal expansion ratio.","minimum":1,"maximum":2},"points":{"type":"array","description":"List of points (label, x, y) in the image where label = 0 for background and 1 for object.","items":{"type":"array","items":{}}},"mode":{"type":"string"},"colorPrecision":{"type":"number"},"promptFidelity":{"type":"number","description":"Increase the fidelity to the prompt during upscale. 
Default: optimized for your preset and style.","minimum":0,"maximum":100},"betterQuality":{"type":"boolean","description":"Remove small dark spots (i.e. “pepper”) and connect small bright cracks.","default":false},"removeBackground":{"type":"boolean","description":"Remove background for Grayscale detector","default":true},"creativity":{"type":"number","description":"Allow the generation of \"hallucinations\" during the upscale process, which adds additional details and deviates from the original image. Default: optimized for your preset and style.","minimum":0,"maximum":100},"structureFidelity":{"type":"number","description":"Strength for the input image structure preservation","minimum":0,"maximum":100},"imageType":{"type":"string","description":"Preserve the seamless properties of skybox or texture images. Input has to be of same type (seamless).","enum":["seamfull","skybox","texture"],"default":"seamfull"},"image":{"type":"string","description":"The input image to process. Must reference an existing AssetId or be a data URL."},"thumbnail":{"type":"object","description":"The thumbnail of the canvas","properties":{"assetId":{"type":"string","description":"The AssetId of the image used as a thumbnail for the canvas (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"url":{"type":"string","description":"The url of the image used as a thumbnail for the canvas"}},"required":["assetId","url"]},"structureImage":{"type":"string","description":"The control image for structure. A canny detector will be applied to this image. 
Must reference an existing AssetId."},"invert":{"type":"boolean","description":"To invert the relief","default":false},"kind":{"type":"string","enum":["3d","audio","document","image","image-hdr","json","video"]},"colorMode":{"type":"string"},"highThreshold":{"type":"number","description":"High threshold for Canny detector"},"verticalExpansionRatio":{"type":"number","description":"(deprecated) Vertical expansion ratio.","minimum":1,"maximum":2},"halfMode":{"type":"boolean"},"name":{"type":"string"},"negativePromptStrength":{"type":"number","description":"Controls the influence of the negative prompt. Default 0 means the negative prompt has no effect. Higher values increase negative prompt influence.\nMust be > 0 if negativePrompt is provided.","minimum":0,"maximum":10},"refinementSteps":{"type":"number","description":"Additional refinement steps before scaling.\n\nIf scalingFactor == 1, the refinement process will be applied (1 + refinementSteps) times.\nIf scalingFactor > 1, the refinement process will be applied refinementSteps times.","minimum":0,"maximum":4},"referenceImages":{"type":"array","description":"The reference images used for the asset generation or editing","items":{"type":"string"}},"styleFidelity":{"type":"number","description":"The higher the value the more it will look like the style image(s)","minimum":0,"maximum":100},"resultContours":{"type":"boolean","description":"Boolean to output the contours.","default":false},"controlEnd":{"type":"number","description":"End step for control."},"polished":{"type":"number","description":"How polished is the surface? 0 is like a rough surface, 1 is like a mirror","minimum":0,"maximum":1},"scheduler":{"type":"string","description":"The scheduler used to generate this asset"},"overrideEmbeddings":{"type":"boolean","description":"Override the embeddings of the model. Only your prompt and negativePrompt will be used. 
Use with caution."},"progressPercent":{"type":"number"},"factor":{"type":"number","description":"Contrast factor for Grayscale detector"},"mask":{"type":"string","description":"The mask used for the asset generation or editing"},"lowThreshold":{"type":"number","description":"Low threshold for Canny detector"},"depthFidelity":{"type":"number","description":"The depth fidelity if a depth image provided","minimum":0,"maximum":100},"raised":{"type":"number","description":"How raised is the surface? 0 is flat like water, 1 is like a very rough rock","minimum":0,"maximum":1},"copiedAt":{"type":"string","description":"The date when the asset was copied to a project"},"contours":{"type":"array","items":{"type":"array","items":{"type":"array","items":{"type":"array","items":{"type":"number"}}}}},"negativePrompt":{"type":"string","description":"The negative prompt used to generate this asset"},"defaultParameters":{"type":"boolean","description":"If true, use the default parameters","default":false},"resizeOption":{"type":"number","description":"Size proportion of the input image in the output.","minimum":0.1,"maximum":1},"outputIndex":{"type":"number"},"saveFlipbook":{"type":"boolean","description":"Save a flipbook of the texture. Deactivated when the input texture is larger than 2048x2048px","default":true},"guidance":{"type":"number","description":"The guidance used to generate this asset"},"numInferenceSteps":{"type":"number","description":"The number of denoising steps for each image generation.","minimum":5,"maximum":50},"hdr":{"type":"number"},"clustering":{"type":"boolean","description":"Activate clustering.","default":false},"shiny":{"type":"number","description":"How shiny is the surface? 
0 is like a matte surface, 1 is like a diamond","minimum":0,"maximum":1},"colorCorrection":{"type":"boolean","description":"Ensure upscaled tiles have the same color histogram as the original tile.","default":true},"sketch":{"type":"boolean","description":"Activate sketch detection instead of canny.","default":false},"overlapPercentage":{"type":"number","description":"Overlap percentage for the output image.","minimum":0,"maximum":0.5},"prompt":{"type":"string","description":"The prompt that guided the asset generation or editing"},"resultImage":{"type":"boolean","description":"Boolean to enable outputting the cut-out object.","default":false},"cannyStructureImage":{"type":"string","description":"The control image already processed by canny detector. Must reference an existing AssetId."},"geometryEnforcement":{"type":"number","description":"Apply extra control to the Skybox 360 geometry.\nThe higher the value, the more the 360 geometry will influence the generated skybox image.\n\nUse with caution. 
Default is adapted to the other parameters.","minimum":0,"maximum":100},"targetWidth":{"type":"number","description":"Target width for the upscaled image, take priority over scaling factor","minimum":1024,"maximum":16000}},"required":["kind","type"]},"automaticCaptioning":{"type":"string","description":"Automatic captioning of the asset"},"description":{"type":"string","description":"The description, it will contain in priority:\n- the manual description\n- the advanced captioning when the asset is used in training flow\n- the automatic captioning"},"privacy":{"type":"string","description":"The privacy of the asset","enum":["private","public","unlisted"]},"mimeType":{"type":"string","description":"The mime type of the asset (example: \"image/png\")"},"source":{"type":"string","description":"source of the asset","enum":["3d23d","3d23d:texture","3d:texture","3d:texture:albedo","3d:texture:metallic","3d:texture:mtl","3d:texture:normal","3d:texture:roughness","audio2audio","background-removal","canvas","canvas-drawing","canvas-export","detection","generative-fill","image-prompt-editing","img23d","img2img","img2video","inference-control-net","inference-control-net-img","inference-control-net-inpainting","inference-control-net-inpainting-ip-adapter","inference-control-net-ip-adapter","inference-control-net-reference","inference-control-net-texture","inference-img","inference-img-ip-adapter","inference-img-texture","inference-in-paint","inference-in-paint-ip-adapter","inference-reference","inference-reference-texture","inference-txt","inference-txt-ip-adapter","inference-txt-texture","patch","pixelization","reframe","restyle","segment","segmentation-image","segmentation-mask","skybox-3d","skybox-base-360","skybox-hdri","texture","texture:albedo","texture:ao","texture:edge","texture:height","texture:metallic","texture:normal","texture:smoothness","txt23d","txt2audio","txt2img","txt2video","unknown","uploaded","uploaded-3d","uploaded-audio","uploaded-avatar","uploaded-video"
,"upscale","upscale-skybox","upscale-texture","upscale-video","vectorization","video2img","video2video"]},"ownerId":{"type":"string","description":"The owner (project) ID (example: \"proj_23tlk332lkht3kl2\" or \"team_dlkhgs23tlk3hlkth32lkht3kl2\" for old teams)"},"createdAt":{"type":"string","description":"The asset creation date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"firstFrame":{"type":"object","description":"The video asset's first frame.\n\nContains the assetId and the url of the first frame.","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"editCapabilities":{"type":"array","description":"List of edit capabilities","items":{"type":"string","enum":["DETECTION","GENERATIVE_FILL","PIXELATE","PROMPT_EDITING","REFINE","REFRAME","REMOVE_BACKGROUND","SEGMENTATION","UPSCALE","UPSCALE_360","VECTORIZATION"]}},"embedding":{"type":"array","description":"The embedding of the asset when requested.\n\nOnly available when an asset can be embedded (ie: not Detection maps)","items":{"type":"number"}},"id":{"type":"string","description":"The asset ID (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"updatedAt":{"type":"string","description":"The asset last update date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"thumbnail":{"type":"object","description":"The asset's thumbnail.\n\nContains the assetId and the url of the thumbnail.","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"nsfw":{"type":"array","description":"The NSFW labels","items":{"type":"string"}},"kind":{"type":"string","description":"The kind of asset","enum":["3d","audio","document","image","image-hdr","json","video"]},"lastFrame":{"type":"object","description":"The video asset's last frame.\n\nContains the assetId and the url of the last 
frame.","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"originalFileUrl":{"type":"string","description":"The original file url.\n\nContains the url of the original file, without any conversion. Only available for some specific video, audio and threeD assets.\nIs only specified if the given asset data has been replaced with a new file during the creation of the asset."},"authorId":{"type":"string","description":"The author user ID (example: \"dcf121faaa1a0a0bbbd9ca1b73d62aea\")"},"url":{"type":"string","description":"Signed URL to get the asset content"},"isHidden":{"type":"boolean","description":"Whether the asset is hidden."},"tags":{"type":"array","description":"The associated tags (example: [\"sci-fi\", \"landscape\"])","items":{"type":"string"}},"outputIndex":{"type":"number","description":"The output index of the asset within a job\nThis index is a positive integer that starts at 0\nIt is used to differentiate between multiple outputs of the same job\nIf the job has only one output, this index is 0"},"collectionIds":{"type":"array","description":"A list of CollectionId this asset belongs to","items":{"type":"string"}},"properties":{"type":"object","description":"The properties of the asset, content may depend on the kind of asset returned","properties":{"transcription":{"type":"object","description":"Transcription of the audio","properties":{"text":{"type":"string"}},"required":["text"]},"hasNormals":{"type":"boolean","description":"Whether the mesh has normal vectors"},"format":{"type":"string","description":"Format of the mesh file (e.g. 
'glb', etc.)"},"description":{"type":"string","description":"Description of the audio"},"bitrate":{"type":"number","description":"Bitrate of the media in bits per second"},"boneCount":{"type":"number","description":"Number of bones if skeleton exists"},"classification":{"type":"string","description":"Classification of the audio","enum":["effect","interview","music","other","sound","speech","text","unknown"]},"sampleRate":{"type":"number","description":"Sample rate of the media in Hz"},"faceCount":{"type":"number","description":"Number of faces/triangles in the mesh"},"vertexCount":{"type":"number","description":"Number of vertices in the mesh"},"animationFrameCount":{"type":"number","description":"Number of animation frames if animations exist"},"duration":{"type":"number","description":"Duration of the media in seconds"},"frameRate":{"type":"number","description":"Frame rate of the video in frames per second"},"nbFrames":{"type":"number","description":"Number of frames in the video"},"channels":{"type":"number","description":"Number of channels of the audio"},"size":{"type":"number"},"hasSkeleton":{"type":"boolean","description":"Whether the mesh has bones/skeleton"},"width":{"type":"number"},"hasAnimations":{"type":"boolean","description":"Whether the mesh has animations"},"codecName":{"type":"string","description":"Codec name of the media"},"hasUVs":{"type":"boolean","description":"Whether the mesh has UV coordinates"},"dimensions":{"type":"array","description":"Bounding box dimensions [width, height, depth]","items":{}},"height":{"type":"number"}},"required":["size"]},"status":{"type":"string","description":"The actual 
status","enum":["error","pending","success"]}},"required":["authorId","collectionIds","createdAt","editCapabilities","id","kind","metadata","mimeType","ownerId","privacy","properties","source","status","tags","updatedAt","url"]}}},"PutModelsByModelIdRequest":{"type":"object","properties":{"negativePromptEmbedding":{"type":"string","description":"Add a negative prompt embedding to every model's generation"},"thumbnail":{"type":"string","description":"The AssetId of the image you want to use as a thumbnail for the model (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\"). Set to null to unset the thumbnail"},"concepts":{"type":"array","description":"The concepts is required for composition models. With one or more loras\n\nOnly applicable to Flux based models (and older SD1.5 and SDXL models)","items":{"type":"object","properties":{"modelId":{"type":"string","description":"The model ID (example: \"model_eyVcnFJcR92BxBkz7N6g5w\")"},"modelEpoch":{"type":"string","description":"The epoch of the model (example: \"000001\")\nOnly available for Flux Lora Trained models"},"scale":{"type":"number","description":"The scale of the model (example: 1.0)\nFor Flux Kontext Prompt Editing, the scale is between 0 and 2.","minimum":-2,"maximum":2}},"required":["modelId","scale"]}},"classSlug":{"type":"string","description":"The slug of the class you want to use (ex: \"characters-npcs-mobs-characters\"). Set to null to unset the class","pattern":"^[a-z0-9-]+$"},"name":{"type":"string","description":"The model's name (ex: \"Cinematic Realism\").\n\nIf not set, the model's name will be automatically generated when starting training based on training data.","maxLength":64},"epoch":{"type":"string","description":"The epoch of the model. 
Only available for flux.1-lora and flux.1-kontext-lora based models.\n\nThe epoch can only be set if the model has epochs and is in status \"trained\".\n\nThe default epoch (if not set) is the final model epoch (latest).\n\nSet to null to unset the epoch."},"promptEmbedding":{"type":"string","description":"Add a prompt embedding to every model's generation"},"shortDescription":{"type":"string","description":"The model's short description (ex: \"This model generates highly detailed cinematic scenes.\").\n\nIf not set, the model's short description will be automatically generated when starting training based on training data.","maxLength":256},"type":{"type":"string","description":"The model's type (ex: \"flux.1-lora\").\n\nThe type can only be changed if the model has the \"new\" status.","enum":["custom","flux.1","flux.1-composition","flux.1-kontext-dev","flux.1-kontext-lora","flux.1-krea-dev","flux.1-krea-lora","flux.1-lora","flux.1-pro","flux.1.1-pro-ultra","flux1.1-pro","gpt-image-1","sd-1_5","sd-1_5-composition","sd-1_5-lora","sd-xl","sd-xl-composition","sd-xl-lora"]},"parameters":{"type":"object","description":"The parameters to use for the model's training","properties":{"priorLossWeight":{"type":"number","description":"The weight of prior preservation loss\n\nOnly available for SD15 and SDXL LoRA training","minimum":0,"maximum":1.7976931348623157e+308,"exclusiveMinimum":true},"seed":{"type":"number","description":"Used to reproduce previous results. 
Default: randomly generated number.\n\nOnly available for SD15 and SDXL LoRA training","minimum":0,"maximum":9007199254740991},"numUNetTrainSteps":{"type":"number","description":"The number of training steps for the UNet\n\nOnly available for SDXL LoRA training","minimum":0,"maximum":40000},"sampleSourceImages":{"type":"array","description":"The sample prompt images (AssetIds) paired with samplePrompts\nOnly available for Flux LoRA training\nMust be the same length as samplePrompts","items":{"type":"string"}},"classPrompt":{"type":"string","description":"The prompt to specify images in the same class as provided instance images\n\nOnly available for SD15 training"},"wandbKey":{"type":"string","description":"The Weights And Bias key to use for logging. The maximum length is 40 characters"},"scaleLr":{"type":"boolean","description":"Whether to scale the learning rate\n\nNote: Legacy parameter, will be ignored\n\nOnly available for SD15 and SDXL LoRA training","default":false},"randomCropScale":{"type":"number","description":"Scale of random crops\n\nOnly available for SD15 and SDXL LoRA training","minimum":0,"maximum":1},"lrScheduler":{"type":"string","description":"The scheduler type to use (default: \"constant\")\n\nOnly available for SD15 and SDXL LoRA training","enum":["constant","constant-with-warmup","cosine","cosine-with-restarts","linear","polynomial"],"default":"constant"},"rank":{"type":"number","description":"The dimension of the LoRA update matrices\n\nOnly available for SDXL and Flux LoRA training\n\nDefault value varies depending on the model type:\n- For SDXL: 64\n- For Flux: 16","minimum":4,"maximum":64},"validationPrompt":{"type":"string","description":"Validation prompt\n\nOnly available for SD15 and SDXL LoRA training","default":""},"conceptPrompt":{"type":"string","description":"The prompt with identifier specifying the instance (or subject) of the class (example: \"a daiton dog\")\n\nDefault value varies depending on the model type:\n- For SD1.5: 
\"daiton\" if no class is associated with the model\n- For SDXL: \"daiton\"\n- For Flux: \"\""},"maxTrainSteps":{"type":"number","description":"Maximum number of training steps to execute (default: varies depending on the model type)\n\nFor SDXL LoRA training, please use `numTextTrainSteps` and `numUNetTrainSteps` instead\n\nDefault value varies depending on the model type:\n- For SD1.5: round((number of training images * 225) / 3)\n- For SDXL: number of training images * 175\n- For Flux: number of training images * 100\n\nMaximum value varies depending on the model type:\n- For SD1.5 and SDXL: [0, 40000]\n- For Flux: [0, 10000]","minimum":0,"maximum":40000},"nbEpochs":{"type":"number","description":"The number of epochs to train for\n\nOnly available for Flux LoRA training","minimum":1,"maximum":30},"textEncoderTrainingRatio":{"type":"number","description":"Whether to train the text encoder or not\n\nExample: For 100 steps and a value of 0.2, it means that the text encoder will be trained for 20 steps and then the UNet for 80 steps\n\nNote: Legacy parameter, please use `numTextTrainSteps` and `numUNetTrainSteps`\n\nOnly available for SD15 and SDXL LoRA training","minimum":0,"maximum":0.99},"validationFrequency":{"type":"number","description":"Validation frequency. 
Cannot be greater than maxTrainSteps value\n\nOnly available for SD15 and SDXL LoRA training","minimum":0},"nbRepeats":{"type":"number","description":"The number of times to repeat the training\n\nOnly available for Flux LoRA training","minimum":1,"maximum":30},"samplePrompts":{"type":"array","description":"The prompts to use for each epoch\nOnly available for Flux LoRA training","items":{"type":"string"}},"randomCropRatio":{"type":"number","description":"Ratio of random crops\n\nOnly available for SD15 and SDXL LoRA training","minimum":0,"maximum":1},"learningRate":{"type":"number","description":"Initial learning rate (after the potential warmup period)\n\nDefault value varies depending on the model type:\n- For SD1.5 and SDXL: 0.000005\n- For Flux: 0.0001","minimum":0,"exclusiveMinimum":true},"optimizeFor":{"type":"string","description":"Optimize the model training task for a specific type of input images. The available values are:\n- \"likeness\": optimize training for likeness or portrait (targets specific transformer blocks)\n- \"all\": train all transformer blocks\n- \"none\": train no specific transformer blocks\n\nThis parameter controls which double and single transformer blocks are trained\nduring the LoRA training process.\n\nOnly available for Flux LoRA training","enum":["likeness"],"default":"undefined"},"randomCrop":{"type":"boolean","description":"Whether to random crop or center crop images before resizing to the working resolution\n\nOnly available for SD15 and SDXL LoRA training","default":false},"numTextTrainSteps":{"type":"number","description":"The number of training steps for the text encoder\n\nOnly available for SDXL LoRA training","minimum":0,"maximum":40000},"learningRateTextEncoder":{"type":"number","description":"Initial learning rate (after the potential warmup period) for the text encoder\n\nMaximum [Flux LoRA: 0.001]\nDefault [SDXL: 0.00005 | Flux LoRA: 0.00001]\nMinimum [SDXL: 0 | Flux LoRA: 
0.000001]","minimum":0,"maximum":0.001,"exclusiveMinimum":true},"learningRateUnet":{"type":"number","description":"Initial learning rate (after the potential warmup period) for the UNet\n\nOnly available for SDXL LoRA training","minimum":0,"exclusiveMinimum":true},"batchSize":{"type":"number","description":"The batch size.\nA larger batch size means fewer steps and will increase the learning rate.\n\nOnly available for Flux LoRA training","minimum":1,"maximum":4}}}}},"PostModelsInferencesByModelIdRequest":{"type":"object","required":["parameters"],"properties":{"parameters":{"type":"object","properties":{"ipAdapterImageIds":{"type":"array","description":"The IpAdapter images as an AssetId. Will be ignored if the `ipAdapterImages` parameter is provided","items":{"type":"string"}},"styleFidelity":{"type":"number","description":"If style_fidelity=1.0, control more important, else if style_fidelity=0.0, prompt more important, else balanced\nOnly for \"reference\" inference type","minimum":0,"maximum":1},"controlEnd":{"type":"number","description":"Specifies how long the ControlNet guidance should be applied during the inference process.\n\nOnly available for Flux.1-dev based models.\n\nThe value represents the percentage of total inference steps where the ControlNet guidance is active.\nFor example:\n- 1.0: ControlNet guidance is applied during all inference steps\n- 0.5: ControlNet guidance is only applied during the first half of inference steps\n\nDefault values:\n- 0.5 for Canny modality\n- 0.6 for all other modalities","minimum":0.1,"maximum":1},"modality":{"type":"string","description":"The modality associated with the control image used for the generation: it can either be a combination of modalities or a preset, up to a maximum that depends on the model family.\n\nFor models of SD1.5 family:\n - up to 3 modalities from `canny`, `pose`, `depth`, `lines`, `seg`, `scribble`, `lineart`, `normal-map`, `illusion`\n - or one of the following presets: `character`, `landscape`, `city`, `interior`.\n\nFor models of the SDXL family:\n - up to 3 
modalities from `canny`, `pose`, `depth`, `seg`, `illusion`, `scribble`\n - or one of the following presets: `character`, `landscape`.\n\nFor models of the FLUX schnell or dev families:\n- one modality from: `canny`, `tile`, `depth`, `blur`, `pose`, `gray`, `low-quality`\n\nOptionally, you can associate a value to these modalities or presets. The value must be within `]0.0, 1.0]`.\n\nExamples:\n- `canny`\n- `depth:0.5,pose:1.0`\n- `canny:0.5,depth:0.5,lines:0.3`\n- `landscape`\n- `character:0.5`\n- `illusion:1`\n\nNote: if you use a value that is not supported by the model family, this will result in an error.","enum":["blur","canny","depth","gray","illusion","lineart","lines","low-quality","normal-map","pose","scribble","seg","sketch","tile"]},"seed":{"type":"string","description":"Used to reproduce previous results. Default: randomly generated number."},"strength":{"type":"number","description":"Controls the noise intensity introduced to the input image, where a value of 1.0 completely erases the original image's details. Available for img2img and inpainting. (within [0.01, 1.0], default: 0.75)","minimum":0.01,"maximum":1},"modelId":{"type":"string","description":"The model id to use for the inference"},"ipAdapterType":{"type":"string","description":"The type of IP Adapter model to use. Must be one of [`style`, `character`], defaults to `style`","enum":["character","style"],"default":"style"},"aspectRatio":{"type":"string","description":"The aspect ratio of the generated images. Only used for the model flux.1.1-pro-ultra.\nThe aspect ratio is a string formatted as \"width:height\" (example: \"16:9\").","enum":["16:9","1:1","21:9","2:3","3:2","3:4","4:3","4:5","5:4","9:16","9:21"],"default":"1:1"},"type":{"type":"string","description":"The type of inference to use. 
Example: txt2img, img2img, etc.\n\nSelecting the right type will condition the expected parameters.\n\nNote: if model.type is `sd-xl*` or `sd-1_5*`, when using the `\"inpaint\"` inference type, Scenario determines the best available `baseModel` for a given `modelId`: one of `[\"stable-diffusion-inpainting\", \"stable-diffusion-xl-1.0-inpainting-0.1\"] will be used.","enum":["controlnet","controlnet_img2img","controlnet_inpaint","controlnet_inpaint_ip_adapter","controlnet_ip_adapter","controlnet_reference","controlnet_texture","img2img","img2img_ip_adapter","img2img_texture","inpaint","inpaint_ip_adapter","outpaint","reference","reference_texture","txt2img","txt2img_ip_adapter","txt2img_texture"]},"ipAdapterImage":{"type":"string","description":"Deprecated for type txt2img-ip-adapter and img2img-ip-adapter, use `ipAdapterImages` instead.\nThe IpAdapter image as a data url. Will be ignored if the `ipAdapterImages` parameter is provided."},"scheduler":{"type":"string","description":"The scheduler to use to override the default configured for the model. See detailed documentation for more details.","enum":["DDIMScheduler","DDPMScheduler","DEISMultistepScheduler","DPMSolverMultistepScheduler","DPMSolverSinglestepScheduler","EulerAncestralDiscreteScheduler","EulerDiscreteScheduler","HeunDiscreteScheduler","KDPM2AncestralDiscreteScheduler","KDPM2DiscreteScheduler","LCMScheduler","LMSDiscreteScheduler","PNDMScheduler","TCDScheduler","UniPCMultistepScheduler"]},"disableMerging":{"type":"boolean","description":"If set to true, the entire input image will likely change during inpainting. 
This results in faster inferences, but the output image will be harder to integrate if the input is just a small part of a larger image.","default":false},"disableModalityDetection":{"type":"boolean","description":"If false, the process uses the given image to detect the modality.\nIf true (default), the process will not try to detect the modality of the given image.\n\nFor example:\nwith `pose` modality and `false` value, the process will detect the pose of people in the given image\nwith `depth` modality and `false` value, the process will detect the depth of the given image\nwith `scribble` modality and `true`value, the process will use the given image as a scribble\n\n⚠️ For models of the FLUX schnell or dev families, this parameter is ignored. The modality detection is always disabled. ⚠️","default":true},"ipAdapterImages":{"type":"array","description":"The IpAdapter images as a data url.","items":{"type":"string"}},"imageParentId":{"type":"string","description":"Specifies the parent asset Id for the image when provided as a dataurl."},"baseModelId":{"type":"string","description":"The base model to use for the inference. 
Only Flux LoRA models can use this parameter.\nAllowed values are available in the model's attribute: `compliantModelIds`"},"controlStart":{"type":"number","description":"Specifies the starting point of the ControlNet guidance during the inference process.\n\nOnly available for Flux.1-dev based models.\n\nThe value represents the percentage of total inference steps where the ControlNet guidance starts.\nFor example:\n- 0.0: ControlNet guidance starts at the beginning of the inference steps\n- 0.5: ControlNet guidance starts at the middle of the inference steps","minimum":0,"maximum":0.9},"height":{"type":"number","description":"The height of the generated images, must be a 8 multiple (within [64, 2048], default: 512)\nIf model.type is `sd-xl`, `sd-xl-lora`, `sd-xl-composition` the height must be within [512, 2048]\nIf model.type is `sd-1_5`, the height must be within [64, 1024]\nIf model.type is `flux.1.1-pro-ultra`, you can use the aspectRatio parameter instead","minimum":64,"maximum":2048},"imageHide":{"type":"boolean","description":"Toggles the hidden status of the image when provided as a dataurl.","default":false},"mask":{"type":"string","description":"The mask as a data URL, used to determine the area of change. The mask is a binary mask made out of white and black pixels. The white area is the one that will be replaced. 
(example: \"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAgAAAAIACAMAAADDpiTIAAABiVBMVEUAAADw8PDCwsLExMT5+fn19fX8/Pz////+/v79/f2hoaH6+vrc3NxnZ2dDQ0P4+PhkZGTs7OzOzs6Ojo709PRiYmLd3d1paWkoKCji4uI9PT3n5+fe3t7z8/NISEiysrLg4ODk5OSYmJh/f3/u7u5lZWVRUVHS0tKIiIg+Pj7p6emXl5dUVFQYGBjKysqtra1TU1PT09M8PDwcHBzR0dHq6uoEBAQmJiZ8fHzm5ub7+/swMDCrq6uKioqpqalHR0c3NzdOTk6BgYF7e3uwsLCAgIB3d3empqaNjY06OjrW1tZhYWG0tLQgICBxcXEICAhPT0/o6OgkJCRzc3N5eXnV1dXj4+NKSkobGxtaWlpfX1/a2trBwcF2dnYlJSV9fX3Hx8eSkpJNTU1sbGyWlpYRERGCgoIMDAzPz8+MjIy4uLiTk5PNzc3X19cxMTGDg4MpKSm8vLxGRkavr69QUFAKCgoqKiq2trbt7e329vaGhobl5eVra2tZWVk4ODgzMzNcXFyurq63t7dzhmTOAAAFeElEQVR4nO3dZXMUaRQF4EBIOgkQdFncFhZfZPF1d3d3d3f95TthC6iQTE9kuk+Y93m+MpW6t8+p7mGkZ2gIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACK8MDyR784eTw9BRkj1RU70rPQuvFqmjXpeWjVPdUMn6VnokUz86+qjemhaM3obAWoqvRYtGX2/DWgFA93K8DK9GS0Yku3AlTr06PRht+6FqB6Mj0bLVjfvQCeBpRgb00BRtLD0YKaAjgFlKCuAF4ULsAKp4CyHakrgLcGC1BXAKeAAihA4cbqCnAyPR2N211XgNH0dDRup2tA4RSgcLUFOJaejsbVFuC59HQ0bltdAW5KT0fjVtcV4GB6OppXV4C30sPRvLoCHE0PR/PqCvBEejiaV1eA9Gy0QAEKV5P/WHo2WlBTgHXp2WhBTQFuSM9GCzwFKNv3ngKU7WL3ApxOz0YLXAHKNqwAZavJf0V6NlpQU4Bd6dloXpd7BLkClKImfwUogO+Glu3VuvwVYPDV5q8AA2+lAhTtVH3+CjDoeuRf3ZgekEYt61WAi+kJaVLt98JdAwbe/b3z14ABdn4u+VfVY+k5acbNc8u/qjalJ6UJd881/47t6WHpu2PzyL/yCzID56755T/llvTM9M8H88+/Y5+XhQbE2QXlP2XVg5/cfnrPmvHLtxYcG3nhu+dXp/dhfmpvCLIw4+mdmLtn+59/xzPptZirRvL3AeLrRkP5uwxcJxrL3y8MLSVP/XHr7P/QYP5VtbbdJelm/7RYduza+ebXmzdfakSj+XvrcIn4tOGYu9uQXp2O92P5u5vAUjDnt3mbkF6exi/z9X5Mb89r0QLcl16faP7uLR9X+1XP5qXXJ5u/AqT1/KZHs35J71+8bP5OAGl7svn/nd6/eNn8t6TXJ5r/4fT27Evm/1N6e6InALeVXwKC+b+c3p2h8FOA29Lbc2+0ANXe9P7FO5MtQPVN+gCULpx/VQ2nj0Dh0vl7KTgsHb/vhoSl46+cArLS6XcsTx+Dkv2QTr/jxfRBKNnT6fQ7VqUPQsneS6dfuZ9Y1IZ0+h3b0gehZHU//9eWC+mDULLwB0Iv8b+AoLHe+TQufQyKlg6/8kpgVjr9ygkgK51+Vd2ZPgRlS8dffZ4+AoVL5+9u0mHh/Hem9y9eMv0xNxLPG0k2IL08Q0PLkwXwCkBe9s2gX9PrcyJaABeBvGwBjqfXJ1sAp4A4BShc+BZxbhaf9ki2AI+n9ydbgNfT65MtwD/p9fk5WoAP0+uTPQVsTm9PtgDp5cm+H3
QmvTxD0VNAenWmrIvlfzC9OpeE4h87ml6c/2XuF74svTZXBOIfOZVemqv29w6sv/79KL0y00y0m/+59L5ca0u7DUivywyrekQ2+vGhPj5VWJdelxkOdAtrbNvE5ceM960ByU2Z3UMzY1q56cj0x1xQgEG26WpA4wfWfjnrY/p0SxG/FbI0vTP1EcHDq7fWPOTtvhTgfGsr0Xf9uLHUV+klWIQ/F38deCO9A4uz2E+TH0ovwGIt7ovl6enpg4nRBefvi+EDYniBLw29lB6cvhlewNMBN4gfMOcmT9yxfe4XhInef5Hr0dmtk5NbJ799Ze36uvg3/pWek+btXdkl/jW/p0ejLbuufXYwtvvd9EwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAPfwHLuRXafg0SKQAAAAASUVORK5CYII=\")"},"controlImageId":{"type":"string","description":"The controlnet input image as an AssetId. Will be ignored if the `controlnet` parameter is provided"},"image":{"type":"string","description":"The input image as a data URL (example: \"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVQYV2NgYAAAAAMAAWgmWQ0AAAAASUVORK5CYII=\") or the asset ID (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"imageId":{"type":"string","description":"Deprecated: The input image as an AssetId. Prefer to use image with the asset ID instead."},"ipAdapterImageId":{"type":"string","description":"Deprecated for type txt2img-ip-adapter and img2img-ip-adapter, use `ipAdapterImageIds` instead.\nThe IpAdapter image as an AssetId. Cannot be set if `ipAdapterImage` is provided. Will be ignored if the `ipAdapterImageIds` parameter is provided."},"ipAdapterScale":{"type":"number","description":"Deprecated for type txt2img-ip-adapter and img2img-ip-adapter, use `ipAdapterScales` instead.\nIpAdapter scale factor (within [0.0, 1.0], default: 0.9). Will be ignored if the `ipAdapterScales` parameter is provided","minimum":0,"maximum":1},"modelEpoch":{"type":"string","description":"The epoch of the model to use for the inference. 
Only available for Flux Lora Trained models."},"hideResults":{"type":"boolean","description":"If set, generated assets will be hidden and not returned in the list of images of the inference\nor when listing assets (default: false)","default":false},"ipAdapterScales":{"type":"array","description":"IpAdapter scale factors (within [0.0, 1.0], default: 0.9).","items":{"type":"number"}},"maskId":{"type":"string","description":"The mask as an AssetId. Will be ignored if the `image` parameter is provided"},"referenceAdain":{"type":"boolean","description":"Whether to use reference adain\nOnly for \"reference\" inference type","default":false},"negativePrompt":{"type":"string","description":"The prompt not to guide the image generation, ignored when guidance < 1 (example: \"((ugly face))\")\nFor Flux based model (not Fast-Flux): requires negativePromptStrength > 0 and active only for inference types txt2img / img2img / controlnet."},"intermediateImages":{"type":"boolean","description":"Enable or disable the intermediate images generation (default: false)","default":false},"concepts":{"type":"array","items":{"type":"object","properties":{"modelId":{"type":"string","description":"The model ID (example: \"model_eyVcnFJcR92BxBkz7N6g5w\")"},"modelEpoch":{"type":"string","description":"The epoch of the model (example: \"000001\")\nOnly available for Flux Lora Trained models"},"scale":{"type":"number","description":"The scale of the model (example: 1.0)\nFor Flux Kontext Prompt Editing, the scale is between 0 and 2.","minimum":-2,"maximum":2}},"required":["modelId","scale"]}},"controlImage":{"type":"string","description":"The controlnet input image as a data URL (example: \"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVQYV2NgYAAAAAMAAWgmWQ0AAAAASUVORK5CYII=\")"},"guidance":{"type":"number","description":"Controls how closely the generated image follows the prompt. Higher values result in stronger adherence to the prompt. 
Default and allowed values depend on the model type:\n- For Flux dev models, the default is 3.5 and allowed values are within [0, 10]\n- For Flux pro models, the default is 3 and allowed values are within [2, 5]\n- For SDXL models, the default is 6 and allowed values are within [0, 20]\n- For SD1.5 models, the default is 7.5 and allowed values are within [0, 20]","minimum":0,"maximum":20},"numInferenceSteps":{"type":"number","description":"The number of denoising steps for each image generation (within [1, 150], default: 30)","minimum":1,"maximum":150},"numSamples":{"type":"number","description":"The number of images to generate (within [1, 128], default: 4)","minimum":1,"maximum":128},"width":{"type":"number","description":"The width of the generated images, must be a 8 multiple (within [64, 2048], default: 512)\nIf model.type is `sd-xl`, `sd-xl-lora`, `sd-xl-composition` the width must be within [512, 2048]\nIf model.type is `sd-1_5`, the width must be within [64, 1024]\nIf model.type is `flux.1.1-pro-ultra`, you can use the aspectRatio parameter instead","minimum":64,"maximum":2048},"negativePromptStrength":{"type":"number","description":"Only applicable for flux-dev based models for `txt2img`, `img2img`, and `controlnet` inference types.\n\nControls the influence of the negative prompt. Default 0 means the negative prompt has no effect. Higher values increase negative prompt influence.\nMust be > 0 if negativePrompt is provided.","minimum":0,"maximum":10},"referenceAttn":{"type":"boolean","description":"Whether to use reference query for self attention's context\nOnly for \"reference\" inference type","default":false},"prompt":{"type":"string","description":"Full text prompt including the model placeholder. 
(example: \"an illustration of phoenix in a fantasy world, flying over a mountain, 8k, bokeh effect\")"}},"required":["prompt","type"]}}},"PutWorkflowUserApprovalByWorkflowIdRequest":{"type":"object","required":["nodeId","workflowJobId"],"properties":{"nodeId":{"type":"string","description":"The ID of the user approval node to approve in the workflow"},"workflowJobId":{"type":"string","description":"The ID of the workflow job that contains the user approval node"}}},"DeleteWorkflowsByWorkflowIdResponse":{},"LockAssetByAssetIdRequest":{"type":"object","required":["lockExpiresAt"],"properties":{"lockExpiresAt":{"type":"string","description":"The ISO timestamp when the lock on the canvas will expire."}}},"GetDownloadAssetsResponse":{"type":"object","required":["jobId","jobStatus"],"properties":{"jobId":{"type":"string","description":"The job id associated with the download request"},"jobStatus":{"type":"string","description":"The current job status"},"downloadUrl":{"type":"string","description":"The download url"}}},"PutWorkflowRunByWorkflowIdResponse":{"type":"object","required":["job","workflow"],"properties":{"workflow":{"type":"object","properties":{"thumbnail":{"type":"object","description":"Currently the thumbnail is identical to the after asset.","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"before":{"type":"object","description":"A representation of an asset before being processed by the workflow","properties":{"assetId":{"type":"string","description":"The AssetId of the image used as a thumbnail for your model (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"url":{"type":"string","description":"The url of the image used as a thumbnail for your model"}},"required":["assetId","url"]},"inputs":{"type":"array","description":"The inputs of the workflow.","items":{"type":"object","description":"Defines the input parameters for a model. 
Use this to understand the available input parameters\nfor a given `modelId` when calling `POST /generate/custom/{modelId}`.\nSee {@link https://docs.scenario.com/docs/video-generation} for examples.","properties":{"parent":{"type":"boolean","description":"Whether this input represents a parent asset to assign to the produced assets.\nOnly available for \\`file\\` and \\`file_array\\` input types.\n\nFor \\`file_array\\`, the parent asset is the first item in the array."},"color":{"type":"boolean","description":"Whether the input is a color or not. Only available for \\`string\\` input type."},"inputs":{"type":"array","description":"The list of inputs which form an object within a container array.\nAll inputs are the same as the current object.\nThis is only available for type inputs_array inputs.","items":{"type":"object","properties":{}}},"minLength":{"type":"number","description":"The minimum allowed length for string inputs. Also applies to each item in \\`string_array\\`."},"backgroundBehavior":{"type":"string","description":"Specifies the background behavior for the input. 
Only available for \\`file\\` and \\`file_array\\`\ninput types with kind \\`image\\`.","enum":["opaque","transparent"]},"description":{"type":"string","description":"Help text displayed in the UI to provide additional information about the input"},"type":{"type":"string","description":"The data type of the input","enum":["boolean","file","file_array","inputs_array","model","model_array","number","number_array","string","string_array"]},"required":{"type":"object","description":"Set of rules that describes when this input is required:\n- \\`always\\`: Input is always required\n- \\`ifNotDefined\\`: Input is required when another specified input is not defined\n- \\`ifDefined\\`: Input is required when another specified input is defined\n- \\`conditionalValues\\`: Input is required when another input has a specific value\n\nBy default, the input is not required.","properties":{"always":{"type":"boolean","description":"Whether the input is always required"},"ifNotDefined":{"type":"object","description":"Makes this input required when another input is not defined:\n- Key: name of the input that must be undefined\n- Value: message to display when this input is required","properties":{}},"ifDefined":{"type":"object","description":"Makes this input required when another input is defined:\n- Key: name of the input that must be defined\n- Value: message to display when this input is required","properties":{}},"conditionalValues":{"type":"object","description":"Makes this input required when another input has a specific value:\n- Key: name of the input to check\n- Value: operation and allowed values that trigger the requirement","properties":{}}}},"modelTypes":{"type":"array","description":"The allowed model types for this input. 
Example: \\`[\"flux.1-lora\"]\\`.\nOnly available for \\`model_array\\` input type.","items":{"type":"string","enum":["custom","flux.1","flux.1-composition","flux.1-kontext-dev","flux.1-kontext-lora","flux.1-krea-dev","flux.1-krea-lora","flux.1-lora","flux.1-pro","flux.1.1-pro-ultra","flux1.1-pro","gpt-image-1","sd-1_5","sd-1_5-composition","sd-1_5-lora","sd-xl","sd-xl-composition","sd-xl-lora"]}},"min":{"type":"number","description":"The minimum allowed value. Only available for \\`number\\` and array input types."},"promptSpark":{"type":"boolean","description":"Whether the input is used with prompt spark. Only available for \\`string\\` input type."},"placeholder":{"type":"string","description":"Placeholder text for the input. Only available for 'string' input type."},"group":{"type":"string","description":"Used to visually group inputs together in the UI. Inputs with the same group value appear\nconsecutively in the UI."},"allowedValues":{"type":"array","description":"The allowed values for the input. For \\`string\\` or \\`number\\` types, creates a single-select\ndropdown.\nFor \\`string_array\\` type, creates a multi-select dropdown.","items":{}},"costImpact":{"type":"boolean","description":"Whether this input affects the model's cost calculation"},"max":{"type":"number","description":"The maximum allowed value. Only available for \\`number\\` and \\`array\\` input types."},"kind":{"type":"string","description":"The asset kind of the input. 
Only taken into account for \\`file\\` and \\`file_array\\` input types.","enum":["3d","audio","document","image","image-hdr","json","video"]},"maskFrom":{"type":"string","description":"The name of the file input field to use as the mask source"},"label":{"type":"string","description":"The label displayed in the UI for this input"},"hint":{"type":"string","description":"Hint text displayed in the UI as a tooltip to guide the user"},"name":{"type":"string","description":"The name that must be used to call the model through the API"},"step":{"type":"number","description":"The step increment for numeric inputs. Only available for \\`number\\` input type.","minimum":1},"prompt":{"type":"boolean","description":"Whether the input is a prompt. When true, displays as a text area with prompt spark feature.\nOnly available for \\`string\\` input type."},"maxLength":{"type":"number","description":"The maximum allowed length for \\`string\\` inputs. Also applies to each item in \\`string_array\\`."}},"required":["name","type"]}},"description":{"type":"string","description":"The description of the workflow."},"privacy":{"type":"string","enum":["private","public","unlisted"]},"uiConfig":{"type":"object","description":"The UI configuration for the workflow. 
This is managed by scenario webapp.","properties":{"selects":{"type":"object","description":"Configuration for the selects","additionalProperties":{"type":"object","properties":{}}},"inputProperties":{"type":"object","description":"Configuration for the input properties","additionalProperties":{"type":"object","properties":{"collapsed":{"type":"boolean"}}}},"presets":{"type":"array","description":"Configuration for the presets","items":{"type":"object","properties":{"presets":{"type":"object","properties":{}},"fields":{"type":"array","items":{"type":"string"}}},"required":["fields","presets"]}},"triggerGenerate":{"type":"object","description":"Configuration for the trigger generate button","properties":{"after":{"type":"string","description":"The 'name' of the input where the trigger generate button will be displayed (after the input).\nDo not specify both position and after."},"label":{"type":"string"},"position":{"type":"string","description":"The position of the trigger generate button. 
If position specified, the button will be displayed at the specified position.\nDo not specify both position and after.","enum":["bottom","top"]}},"required":["label"]},"lorasComponent":{"type":"object","description":"Configuration for the loras component","properties":{"modelIdInput":{"type":"string","description":"The input model id (example: a composition or a single LoRA modelId)\nIf specified, the model id will be attached to the output asset as a metadata\nIf the model-decomposer parser is specified on it, modelInput and scaleInput will be automatically populated"},"scaleInput":{"type":"string","description":"The input name of the scale (number_array)"},"label":{"type":"string","description":"The label of the component"},"modelInput":{"type":"string","description":"The input name of the model (model_array)"}},"required":["label","modelInput","scaleInput"]},"resolutionComponent":{"type":"object","description":"Configuration for the resolution component","properties":{"presets":{"type":"array","description":"The resolution presets","items":{"type":"object","properties":{"width":{"type":"number"},"label":{"type":"string"},"height":{"type":"number"}},"required":["height","label","width"]}},"widthInput":{"type":"string","description":"The input name of the width"},"label":{"type":"string","description":"The label of the component"},"heightInput":{"type":"string","description":"The input name of the height"}},"required":["heightInput","label","presets","widthInput"]}}},"shortDescription":{"type":"string"},"authorId":{"type":"string"},"ownerId":{"type":"string"},"editorInfo":{"type":"object","description":"The UI data about the workflow. 
This is managed by scenario webapp.","properties":{}},"createdAt":{"type":"string","description":"ISO string"},"tagSet":{"type":"array","description":"The tag set of the workflow.","items":{"type":"string"}},"name":{"type":"string"},"after":{"type":"object","description":"A representation of an asset after being processed by the workflow","properties":{"assetId":{"type":"string","description":"The AssetId of the image used as a thumbnail for your model (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"url":{"type":"string","description":"The url of the image used as a thumbnail for your model"}},"required":["assetId","url"]},"id":{"type":"string"},"flow":{"type":"array","description":"The flow of the workflow.","items":{"type":"object","properties":{"logicType":{"type":"string","description":"The type of the logic for the node.\nOnly available for logic nodes.","enum":["if-else"]},"dependsOn":{"type":"array","description":"The nodes that this node depends on.\nOnly available for nodes that have dependencies. Mainly used for user approval nodes.","items":{"type":"string"}},"modelId":{"type":"string","description":"The model id for the node. Mainly used for custom model tasks."},"inputs":{"type":"array","description":"The inputs of the node.","items":{"type":"object","description":"This is used to run the flow.","properties":{"parent":{"type":"boolean","description":"Whether this input represents a parent asset to assign to the produced assets.\nOnly available for \\`file\\` and \\`file_array\\` input types.\n\nFor \\`file_array\\`, the parent asset is the first item in the array."},"color":{"type":"boolean","description":"Whether the input is a color or not. 
Only available for \\`string\\` input type."},"inputs":{"type":"array","description":"The list of inputs which form an object within a container array.\nAll inputs are the same as the current object.\nThis is only available for type inputs_array inputs.","items":{"type":"object","properties":{}}},"minLength":{"type":"number","description":"The minimum allowed length for string inputs. Also applies to each item in \\`string_array\\`."},"backgroundBehavior":{"type":"string","description":"Specifies the background behavior for the input. Only available for \\`file\\` and \\`file_array\\`\ninput types with kind \\`image\\`.","enum":["opaque","transparent"]},"description":{"type":"string","description":"Help text displayed in the UI to provide additional information about the input"},"type":{"type":"string","description":"The data type of the input","enum":["boolean","file","file_array","inputs_array","model","model_array","number","number_array","string","string_array"]},"required":{"type":"object","description":"Set of rules that describes when this input is required:\n- \\`always\\`: Input is always required\n- \\`ifNotDefined\\`: Input is required when another specified input is not defined\n- \\`ifDefined\\`: Input is required when another specified input is defined\n- \\`conditionalValues\\`: Input is required when another input has a specific value\n\nBy default, the input is not required.","properties":{"always":{"type":"boolean","description":"Whether the input is always required"},"ifNotDefined":{"type":"object","description":"Makes this input required when another input is not defined:\n- Key: name of the input that must be undefined\n- Value: message to display when this input is required","properties":{}},"ifDefined":{"type":"object","description":"Makes this input required when another input is defined:\n- Key: name of the input that must be defined\n- Value: message to display when this input is 
required","properties":{}},"conditionalValues":{"type":"object","description":"Makes this input required when another input has a specific value:\n- Key: name of the input to check\n- Value: operation and allowed values that trigger the requirement","properties":{}}}},"modelTypes":{"type":"array","description":"The allowed model types for this input. Example: \\`[\"flux.1-lora\"]\\`.\nOnly available for \\`model_array\\` input type.","items":{"type":"string","enum":["custom","flux.1","flux.1-composition","flux.1-kontext-dev","flux.1-kontext-lora","flux.1-krea-dev","flux.1-krea-lora","flux.1-lora","flux.1-pro","flux.1.1-pro-ultra","flux1.1-pro","gpt-image-1","sd-1_5","sd-1_5-composition","sd-1_5-lora","sd-xl","sd-xl-composition","sd-xl-lora"]}},"ref":{"type":"object","description":"The reference to another input or output of the same workflow.\nMust have at least one of node or conditional.","properties":{"equal":{"type":"string","description":"This is the desired node output value if ref is an if/else node."},"node":{"type":"string","description":"The node id or 'workflow' if the source is a workflow input."},"conditional":{"type":"array","description":"The conditional nodes to reference.\nIf the conditional nodes are successful, the node will be successful.\nIf the conditional nodes are skipped, the node will be skipped.\nContains an array of node ids used to check the status of the nodes.","items":{"type":"string"}},"name":{"type":"string","description":"The name of the input or output to reference.\nIf the type is 'workflow', the name of the workflow input is required\nIf the type is 'node', the name is not mandatory, except if you want all outputs of the node.\nTo get all outputs of a node, you can use the name 'all'."}}},"min":{"type":"number","description":"The minimum allowed value. Only available for \\`number\\` and array input types."},"promptSpark":{"type":"boolean","description":"Whether the input is used with prompt spark. 
Only available for \\`string\\` input type."},"placeholder":{"type":"string","description":"Placeholder text for the input. Only available for 'string' input type."},"group":{"type":"string","description":"Used to visually group inputs together in the UI. Inputs with the same group value appear\nconsecutively in the UI."},"allowedValues":{"type":"array","description":"The allowed values for the input. For \\`string\\` or \\`number\\` types, creates a single-select\ndropdown.\nFor \\`string_array\\` type, creates a multi-select dropdown.","items":{}},"costImpact":{"type":"boolean","description":"Whether this input affects the model's cost calculation"},"max":{"type":"number","description":"The maximum allowed value. Only available for \\`number\\` and \\`array\\` input types."},"kind":{"type":"string","description":"The asset kind of the input. Only taken into account for \\`file\\` and \\`file_array\\` input types.","enum":["3d","audio","document","image","image-hdr","json","video"]},"maskFrom":{"type":"string","description":"The name of the file input field to use as the mask source"},"label":{"type":"string","description":"The label displayed in the UI for this input"},"hint":{"type":"string","description":"Hint text displayed in the UI as a tooltip to guide the user"},"name":{"type":"string","description":"The name that must be used to call the model through the API"},"step":{"type":"number","description":"The step increment for numeric inputs. Only available for \\`number\\` input type.","minimum":1},"prompt":{"type":"boolean","description":"Whether the input is a prompt. When true, displays as a text area with prompt spark feature.\nOnly available for \\`string\\` input type."},"maxLength":{"type":"number","description":"The maximum allowed length for \\`string\\` inputs. 
Also applies to each item in \\`string_array\\`."}},"required":["name","type"]}},"id":{"type":"string","description":"The id of the node."},"logic":{"type":"object","description":"The logic of the node.\nOnly available for logic nodes.","properties":{"default":{"type":"string","description":"The default case of the logic.\nContains the id/output of the node to execute if no case is matched.\nOnly available for if/else nodes."},"transform":{"type":"string","description":"The transform of the logic.\nOnly available for transform nodes."},"cases":{"type":"array","description":"The cases of the logic.\nOnly available for if/else nodes.","items":{"type":"object","properties":{"condition":{"type":"string"},"value":{"type":"string"}},"required":["condition","value"]}}}},"type":{"type":"string","description":"The type of the job for the node.","enum":["custom-model","generate-prompt","logic","remove-background","transform","user-approval","workflow"]},"workflowId":{"type":"string","description":"The workflow id for the node. 
Mainly used for workflow tasks."}},"required":["id","type"]}},"outputAssetKinds":{"type":"array","items":{"type":"string","enum":["3d","audio","document","image","image-hdr","json","video"]}},"status":{"type":"string","enum":["deleted","draft","ready"]},"updatedAt":{"type":"string","description":"ISO string"}},"required":["authorId","createdAt","description","editorInfo","flow","id","inputs","name","ownerId","privacy","status","tagSet","updatedAt"]},"job":{"type":"object","properties":{"createdAt":{"type":"string","description":"The job creation date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"jobId":{"type":"string","description":"The job ID (example: \"job_ocZCnG1Df35XRL1QyCZSRxAG8\")"},"metadata":{"type":"object","description":"Metadata of the job with some additional information","properties":{"output":{"type":"object","description":"The output of the job","properties":{}},"input":{"type":"object","description":"The inputs for the job","properties":{}},"assetIds":{"type":"array","description":"List of produced assets for this job","items":{"type":"string"}},"flow":{"type":"array","description":"The flow of the job. Only available for workflow jobs.","items":{"type":"object","properties":{"jobId":{"type":"string","description":"If the flow is part of a WorkflowJob, this is the jobId for the node.\njobId is only available for nodes started. A node \"Pending\" for a running workflow job is not started."},"assets":{"type":"array","description":"List of produced assets for this node.","items":{"type":"object","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]}},"logicType":{"type":"string","description":"The type of the logic for the node.\nOnly available for logic nodes.","enum":["if-else"]},"dependsOn":{"type":"array","description":"The nodes that this node depends on.\nOnly available for nodes that have dependencies. 
Mainly used for user approval nodes.","items":{"type":"string"}},"modelId":{"type":"string","description":"The model id for the node. Mainly used for custom model tasks."},"inputs":{"type":"array","description":"The inputs of the node.","items":{"type":"object","description":"This is used to run the flow.","properties":{"parent":{"type":"boolean","description":"Whether this input represents a parent asset to assign to the produced assets.\nOnly available for \\`file\\` and \\`file_array\\` input types.\n\nFor \\`file_array\\`, the parent asset is the first item in the array."},"color":{"type":"boolean","description":"Whether the input is a color or not. Only available for \\`string\\` input type."},"inputs":{"type":"array","description":"The list of inputs which form an object within a container array.\nAll inputs are the same as the current object.\nThis is only available for type inputs_array inputs.","items":{"type":"object","properties":{}}},"minLength":{"type":"number","description":"The minimum allowed length for string inputs. Also applies to each item in \\`string_array\\`."},"backgroundBehavior":{"type":"string","description":"Specifies the background behavior for the input. 
Only available for \\`file\\` and \\`file_array\\`\ninput types with kind \\`image\\`.","enum":["opaque","transparent"]},"description":{"type":"string","description":"Help text displayed in the UI to provide additional information about the input"},"type":{"type":"string","description":"The data type of the input","enum":["boolean","file","file_array","inputs_array","model","model_array","number","number_array","string","string_array"]},"required":{"type":"object","description":"Set of rules that describes when this input is required:\n- \\`always\\`: Input is always required\n- \\`ifNotDefined\\`: Input is required when another specified input is not defined\n- \\`ifDefined\\`: Input is required when another specified input is defined\n- \\`conditionalValues\\`: Input is required when another input has a specific value\n\nBy default, the input is not required.","properties":{"always":{"type":"boolean","description":"Whether the input is always required"},"ifNotDefined":{"type":"object","description":"Makes this input required when another input is not defined:\n- Key: name of the input that must be undefined\n- Value: message to display when this input is required","properties":{}},"ifDefined":{"type":"object","description":"Makes this input required when another input is defined:\n- Key: name of the input that must be defined\n- Value: message to display when this input is required","properties":{}},"conditionalValues":{"type":"object","description":"Makes this input required when another input has a specific value:\n- Key: name of the input to check\n- Value: operation and allowed values that trigger the requirement","properties":{}}}},"modelTypes":{"type":"array","description":"The allowed model types for this input. 
Example: \\`[\"flux.1-lora\"]\\`.\nOnly available for \\`model_array\\` input type.","items":{"type":"string","enum":["custom","flux.1","flux.1-composition","flux.1-kontext-dev","flux.1-kontext-lora","flux.1-krea-dev","flux.1-krea-lora","flux.1-lora","flux.1-pro","flux.1.1-pro-ultra","flux1.1-pro","gpt-image-1","sd-1_5","sd-1_5-composition","sd-1_5-lora","sd-xl","sd-xl-composition","sd-xl-lora"]}},"ref":{"type":"object","description":"The reference to another input or output of the same workflow.\nMust have at least one of node or conditional.","properties":{"equal":{"type":"string","description":"This is the desired node output value if ref is an if/else node."},"node":{"type":"string","description":"The node id or 'workflow' if the source is a workflow input."},"conditional":{"type":"array","description":"The conditional nodes to reference.\nIf the conditional nodes are successful, the node will be successful.\nIf the conditional nodes are skipped, the node will be skipped.\nContains an array of node ids used to check the status of the nodes.","items":{"type":"string"}},"name":{"type":"string","description":"The name of the input or output to reference.\nIf the type is 'workflow', the name of the workflow input is required\nIf the type is 'node', the name is not mandatory, except if you want all outputs of the node.\nTo get all outputs of a node, you can use the name 'all'."}}},"min":{"type":"number","description":"The minimum allowed value. Only available for \\`number\\` and array input types."},"promptSpark":{"type":"boolean","description":"Whether the input is used with prompt spark. Only available for \\`string\\` input type."},"placeholder":{"type":"string","description":"Placeholder text for the input. Only available for 'string' input type."},"group":{"type":"string","description":"Used to visually group inputs together in the UI. 
Inputs with the same group value appear\nconsecutively in the UI."},"allowedValues":{"type":"array","description":"The allowed values for the input. For \\`string\\` or \\`number\\` types, creates a single-select\ndropdown.\nFor \\`string_array\\` type, creates a multi-select dropdown.","items":{}},"costImpact":{"type":"boolean","description":"Whether this input affects the model's cost calculation"},"max":{"type":"number","description":"The maximum allowed value. Only available for \\`number\\` and \\`array\\` input types."},"kind":{"type":"string","description":"The asset kind of the input. Only taken into account for \\`file\\` and \\`file_array\\` input types.","enum":["3d","audio","document","image","image-hdr","json","video"]},"maskFrom":{"type":"string","description":"The name of the file input field to use as the mask source"},"label":{"type":"string","description":"The label displayed in the UI for this input"},"hint":{"type":"string","description":"Hint text displayed in the UI as a tooltip to guide the user"},"name":{"type":"string","description":"The name that must be used to call the model through the API"},"step":{"type":"number","description":"The step increment for numeric inputs. Only available for \\`number\\` input type.","minimum":1},"prompt":{"type":"boolean","description":"Whether the input is a prompt. When true, displays as a text area with prompt spark feature.\nOnly available for \\`string\\` input type."},"maxLength":{"type":"number","description":"The maximum allowed length for \\`string\\` inputs. 
Also applies to each item in \\`string_array\\`."}},"required":["name","type"]}},"id":{"type":"string","description":"The id of the node."},"logic":{"type":"object","description":"The logic of the node.\nOnly available for logic nodes.","properties":{"default":{"type":"string","description":"The default case of the logic.\nContains the id/output of the node to execute if no case is matched.\nOnly available for if/else nodes."},"transform":{"type":"string","description":"The transform of the logic.\nOnly available for transform nodes."},"cases":{"type":"array","description":"The cases of the logic.\nOnly available for if/else nodes.","items":{"type":"object","properties":{"condition":{"type":"string"},"value":{"type":"string"}},"required":["condition","value"]}}}},"type":{"type":"string","description":"The type of the job for the node.","enum":["custom-model","generate-prompt","logic","remove-background","transform","user-approval","workflow"]},"workflowId":{"type":"string","description":"The workflow id for the node. Mainly used for workflow tasks."},"status":{"type":"string","description":"The status of the node. 
Only available for WorkflowJob nodes.","enum":["failure","pending","processing","skipped","success"]}},"required":["id","status","type"]}},"workflowId":{"type":"string","description":"The workflow ID of the job if job is part of a workflow."},"workflowJobId":{"type":"string","description":"The workflow job ID of the job if job is part of a workflow job."}}},"statusHistory":{"type":"array","description":"The history of the different statuses the job went through with the ISO string date\nof when the job reached each statuses.","items":{"type":"object","properties":{"date":{"type":"string"},"status":{"type":"string","enum":["canceled","failure","finalizing","in-progress","pending","queued","success","warming-up"]}},"required":["date","status"]}},"progress":{"type":"number","description":"Progress of the job (between 0 and 1)"},"authorId":{"type":"string","description":"The author user ID (example: \"dcf121faaa1a0a0bbbd9ca1b73d62aea\")"},"jobType":{"type":"string","description":"The type of job","enum":["assets-download","canvas-export","caption","caption-llava","custom","describe-style","detection","embed","flux","flux-model-training","generate-prompt","image-generation","image-prompt-editing","inference","mesh-preview-rendering","model-download","model-import","model-training","openai-image-generation","patch-image","pixelate","reframe","remove-background","repaint","restyle","segment","skybox-3d","skybox-base-360","skybox-hdri","skybox-upscale-360","texture","translate","upload","upscale","upscale-skybox","upscale-texture","vectorize","workflow"]},"ownerId":{"type":"string","description":"The owner ID (example: \"team_U3Qmc8PCdWXwAQJ4Dvw4tV6D\")"},"billing":{"type":"object","description":"The billing of the job","properties":{"cuDiscount":{"type":"number"},"cuCost":{"type":"number"}},"required":["cuCost","cuDiscount"]},"status":{"type":"string","description":"The current status of the 
job","enum":["canceled","failure","finalizing","in-progress","pending","queued","success","warming-up"]},"updatedAt":{"type":"string","description":"The job last update date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"}},"required":["createdAt","jobId","jobType","metadata","progress","status","statusHistory","updatedAt"]}}},"PutImagesPixelateResponse":{},"PutAssetsTagsByAssetIdRequest":{"type":"object","properties":{"add":{"type":"array","description":"The list of tags to add","items":{"type":"string"}},"strict":{"type":"boolean","description":"If true, the function will throw an error if:\n- one of the tags to add already exists\n- one of the tags to delete is not found\nIf false, the endpoint will behave as if it was idempotent","default":true},"delete":{"type":"array","description":"The list of tags to delete","items":{"type":"string"}}}},"GetUserPersonaResponse":{"type":"object","required":["expiresAt","reason","value"],"properties":{"reason":{"type":"string","description":"The reason for that persona"},"value":{"type":"string","description":"The detected persona for given parameters","enum":["audioCreator","default","digitalArtist","enterpriseDesigner","enterpriseDeveloper","gamingIndustryProfessional","independentCreator","marketingManager","pixelArtist","professionalDesigner","professionalDeveloper","studioExecutive","threeDArtist","videoCreator"]},"expiresAt":{"type":"string","description":"The date and time when the persona will expire"}}},"DeleteModelsTrainingImagesByModelIdAndTrainingImageIdResponse":{},"PostRestyleInferencesRequest":{"type":"object","required":["image","styleImages"],"properties":{"image":{"type":"string","description":"The image to restyle. 
Must reference an existing AssetId or be a data URL."},"styleFidelity":{"type":"number","description":"The higher the value the more it will look like the style image(s)","minimum":0,"maximum":100},"controlEnd":{"type":"number","description":"End step for control."},"seed":{"type":"number","description":"Used to reproduce previous results. Default: randomly generated number.","minimum":0,"maximum":2147483647},"numInferenceSteps":{"type":"number","description":"The number of denoising steps for each image generation.","minimum":5,"maximum":50},"styleImages":{"type":"array","description":"List of style images. Most of the time, only one image is enough. It must be existing AssetIds.","items":{"type":"string"}},"promptFidelity":{"type":"number","description":"Increase the fidelity to the prompt during the restyle.","minimum":0,"maximum":100},"clustering":{"type":"boolean","description":"Activate clustering.","default":false},"sketch":{"type":"boolean","description":"Activate sketch detection instead of canny.","default":false},"structureFidelity":{"type":"number","description":"Strength for the input image structure preservation","minimum":0,"maximum":100},"prompt":{"type":"string","description":"A full text prompt to guide the restyle process. Default: empty string. 
Example: \"cute++ chibi character\""}}},"PutImagePatchRequest":{"type":"object","required":["assetId"],"properties":{"patch":{"type":"object","description":"The image to be merged.","properties":{"mode":{"type":"string","description":"The mode of merging the images: `override` or `erase`.","enum":["erase","override"],"default":"override"},"image":{"type":"string","description":"The source of the image to be merged, as a data URL (example: \"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVQYV2NgYAAAAAMAAWgmWQ0AAAAASUVORK5CYII=\") or the asset ID (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"}},"required":["image","mode"]},"backgroundColor":{"type":"string","description":"The background color as an hexadecimal code (ex: \"#FFFFFF\"), an html color (ex: \"red\") or \"transparent\" if \"format\" is \"png\". Default to \"white\""},"assetId":{"type":"string","description":"The original asset ID (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")."},"format":{"type":"string","description":"The output format. Default to \"png\"","enum":["jpeg","png"]},"position":{"type":"object","description":"The position of the image to be merged.","properties":{"x":{"type":"number","description":"The X position of the image to be merged, in pixels."},"y":{"type":"number","description":"The Y position of the image to be merged, in pixels."}},"required":["x","y"]},"allowOverflow":{"type":"boolean","description":"Whether to allow the merged image to extend the size of the original (when x or y are negative or merged image is bigger)"},"crop":{"type":"object","description":"The crop operation to apply to the image. Applied before any operation. 
For the backgroundColor: rgba, hex or named color are supported.","properties":{"backgroundColor":{"type":"string"},"top":{"type":"number"},"left":{"type":"number"},"width":{"type":"number"},"height":{"type":"number"}},"required":["height","left","top","width"]}}},"PostModelsCopyByModelIdResponse":{"type":"object","required":["model"],"properties":{"model":{"type":"object","properties":{"trainingImages":{"type":"array","description":"The URLs of the first 3 training images of the model. To retrieve the full set of images, get it by modelId","items":{"type":"object","properties":{"createdAt":{"type":"string","description":"The training image upload date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"automaticCaptioning":{"type":"string","description":"Automatic captioning of the image"},"downloadUrl":{"type":"string","description":"The URL of the image"},"name":{"type":"string","description":"The original file name of the image (example: \"my-training-image.jpg\")"},"description":{"type":"string","description":"Description for the image"},"id":{"type":"string","description":"The training image ID (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"}},"required":["automaticCaptioning","createdAt","description","downloadUrl","id","name"]}},"inputs":{"type":"array","description":"The inputs of the model. Only used for custom models. To retrieve this list, get it by modelId with GET /models/{modelId}","items":{"type":"object","description":"Defines the input parameters for a model. 
Use this to understand the available input parameters\nfor a given `modelId` when calling `POST /generate/custom/{modelId}`.\nSee {@link https://docs.scenario.com/docs/video-generation} for examples.","properties":{"parent":{"type":"boolean","description":"Whether this input represents a parent asset to assign to the produced assets.\nOnly available for \\`file\\` and \\`file_array\\` input types.\n\nFor \\`file_array\\`, the parent asset is the first item in the array."},"color":{"type":"boolean","description":"Whether the input is a color or not. Only available for \\`string\\` input type."},"inputs":{"type":"array","description":"The list of inputs which form an object within a container array.\nAll inputs are the same as the current object.\nThis is only available for type inputs_array inputs.","items":{"type":"object","properties":{}}},"minLength":{"type":"number","description":"The minimum allowed length for string inputs. Also applies to each item in \\`string_array\\`."},"backgroundBehavior":{"type":"string","description":"Specifies the background behavior for the input. 
Only available for \\`file\\` and \\`file_array\\`\ninput types with kind \\`image\\`.","enum":["opaque","transparent"]},"description":{"type":"string","description":"Help text displayed in the UI to provide additional information about the input"},"type":{"type":"string","description":"The data type of the input","enum":["boolean","file","file_array","inputs_array","model","model_array","number","number_array","string","string_array"]},"required":{"type":"object","description":"Set of rules that describes when this input is required:\n- \\`always\\`: Input is always required\n- \\`ifNotDefined\\`: Input is required when another specified input is not defined\n- \\`ifDefined\\`: Input is required when another specified input is defined\n- \\`conditionalValues\\`: Input is required when another input has a specific value\n\nBy default, the input is not required.","properties":{"always":{"type":"boolean","description":"Whether the input is always required"},"ifNotDefined":{"type":"object","description":"Makes this input required when another input is not defined:\n- Key: name of the input that must be undefined\n- Value: message to display when this input is required","properties":{}},"ifDefined":{"type":"object","description":"Makes this input required when another input is defined:\n- Key: name of the input that must be defined\n- Value: message to display when this input is required","properties":{}},"conditionalValues":{"type":"object","description":"Makes this input required when another input has a specific value:\n- Key: name of the input to check\n- Value: operation and allowed values that trigger the requirement","properties":{}}}},"modelTypes":{"type":"array","description":"The allowed model types for this input. 
Example: \\`[\"flux.1-lora\"]\\`.\nOnly available for \\`model_array\\` input type.","items":{"type":"string","enum":["custom","flux.1","flux.1-composition","flux.1-kontext-dev","flux.1-kontext-lora","flux.1-krea-dev","flux.1-krea-lora","flux.1-lora","flux.1-pro","flux.1.1-pro-ultra","flux1.1-pro","gpt-image-1","sd-1_5","sd-1_5-composition","sd-1_5-lora","sd-xl","sd-xl-composition","sd-xl-lora"]}},"min":{"type":"number","description":"The minimum allowed value. Only available for \\`number\\` and array input types."},"promptSpark":{"type":"boolean","description":"Whether the input is used with prompt spark. Only available for \\`string\\` input type."},"placeholder":{"type":"string","description":"Placeholder text for the input. Only available for 'string' input type."},"group":{"type":"string","description":"Used to visually group inputs together in the UI. Inputs with the same group value appear\nconsecutively in the UI."},"allowedValues":{"type":"array","description":"The allowed values for the input. For \\`string\\` or \\`number\\` types, creates a single-select\ndropdown.\nFor \\`string_array\\` type, creates a multi-select dropdown.","items":{}},"costImpact":{"type":"boolean","description":"Whether this input affects the model's cost calculation"},"max":{"type":"number","description":"The maximum allowed value. Only available for \\`number\\` and \\`array\\` input types."},"kind":{"type":"string","description":"The asset kind of the input. 
Only taken into account for \\`file\\` and \\`file_array\\` input types.","enum":["3d","audio","document","image","image-hdr","json","video"]},"maskFrom":{"type":"string","description":"The name of the file input field to use as the mask source"},"label":{"type":"string","description":"The label displayed in the UI for this input"},"hint":{"type":"string","description":"Hint text displayed in the UI as a tooltip to guide the user"},"name":{"type":"string","description":"The name that must be used to call the model through the API"},"step":{"type":"number","description":"The step increment for numeric inputs. Only available for \\`number\\` input type.","minimum":1},"prompt":{"type":"boolean","description":"Whether the input is a prompt. When true, displays as a text area with prompt spark feature.\nOnly available for \\`string\\` input type."},"maxLength":{"type":"number","description":"The maximum allowed length for \\`string\\` inputs. Also applies to each item in \\`string_array\\`."}},"required":["name","type"]}},"exampleAssetIds":{"type":"array","description":"List of all example asset IDs set up by the model owner","items":{"type":"string"}},"privacy":{"type":"string","description":"The privacy of the model (default: private)","enum":["private","public","unlisted"],"default":"private"},"softDeletionOn":{"type":"string","description":"The date when the model will be soft deleted (only for Free plan)"},"epoch":{"type":"string","description":"The epoch of the model. 
Only available for Flux Lora Trained models.\nIf not set, uses the final model epoch (latest)"},"uiConfig":{"type":"object","description":"The UI configuration for the model","properties":{"selects":{"type":"object","description":"Configuration for the selects","additionalProperties":{"type":"object","properties":{}}},"inputProperties":{"type":"object","description":"Configuration for the input properties","additionalProperties":{"type":"object","properties":{"collapsed":{"type":"boolean"}}}},"presets":{"type":"array","description":"Configuration for the presets","items":{"type":"object","properties":{"presets":{"type":"object","properties":{}},"fields":{"type":"array","items":{"type":"string"}}},"required":["fields","presets"]}},"triggerGenerate":{"type":"object","description":"Configuration for the trigger generate button","properties":{"after":{"type":"string","description":"The 'name' of the input where the trigger generate button will be displayed (after the input).\nDo not specify both position and after."},"label":{"type":"string"},"position":{"type":"string","description":"The position of the trigger generate button. 
If position specified, the button will be displayed at the specified position.\nDo not specify both position and after.","enum":["bottom","top"]}},"required":["label"]},"lorasComponent":{"type":"object","description":"Configuration for the loras component","properties":{"modelIdInput":{"type":"string","description":"The input model id (example: a composition or a single LoRA modelId)\nIf specified, the model id will be attached to the output asset as a metadata\nIf the model-decomposer parser is specified on it, modelInput and scaleInput will be automatically populated"},"scaleInput":{"type":"string","description":"The input name of the scale (number_array)"},"label":{"type":"string","description":"The label of the component"},"modelInput":{"type":"string","description":"The input name of the model (model_array)"}},"required":["label","modelInput","scaleInput"]},"resolutionComponent":{"type":"object","description":"Configuration for the resolution component","properties":{"presets":{"type":"array","description":"The resolution presets","items":{"type":"object","properties":{"width":{"type":"number"},"label":{"type":"string"},"height":{"type":"number"}},"required":["height","label","width"]}},"widthInput":{"type":"string","description":"The input name of the width"},"label":{"type":"string","description":"The label of the component"},"heightInput":{"type":"string","description":"The input name of the height"}},"required":["heightInput","label","presets","widthInput"]}}},"source":{"type":"string","description":"The source of the model","enum":["civitai","huggingface","other","scenario"]},"ownerId":{"type":"string","description":"The owner ID (example: \"team_VFhihHKMRZyDDnZAJwLb2Q\")"},"type":{"type":"string","description":"The model type (example: 
\"flux.1-lora\")","enum":["custom","flux.1","flux.1-composition","flux.1-kontext-dev","flux.1-kontext-lora","flux.1-krea-dev","flux.1-krea-lora","flux.1-lora","flux.1-pro","flux.1.1-pro-ultra","flux1.1-pro","gpt-image-1","sd-1_5","sd-1_5-composition","sd-1_5-lora","sd-xl","sd-xl-composition","sd-xl-lora"]},"createdAt":{"type":"string","description":"The model creation date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"complianceMetadata":{"type":"object","description":"Compliance and regulatory metadata for the model","properties":{"subProcessor":{"type":"string","description":"The sub-processor used for the model","enum":["AWS","Fal","Modal","Provider","Replicate"]},"modelProvider":{"type":"string","description":"The provider of the model (examples: 'Google', 'AWS', 'BFL', 'Meshy', etc.)"},"licenseTerms":{"type":"string","description":"URL to license terms"},"dataProcessingComment":{"type":"string","description":"Data processing comment (e.g., zeroRetention, temporaryRetention, etc.)","enum":["researchOnly","serviceImprovement","temporaryRetention","zeroRetention"]},"maintainer":{"type":"string","description":"The maintainer of the model","enum":["Fal","Provider","Replicate","Scenario"]}}},"trainingStats":{"type":"object","description":"Additional information about the model's training","properties":{"trainDuration":{"type":"number","description":"The training duration in seconds"},"queueDuration":{"type":"number","description":"The training queued duration in seconds"},"endedAt":{"type":"string","description":"The training end time as an ISO date string"},"startedAt":{"type":"string","description":"The training start time as an ISO date string"}}},"promptEmbedding":{"type":"string","description":"Fine-tune the model's inferences with prompt embedding"},"trainingProgress":{"type":"object","description":"Additional information about the training progress of the model","properties":{"stage":{"type":"string","description":"The stage of the 
request","enum":["pending","queued-for-train","running-train","starting-train"]},"remainingTimeMs":{"type":"number","description":"The remaining time in milliseconds"},"progress":{"type":"number","description":"The progress of the job","minimum":0,"maximum":1},"startedAt":{"type":"number","description":"The timestamp in millisecond marking the start of the process"},"position":{"type":"number","description":"Position of the job in the queue (ie. the number of job in the queue before this one)"},"updatedAt":{"type":"number","description":"Timestamp in milliseconds of the last time the training progress was updated"}},"required":["stage","updatedAt"]},"id":{"type":"string","description":"The model ID (example: \"model_eyVcnFJcR92BxBkz7N6g5w\")"},"class":{"type":"object","description":"The class of the model","properties":{"modelId":{"type":"string","description":"The model ID of the class (example: \"stable-diffusion-v1-5\")"},"name":{"type":"string","description":"The class name (example: \"Character Design\")"},"category":{"type":"string","description":"The category slug of the class (example: \"art-style\")"},"thumbnails":{"type":"array","description":"Some example images URLs to showcase the class","items":{"type":"string"}},"prompt":{"type":"string","description":"The class prompt (example: \"a character design\")"},"conceptPrompt":{"type":"string","description":"The concept prompt of the class (example: \"a sks character design\")"},"slug":{"type":"string","description":"The class slug (example: \"art-style-character-design\")"},"status":{"type":"string","description":"The class status (only published classes are listed, but unpublished classes can still appear in existing models)","enum":["published","unpublished"]}},"required":["category","conceptPrompt","modelId","name","prompt","slug","status","thumbnails"]},"updatedAt":{"type":"string","description":"The model last update date as an ISO string (example: 
\"2023-02-03T11:19:41.579Z\")"},"thumbnail":{"type":"object","description":"A thumbnail for your model","properties":{"assetId":{"type":"string","description":"The AssetId of the image used as a thumbnail for your model (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"url":{"type":"string","description":"The url of the image used as a thumbnail for your model"}},"required":["assetId","url"]},"accessRestrictions":{"type":"number","description":"The access restrictions of the model\n0: Free plan\n25: Creator plan\n50: Pro plan\n75: Team plan\n100: Enterprise plan"},"capabilities":{"type":"array","description":"List of model capabilities (example: [\"txt2img\", \"img2img\", \"txt2img_ip_adapter\", ...])","items":{"type":"string","enum":["3d23d","audio2audio","controlnet","controlnet_img2img","controlnet_inpaint","controlnet_inpaint_ip_adapter","controlnet_ip_adapter","controlnet_reference","controlnet_texture","img23d","img2img","img2img_ip_adapter","img2img_texture","img2txt","img2video","inpaint","inpaint_ip_adapter","outpaint","reference","reference_texture","txt23d","txt2audio","txt2img","txt2img_ip_adapter","txt2img_texture","txt2txt","txt2video","video2img","video2video"]}},"parentModelId":{"type":"string","description":"The id of the parent model"},"trainingImagePairs":{"type":"array","description":"Array of training image pairs","items":{"type":"object","properties":{"sourceId":{"type":"string","description":"The source asset ID (must be a training asset)"},"targetId":{"type":"string","description":"The target asset ID (must be a training asset)"},"instruction":{"type":"string","description":"The instruction for the image pair, source to target"}}}},"trainingImagesNumber":{"type":"number","description":"The total number of training images"},"custom":{"type":"boolean","description":"Whether the model is a custom model and can be used only with POST /generate/custom/{modelId} endpoint"},"modelKeyword":{"type":"string","description":"The model keyword, this is a 
legacy parameter, please use conceptPrompt in parameters"},"shortDescription":{"type":"string","description":"The model short description (example: \"This model generates highly detailed cinematic scenes.\")"},"authorId":{"type":"string","description":"The author user ID (example: \"user_VFhihHKMRZyDDnZAJwLb2Q\")"},"userId":{"type":"string","description":"(Deprecated) The user ID (example: \"user_VFhihHKMRZyDDnZAJwLb2Q\")"},"tags":{"type":"array","description":"The associated tags (example: [\"sci-fi\", \"landscape\"])","items":{"type":"string"}},"negativePromptEmbedding":{"type":"string","description":"Fine-tune the model's inferences with negative prompt embedding"},"concepts":{"type":"array","description":"The concepts is required for the type model: composition","items":{"type":"object","properties":{"modelId":{"type":"string","description":"The model ID (example: \"model_eyVcnFJcR92BxBkz7N6g5w\")"},"modelEpoch":{"type":"string","description":"The epoch of the model (example: \"000001\")\nOnly available for Flux Lora Trained models"},"scale":{"type":"number","description":"The scale of the model (example: 1.0)\nFor Flux Kontext Prompt Editing, the scale is between 0 and 2.","minimum":-2,"maximum":2}},"required":["modelId","scale"]}},"collectionIds":{"type":"array","description":"A list of CollectionId this model belongs to","items":{"type":"string"}},"name":{"type":"string","description":"The model name (example: \"Cinematic Realism\")"},"epochs":{"type":"array","description":"The epochs of the model. 
Only available for Flux Lora Trained models.","items":{"type":"object","properties":{"assets":{"type":"array","description":"The assets of the epoch if sample prompts has been supplied during training","items":{"type":"object","properties":{"assetId":{"type":"string","description":"The AssetId of the image during training (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"url":{"type":"string","description":"The url of the asset"}},"required":["assetId","url"]}},"epoch":{"type":"string","description":"The epoch hash to identify the epoch"}},"required":["epoch"]}},"parameters":{"type":"object","description":"The parameters of the model","properties":{"priorLossWeight":{"type":"number","description":"The weight of prior preservation loss\n\nOnly available for SD15 and SDXL LoRA training","minimum":0,"maximum":1.7976931348623157e+308,"exclusiveMinimum":true},"seed":{"type":"number","description":"Used to reproduce previous results. Default: randomly generated number.\n\nOnly available for SD15 and SDXL LoRA training","minimum":0,"maximum":9007199254740991},"numUNetTrainSteps":{"type":"number","description":"The number of training steps for the UNet\n\nOnly available for SDXL LoRA training","minimum":0,"maximum":40000},"sampleSourceImages":{"type":"array","description":"The sample prompt images (AssetIds) paired with samplePrompts\nOnly available for Flux LoRA training\nMust be the same length as samplePrompts","items":{"type":"string"}},"classPrompt":{"type":"string","description":"The prompt to specify images in the same class as provided instance images\n\nOnly available for SD15 training"},"wandbKey":{"type":"string","description":"The Weights And Bias key to use for logging. 
The maximum length is 40 characters"},"scaleLr":{"type":"boolean","description":"Whether to scale the learning rate\n\nNote: Legacy parameter, will be ignored\n\nOnly available for SD15 and SDXL LoRA training","default":false},"randomCropScale":{"type":"number","description":"Scale of random crops\n\nOnly available for SD15 and SDXL LoRA training","minimum":0,"maximum":1},"lrScheduler":{"type":"string","description":"The scheduler type to use (default: \"constant\")\n\nOnly available for SD15 and SDXL LoRA training","enum":["constant","constant-with-warmup","cosine","cosine-with-restarts","linear","polynomial"],"default":"constant"},"rank":{"type":"number","description":"The dimension of the LoRA update matrices\n\nOnly available for SDXL and Flux LoRA training\n\nDefault value varies depending on the model type:\n- For SDXL: 64\n- For Flux: 16","minimum":4,"maximum":64},"validationPrompt":{"type":"string","description":"Validation prompt\n\nOnly available for SD15 and SDXL LoRA training","default":""},"conceptPrompt":{"type":"string","description":"The prompt with identifier specifying the instance (or subject) of the class (example: \"a daiton dog\")\n\nDefault value varies depending on the model type:\n- For SD1.5: \"daiton\" if no class is associated with the model\n- For SDXL: \"daiton\"\n- For Flux: \"\""},"maxTrainSteps":{"type":"number","description":"Maximum number of training steps to execute (default: varies depending on the model type)\n\nFor SDXL LoRA training, please use `numTextTrainSteps` and `numUNetTrainSteps` instead\n\nDefault value varies depending on the model type:\n- For SD1.5: round((number of training images * 225) / 3)\n- For SDXL: number of training images * 175\n- For Flux: number of training images * 100\n\nMaximum value varies depending on the model type:\n- For SD1.5 and SDXL: [0, 40000]\n- For Flux: [0, 10000]","minimum":0,"maximum":40000},"nbEpochs":{"type":"number","description":"The number of epochs to train for\n\nOnly available 
for Flux LoRA training","minimum":1,"maximum":30},"textEncoderTrainingRatio":{"type":"number","description":"Whether to train the text encoder or not\n\nExample: For 100 steps and a value of 0.2, it means that the text encoder will be trained for 20 steps and then the UNet for 80 steps\n\nNote: Legacy parameter, please use `numTextTrainSteps` and `numUNetTrainSteps`\n\nOnly available for SD15 and SDXL LoRA training","minimum":0,"maximum":0.99},"validationFrequency":{"type":"number","description":"Validation frequency. Cannot be greater than maxTrainSteps value\n\nOnly available for SD15 and SDXL LoRA training","minimum":0},"nbRepeats":{"type":"number","description":"The number of times to repeat the training\n\nOnly available for Flux LoRA training","minimum":1,"maximum":30},"samplePrompts":{"type":"array","description":"The prompts to use for each epoch\nOnly available for Flux LoRA training","items":{"type":"string"}},"randomCropRatio":{"type":"number","description":"Ratio of random crops\n\nOnly available for SD15 and SDXL LoRA training","minimum":0,"maximum":1},"learningRate":{"type":"number","description":"Initial learning rate (after the potential warmup period)\n\nDefault value varies depending on the model type:\n- For SD1.5 and SDXL: 0.000005\n- For Flux: 0.0001","minimum":0,"exclusiveMinimum":true},"optimizeFor":{"type":"string","description":"Optimize the model training task for a specific type of input images. 
The available values are:\n- \"likeness\": optimize training for likeness or portrait (targets specific transformer blocks)\n- \"all\": train all transformer blocks\n- \"none\": train no specific transformer blocks\n\nThis parameter controls which double and single transformer blocks are trained\nduring the LoRA training process.\n\nOnly available for Flux LoRA training","enum":["likeness"],"default":"undefined"},"randomCrop":{"type":"boolean","description":"Whether to random crop or center crop images before resizing to the working resolution\n\nOnly available for SD15 and SDXL LoRA training","default":false},"numTextTrainSteps":{"type":"number","description":"The number of training steps for the text encoder\n\nOnly available for SDXL LoRA training","minimum":0,"maximum":40000},"learningRateTextEncoder":{"type":"number","description":"Initial learning rate (after the potential warmup period) for the text encoder\n\nMaximum [Flux LoRA: 0.001]\nDefault [SDXL: 0.00005 | Flux LoRA: 0.00001]\nMinimum [SDXL: 0 | Flux LoRA: 0.000001]","minimum":0,"maximum":0.001,"exclusiveMinimum":true},"learningRateUnet":{"type":"number","description":"Initial learning rate (after the potential warmup period) for the UNet\n\nOnly available for SDXL LoRA training","minimum":0,"exclusiveMinimum":true},"batchSize":{"type":"number","description":"The batch size\nLess steps, and will increase the learning rate\n\nOnly available for Flux LoRA training","minimum":1,"maximum":4}}},"compliantModelIds":{"type":"array","description":"List of base model IDs compliant with the model (example: [\"flux.1-dev\", \"flux.1-schnell\"])\nThis attribute is mainly used for Flux LoRA models","items":{"type":"string"}},"status":{"type":"string","description":"The model 
status","enum":["copying","failed","new","trained","training","training-canceled"]}},"required":["capabilities","collectionIds","createdAt","custom","exampleAssetIds","id","privacy","source","status","tags","trainingImagesNumber","type","updatedAt"]}}},"PutModelsByCollectionIdRequest":{"type":"object","required":["modelIds"],"properties":{"modelIds":{"type":"array","description":"The ids of the models to add to the collection. (Max 49 at once)","items":{"type":"string"}}}},"DeleteWebhookEndpointByIdResponse":{},"PostWorkflowsRequest":{"type":"object","required":["description","name"],"properties":{"name":{"type":"string"},"description":{"type":"string"}}},"PostSearchAssetsRequest":{"type":"object","properties":{"filter":{"type":"string","description":"Filter queries by an attribute's value","default":"undefined"},"image":{"type":"string","description":"Search for similar images with `image` as a reference.\n\nMust be an existing `AssetId` or a valid data URL."},"imageSemanticRatio":{"type":"number","description":"Image embedding ratio for hybrid search, applied when `image`, `images.like`, or `images.unlike`\nare provided","minimum":0,"maximum":1},"images":{"type":"object","description":"Search for similar images with `images.like` and `images.unlike` as a reference\n\nMust be arrays of existing `AssetId` or valid data URLs.","properties":{"like":{"type":"array","description":"Search for similar images with `images.like` as a reference.\n\nMust be an array of existing `AssetId` or valid data URLs.","items":{"type":"string"}},"unlike":{"type":"array","description":"Search for images that are not similar to `images.unlike` as a reference.\n\nMust be an array of existing `AssetId` or valid data URLs.","items":{"type":"string"}}}},"offset":{"type":"number","description":"Number of documents to skip. Must be used with `limit`. 
Starts from 0.","minimum":0},"public":{"type":"boolean","description":"Search for public images not necessarily belonging to the current `ownerId`","default":false},"hitsPerPage":{"type":"number","description":"Maximum number of documents returned for a page. Must be used with `page`.","minimum":1,"maximum":100},"query":{"type":"string","description":"A string used for querying search results.","default":"''"},"limit":{"type":"number","description":"Maximum number of documents returned. Must be used with `offset`.","minimum":1,"maximum":100},"sortBy":{"type":"array","description":"Sort the search results by the given attributes. Each attribute in the list must be followed by a colon (`:`) and the preferred sorting order: either ascending (`asc`) or descending (`desc`).\n\nExample: `['createdAt:desc']`","items":{"type":"string"}},"page":{"type":"number","description":"Request a specific page of results. Must be used with `hitsPerPage`.","minimum":1},"querySemanticRatio":{"type":"number","description":"Query embedding for hybrid search, if possible","minimum":0,"maximum":1}},"description":"At least one of the following fields must have a value: `query`, `filter`, `image`, or `images`.\n\n`image`, `images` are mutually exclusive."},"TooManyRequests":{"type":"object","required":["reason"],"properties":{"reason":{"type":"string","description":"Some additional information about the error"},"name":{"type":"string","description":"A specific error name"},"details":{"type":"object","description":"Error details","properties":{}}}},"GetModelsExamplesByModelIdResponse":{"type":"object","required":["examples"],"properties":{"examples":{"type":"array","items":{"type":"object","properties":{"modelId":{"type":"string","description":"Model id of the model used to generate the asset"},"inferenceParameters":{"type":"object","description":"The inference parameters used to generate the asset","properties":{"ipAdapterImageIds":{"type":"array","description":"Asset id of the input 
IpAdapter images","items":{"type":"string"}},"styleFidelity":{"type":"number","description":"If style_fidelity=1.0, control more important, else if style_fidelity=0.0, prompt more important, else balanced\nOnly for \"reference\" inference type","minimum":0,"maximum":1},"controlEnd":{"type":"number","description":"Specifies how long the ControlNet guidance should be applied during the inference process.\n\nOnly available for Flux.1-dev based models.\n\nThe value represents the percentage of total inference steps where the ControlNet guidance is active.\nFor example:\n- 1.0: ControlNet guidance is applied during all inference steps\n- 0.5: ControlNet guidance is only applied during the first half of inference steps\n\nDefault values:\n- 0.5 for Canny modality\n- 0.6 for all other modalities","minimum":0.1,"maximum":1},"modality":{"type":"string","description":"The modality associated with the control image used for the generation: it can either be an object with a combination of maximum\n\nFor models of SD1.5 family:\n - up to 3 modalities from `canny`, `pose`, `depth`, `lines`, `seg`, `scribble`, `lineart`, `normal-map`, `illusion`\n - or one of the following presets: `character`, `landscape`, `city`, `interior`.\n\nFor models of the SDXL family:\n - up to 3 modalities from `canny`, `pose`, `depth`, `seg`, `illusion`, `scribble`\n - or one of the following presets: `character`, `landscape`.\n\nFor models of the FLUX schnell or dev families:\n- one modality from: `canny`, `tile`, `depth`, `blur`, `pose`, `gray`, `low-quality`\n\nOptionally, you can associate a value to these modalities or presets. 
The value must be within `]0.0, 1.0]`.\n\nExamples:\n- `canny`\n- `depth:0.5,pose:1.0`\n- `canny:0.5,depth:0.5,lines:0.3`\n- `landscape`\n- `character:0.5`\n- `illusion:1`\n\nNote: if you use a value that is not supported by the model family, this will result in an error.","enum":["blur","canny","depth","gray","illusion","lineart","lines","low-quality","normal-map","pose","scribble","seg","sketch","tile"]},"seed":{"type":"string","description":"Used to reproduce previous results. Default: randomly generated number."},"strength":{"type":"number","description":"Controls the noise intensity introduced to the input image, where a value of 1.0 completely erases the original image's details. Available for img2img and inpainting. (within [0.01, 1.0], default: 0.75)","minimum":0.01,"maximum":1},"ipAdapterType":{"type":"string","description":"The type of IP Adapter model to use. Must be one of [`style`, `character`], default to `style`","enum":["character","style"],"default":"style"},"aspectRatio":{"type":"string","description":"The aspect ratio of the generated images. Only used for the model flux.1.1-pro-ultra.\nThe aspect ratio is a string formatted as \"width:height\" (example: \"16:9\").","enum":["16:9","1:1","21:9","2:3","3:2","3:4","4:3","4:5","5:4","9:16","9:21"],"default":"1:1"},"type":{"type":"string","description":"The type of inference to use. 
Example: txt2img, img2img, etc.\n\nSelecting the right type will condition the expected parameters.\n\nNote: if model.type is `sd-xl*` or `sd-1_5*`, when using the `\"inpaint\"` inference type, Scenario determines the best available `baseModel` for a given `modelId`: one of `[\"stable-diffusion-inpainting\", \"stable-diffusion-xl-1.0-inpainting-0.1\"] will be used.","enum":["controlnet","controlnet_img2img","controlnet_inpaint","controlnet_inpaint_ip_adapter","controlnet_ip_adapter","controlnet_reference","controlnet_texture","img2img","img2img_ip_adapter","img2img_texture","inpaint","inpaint_ip_adapter","outpaint","reference","reference_texture","txt2img","txt2img_ip_adapter","txt2img_texture"]},"ipAdapterImage":{"type":"string","description":"Signed URL to display the IpAdapter image"},"scheduler":{"type":"string","description":"The scheduler to use to override the default configured for the model. See detailed documentation for more details.","enum":["DDIMScheduler","DDPMScheduler","DEISMultistepScheduler","DPMSolverMultistepScheduler","DPMSolverSinglestepScheduler","EulerAncestralDiscreteScheduler","EulerDiscreteScheduler","HeunDiscreteScheduler","KDPM2AncestralDiscreteScheduler","KDPM2DiscreteScheduler","LCMScheduler","LMSDiscreteScheduler","PNDMScheduler","TCDScheduler","UniPCMultistepScheduler"]},"disableMerging":{"type":"boolean","description":"If set to true, the entire input image will likely change during inpainting. 
This results in faster inferences, but the output image will be harder to integrate if the input is just a small part of a larger image.","default":false},"disableModalityDetection":{"type":"boolean","description":"If false, the process uses the given image to detect the modality.\nIf true (default), the process will not try to detect the modality of the given image.\n\nFor example:\nwith `pose` modality and `false` value, the process will detect the pose of people in the given image\nwith `depth` modality and `false` value, the process will detect the depth of the given image\nwith `scribble` modality and `true` value, the process will use the given image as a scribble\n\n⚠️ For models of the FLUX schnell or dev families, this parameter is ignored. The modality detection is always disabled. ⚠️","default":true},"ipAdapterImages":{"type":"array","description":"Signed URL to display the IpAdapter images","items":{"type":"string"}},"baseModelId":{"type":"string","description":"The base model to use for the inference. 
Only Flux LoRA models can use this parameter.\nAllowed values are available in the model's attribute: `compliantModelIds`"},"controlStart":{"type":"number","description":"Specifies the starting point of the ControlNet guidance during the inference process.\n\nOnly available for Flux.1-dev based models.\n\nThe value represents the percentage of total inference steps where the ControlNet guidance starts.\nFor example:\n- 0.0: ControlNet guidance starts at the beginning of the inference steps\n- 0.5: ControlNet guidance starts at the middle of the inference steps","minimum":0,"maximum":0.9},"height":{"type":"number","description":"The height of the generated images, must be a 8 multiple (within [64, 2048], default: 512)\nIf model.type is `sd-xl`, `sd-xl-lora`, `sd-xl-composition` the height must be within [512, 2048]\nIf model.type is `sd-1_5`, the height must be within [64, 1024]\nIf model.type is `flux.1.1-pro-ultra`, you can use the aspectRatio parameter instead","minimum":64,"maximum":2048},"mask":{"type":"string","description":"Signed URL to display the mask image"},"controlImageId":{"type":"string","description":"Asset id of the controlnet input image"},"image":{"type":"string","description":"Signed URL to display the input image"},"imageId":{"type":"string","description":"Asset id of the input image"},"ipAdapterImageId":{"type":"string","description":"Asset id of the input IpAdapter image"},"ipAdapterScale":{"type":"number","description":"IpAdapter scale factor (within [0.0, 1.0], default: 0.9).","minimum":0,"maximum":1},"modelEpoch":{"type":"string","description":"The epoch of the model to use for the inference. 
Only available for Flux Lora Trained models."},"hideResults":{"type":"boolean","description":"If set, generated assets will be hidden and not returned in the list of images of the inference\nor when listing assets (default: false)","default":false},"ipAdapterScales":{"type":"array","description":"IpAdapter scale factors (within [0.0, 1.0], default: 0.9).","items":{"type":"number"}},"maskId":{"type":"string","description":"Asset id of the mask image"},"referenceAdain":{"type":"boolean","description":"Whether to use reference adain\nOnly for \"reference\" inference type","default":false},"negativePrompt":{"type":"string","description":"The prompt not to guide the image generation, ignored when guidance < 1 (example: \"((ugly face))\")\nFor Flux based model (not Fast-Flux): requires negativePromptStrength > 0 and active only for inference types txt2img / img2img / controlnet."},"intermediateImages":{"type":"boolean","description":"Enable or disable the intermediate images generation (default: false)","default":false},"concepts":{"type":"array","items":{"type":"object","properties":{"modelId":{"type":"string","description":"The model ID (example: \"model_eyVcnFJcR92BxBkz7N6g5w\")"},"modelEpoch":{"type":"string","description":"The epoch of the model (example: \"000001\")\nOnly available for Flux Lora Trained models"},"scale":{"type":"number","description":"The scale of the model (example: 1.0)\nFor Flux Kontext Prompt Editing, the scale is between 0 and 2.","minimum":-2,"maximum":2}},"required":["modelId","scale"]}},"controlImage":{"type":"string","description":"Signed URL to display the controlnet input image"},"guidance":{"type":"number","description":"Controls how closely the generated image follows the prompt. Higher values result in stronger adherence to the prompt. 
Default and allowed values depend on the model type:\n- For Flux dev models, the default is 3.5 and allowed values are within [0, 10]\n- For Flux pro models, the default is 3 and allowed values are within [2, 5]\n- For SDXL models, the default is 6 and allowed values are within [0, 20]\n- For SD1.5 models, the default is 7.5 and allowed values are within [0, 20]","minimum":0,"maximum":20},"numInferenceSteps":{"type":"number","description":"The number of denoising steps for each image generation (within [1, 150], default: 30)","minimum":1,"maximum":150},"numSamples":{"type":"number","description":"The number of images to generate (within [1, 128], default: 4)","minimum":1,"maximum":128},"width":{"type":"number","description":"The width of the generated images, must be a 8 multiple (within [64, 2048], default: 512)\nIf model.type is `sd-xl`, `sd-xl-lora`, `sd-xl-composition` the width must be within [512, 2048]\nIf model.type is `sd-1_5`, the width must be within [64, 1024]\nIf model.type is `flux.1.1-pro-ultra`, you can use the aspectRatio parameter instead","minimum":64,"maximum":2048},"negativePromptStrength":{"type":"number","description":"Only applicable for flux-dev based models for `txt2img`, `img2img`, and `controlnet` inference types.\n\nControls the influence of the negative prompt. Default 0 means the negative prompt has no effect. Higher values increase negative prompt influence.\nMust be > 0 if negativePrompt is provided.","minimum":0,"maximum":10},"referenceAttn":{"type":"boolean","description":"Whether to use reference query for self attention's context\nOnly for \"reference\" inference type","default":false},"prompt":{"type":"string","description":"Full text prompt including the model placeholder. 
(example: \"an illustration of phoenix in a fantasy world, flying over a mountain, 8k, bokeh effect\")"}},"required":["prompt","type"]},"inferenceId":{"type":"string","description":"Inference id of the inference used to generate the asset"},"asset":{"type":"object","description":"Asset generated by the inference","properties":{"preview":{"type":"object","description":"The asset's preview.\n\nContains the assetId and the url of the preview.","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"metadata":{"type":"object","description":"Metadata of the asset with some additional information","properties":{"maxIterations":{"type":"number"},"modality":{"type":"string","description":"Modality to detect","enum":["canny","depth","grayscale","lineart_anime","mlsd","normal","pose","scribble","segmentation","sketch"],"default":"canny"},"tileStyle":{"type":"boolean","description":"If set to true, during the upscaling process, the model will match tiles of the source image with tiles of the style image(s). This will result in a more coherent restyle. Works best with style images that have a similar composition.","default":false},"modelId":{"type":"string","description":"The modelId used to generate this asset"},"sourceProjectId":{"type":"string"},"bbox":{"type":"array","description":"A bounding box around the object of interest, in the format [x1, y1, x2, y2].","items":{}},"keypointThreshold":{"type":"number","description":"How polished is the surface? 0 is like a rough surface, 1 is like a mirror","minimum":0,"maximum":1},"type":{"type":"string","description":"The type of the asset. 
Ex: 'inference-txt2img' will represent an asset generated from a text to image model","enum":["3d-texture","3d-texture-albedo","3d-texture-metallic","3d-texture-mtl","3d-texture-normal","3d-texture-roughness","3d23d","3d23d-texture","audio2audio","background-removal","canvas","canvas-drawing","canvas-export","detection","generative-fill","image-prompt-editing","img23d","img2img","img2video","inference-controlnet","inference-controlnet-img2img","inference-controlnet-inpaint","inference-controlnet-inpaint-ip-adapter","inference-controlnet-ip-adapter","inference-controlnet-reference","inference-controlnet-texture","inference-img2img","inference-img2img-ip-adapter","inference-img2img-texture","inference-inpaint","inference-inpaint-ip-adapter","inference-reference","inference-reference-texture","inference-txt2img","inference-txt2img-ip-adapter","inference-txt2img-texture","patch","pixelization","reframe","restyle","segment","segmentation-image","segmentation-mask","skybox-3d","skybox-base-360","skybox-hdri","texture","texture-albedo","texture-ao","texture-edge","texture-height","texture-metallic","texture-normal","texture-smoothness","txt23d","txt2audio","txt2img","txt2video","unknown","uploaded","uploaded-3d","uploaded-audio","uploaded-avatar","uploaded-video","upscale","upscale-skybox","upscale-texture","upscale-video","vectorization","video2img","video2video"]},"text":{"type":"string","description":"A textual description / keywords describing the object of interest.","maxLength":100},"height":{"type":"number"},"nbMasks":{"type":"number"},"creativityDecay":{"type":"number","description":"Amount of decay in creativity over the upscale process. 
The lowest the value, the less the creativity will be preserved over the upscale process.","minimum":0,"maximum":100},"layerDifference":{"type":"number"},"scalingFactor":{"type":"number","description":"Scaling factor (when `targetWidth` not specified)","minimum":1,"maximum":16},"inputFidelity":{"type":"string","description":"When set to `high`, allows to better preserve details from the input images in the output.\nThis is especially useful when using images that contain elements like faces or logos that\nrequire accurate preservation in the generated image.\n\nYou can provide multiple input images that will all be preserved with high fidelity, but keep\nin mind that the first image will be preserved with richer textures and finer details, so if\nyou include elements such as faces, consider placing them in the first image.\n\nOnly available for the `gpt-image-1` model.","enum":["high","low"],"default":"low"},"modelType":{"type":"string","description":"The type of the generator used","enum":["custom","flux.1","flux.1-composition","flux.1-kontext-dev","flux.1-kontext-lora","flux.1-krea-dev","flux.1-krea-lora","flux.1-lora","flux.1-pro","flux.1.1-pro-ultra","flux1.1-pro","gpt-image-1","sd-1_5","sd-1_5-composition","sd-1_5-lora","sd-xl","sd-xl-composition","sd-xl-lora"]},"numOutputs":{"type":"number","description":"The number of outputs to generate.","minimum":1,"maximum":8},"angular":{"type":"number","description":"How angular is the surface? 
0 is like a sphere, 1 is like a mechanical object","minimum":0,"maximum":1},"targetHeight":{"type":"number","description":"The target height of the output image.","minimum":0,"maximum":2048},"concepts":{"type":"array","description":"Flux Kontext LoRA to style the image.\nFor Flux Kontext Prompt Editing.","items":{"type":"object","properties":{"modelId":{"type":"string","description":"The model ID (example: \"model_eyVcnFJcR92BxBkz7N6g5w\")"},"modelEpoch":{"type":"string","description":"The epoch of the model (example: \"000001\")\nOnly available for Flux Lora Trained models"},"scale":{"type":"number","description":"The scale of the model (example: 1.0)\nFor Flux Kontext Prompt Editing, the scale is between 0 and 2.","minimum":-2,"maximum":2}},"required":["modelId","scale"]}},"size":{"type":"number"},"spliceThreshold":{"type":"number"},"dilate":{"type":"number","description":"The number of pixels to dilate the result masks.","minimum":0,"maximum":30},"style":{"type":"string","enum":["3d-cartoon","3d-rendered","anime","cartoon","cinematic","claymation","cloud-skydome","comic","cyberpunk","enchanted","fantasy","ink","manga","manga-color","minimalist","neon-tron","oil-painting","pastel","photo","photography","psychedelic","retro-fantasy","scifi-concept-art","space","standard","whimsical"]},"imageFidelity":{"type":"number","description":"Strengthen the similarity to the original image during the upscale. 
Default: optimized for your preset and style.","minimum":0,"maximum":100},"inputLocation":{"type":"string","description":"Location of the input image in the output.","enum":["bottom","left","middle","right","top"],"default":"middle"},"seed":{"type":"string","description":"The seed used to generate this asset"},"preset":{"type":"string"},"trainingImage":{"type":"boolean"},"resultMask":{"type":"boolean","description":"Boolean to enable returning the masks (binary image) in the response.","default":true},"lockExpiresAt":{"type":"string","description":"The ISO timestamp when the lock on the canvas will expire"},"baseModelId":{"type":"string","description":"The baseModelId that may be changed at inference time"},"pathPrecision":{"type":"number"},"rootParentId":{"type":"string"},"sharpen":{"type":"boolean","description":"Sharpen tiles.","default":false},"cornerThreshold":{"type":"number"},"styleImages":{"type":"array","description":"List of style images. Most of the time, only one image is enough. It must be existing AssetIds.","items":{"type":"string"}},"texture":{"type":"string","description":"The asset to convert in texture maps. Must reference an existing AssetId."},"filterSpeckle":{"type":"number"},"minThreshold":{"type":"number","description":"Minimum threshold for Grayscale conversion"},"maxThreshold":{"type":"number","description":"Maximum threshold for Grayscale conversion"},"styleImagesFidelity":{"type":"number","description":"Condition the influence of the style image(s). The higher the value, the more the style images will influence the upscaled image.","minimum":0,"maximum":100},"depthImage":{"type":"string","description":"The control image processed by depth estimator. 
Must reference an existing AssetId."},"parentId":{"type":"string"},"parentJobId":{"type":"string"},"width":{"type":"number","description":"The width of the rendered image.","minimum":1024,"maximum":2048},"strength":{"type":"number","description":"The strength\n\nOnly available for the `flux-kontext` LoRA model."},"inferenceId":{"type":"string","description":"The id of the Inference describing how this image was generated"},"lengthThreshold":{"type":"number"},"originalAssetId":{"type":"string"},"backgroundOpacity":{"type":"number","description":"Int to set between 0 and 255 for the opacity of the background in the result images.","minimum":0,"maximum":255},"aspectRatio":{"type":"string","description":"The optional aspect ratio given for the generation, only applicable for some models"},"detailsLevel":{"type":"number","description":"Amount of details to remove or add","minimum":-50,"maximum":50},"fractality":{"type":"number","description":"Determine the scale at which the upscale process works.\n- With a small value, the upscale works at the largest scale, resulting in fewer added details and more coherent images. Ideal for portraits, for example.\n- With a large value, the upscale works at the smallest scale, resulting in more added details and more hallucinations. Ideal for landscapes, for example.\n\n(info): A small value is slower and more expensive to run.","minimum":0,"maximum":100},"horizontalExpansionRatio":{"type":"number","description":"(deprecated) Horizontal expansion ratio.","minimum":1,"maximum":2},"points":{"type":"array","description":"List of points (label, x, y) in the image where label = 0 for background and 1 for object.","items":{"type":"array","items":{}}},"mode":{"type":"string"},"colorPrecision":{"type":"number"},"promptFidelity":{"type":"number","description":"Increase the fidelity to the prompt during upscale. 
Default: optimized for your preset and style.","minimum":0,"maximum":100},"betterQuality":{"type":"boolean","description":"Remove small dark spots (i.e. “pepper”) and connect small bright cracks.","default":false},"removeBackground":{"type":"boolean","description":"Remove background for Grayscale detector","default":true},"creativity":{"type":"number","description":"Allow the generation of \"hallucinations\" during the upscale process, which adds additional details and deviates from the original image. Default: optimized for your preset and style.","minimum":0,"maximum":100},"structureFidelity":{"type":"number","description":"Strength for the input image structure preservation","minimum":0,"maximum":100},"imageType":{"type":"string","description":"Preserve the seamless properties of skybox or texture images. Input has to be of same type (seamless).","enum":["seamfull","skybox","texture"],"default":"seamfull"},"image":{"type":"string","description":"The input image to process. Must reference an existing AssetId or be a data URL."},"thumbnail":{"type":"object","description":"The thumbnail of the canvas","properties":{"assetId":{"type":"string","description":"The AssetId of the image used as a thumbnail for the canvas (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"url":{"type":"string","description":"The url of the image used as a thumbnail for the canvas"}},"required":["assetId","url"]},"structureImage":{"type":"string","description":"The control image for structure. A canny detector will be applied to this image. 
Must reference an existing AssetId."},"invert":{"type":"boolean","description":"To invert the relief","default":false},"kind":{"type":"string","enum":["3d","audio","document","image","image-hdr","json","video"]},"colorMode":{"type":"string"},"highThreshold":{"type":"number","description":"High threshold for Canny detector"},"verticalExpansionRatio":{"type":"number","description":"(deprecated) Vertical expansion ratio.","minimum":1,"maximum":2},"halfMode":{"type":"boolean"},"name":{"type":"string"},"negativePromptStrength":{"type":"number","description":"Controls the influence of the negative prompt. Default 0 means the negative prompt has no effect. Higher values increase negative prompt influence.\nMust be > 0 if negativePrompt is provided.","minimum":0,"maximum":10},"refinementSteps":{"type":"number","description":"Additional refinement steps before scaling.\n\nIf scalingFactor == 1, the refinement process will be applied (1 + refinementSteps) times.\nIf scalingFactor > 1, the refinement process will be applied refinementSteps times.","minimum":0,"maximum":4},"referenceImages":{"type":"array","description":"The reference images used for the asset generation or editing","items":{"type":"string"}},"styleFidelity":{"type":"number","description":"The higher the value the more it will look like the style image(s)","minimum":0,"maximum":100},"resultContours":{"type":"boolean","description":"Boolean to output the contours.","default":false},"controlEnd":{"type":"number","description":"End step for control."},"polished":{"type":"number","description":"How polished is the surface? 0 is like a rough surface, 1 is like a mirror","minimum":0,"maximum":1},"scheduler":{"type":"string","description":"The scheduler used to generate this asset"},"overrideEmbeddings":{"type":"boolean","description":"Override the embeddings of the model. Only your prompt and negativePrompt will be used. 
Use with caution."},"progressPercent":{"type":"number"},"factor":{"type":"number","description":"Contrast factor for Grayscale detector"},"mask":{"type":"string","description":"The mask used for the asset generation or editing"},"lowThreshold":{"type":"number","description":"Low threshold for Canny detector"},"depthFidelity":{"type":"number","description":"The depth fidelity if a depth image provided","minimum":0,"maximum":100},"raised":{"type":"number","description":"How raised is the surface? 0 is flat like water, 1 is like a very rough rock","minimum":0,"maximum":1},"copiedAt":{"type":"string","description":"The date when the asset was copied to a project"},"contours":{"type":"array","items":{"type":"array","items":{"type":"array","items":{"type":"array","items":{"type":"number"}}}}},"negativePrompt":{"type":"string","description":"The negative prompt used to generate this asset"},"defaultParameters":{"type":"boolean","description":"If true, use the default parameters","default":false},"resizeOption":{"type":"number","description":"Size proportion of the input image in the output.","minimum":0.1,"maximum":1},"outputIndex":{"type":"number"},"saveFlipbook":{"type":"boolean","description":"Save a flipbook of the texture. Deactivated when the input texture is larger than 2048x2048px","default":true},"guidance":{"type":"number","description":"The guidance used to generate this asset"},"numInferenceSteps":{"type":"number","description":"The number of denoising steps for each image generation.","minimum":5,"maximum":50},"hdr":{"type":"number"},"clustering":{"type":"boolean","description":"Activate clustering.","default":false},"shiny":{"type":"number","description":"How shiny is the surface? 
0 is like a matte surface, 1 is like a diamond","minimum":0,"maximum":1},"colorCorrection":{"type":"boolean","description":"Ensure upscaled tiles have the same color histogram as original tile.","default":true},"sketch":{"type":"boolean","description":"Activate sketch detection instead of canny.","default":false},"overlapPercentage":{"type":"number","description":"Overlap percentage for the output image.","minimum":0,"maximum":0.5},"prompt":{"type":"string","description":"The prompt that guided the asset generation or editing"},"resultImage":{"type":"boolean","description":"Boolean to enable output of the cut-out object.","default":false},"cannyStructureImage":{"type":"string","description":"The control image already processed by canny detector. Must reference an existing AssetId."},"geometryEnforcement":{"type":"number","description":"Apply extra control to the Skybox 360 geometry.\nThe higher the value, the more the 360 geometry will influence the generated skybox image.\n\nUse with caution. 
Default is adapted to the other parameters.","minimum":0,"maximum":100},"targetWidth":{"type":"number","description":"Target width for the upscaled image, take priority over scaling factor","minimum":1024,"maximum":16000}},"required":["kind","type"]},"automaticCaptioning":{"type":"string","description":"Automatic captioning of the asset"},"description":{"type":"string","description":"The description, it will contain in priority:\n- the manual description\n- the advanced captioning when the asset is used in training flow\n- the automatic captioning"},"privacy":{"type":"string","description":"The privacy of the asset","enum":["private","public","unlisted"]},"mimeType":{"type":"string","description":"The mime type of the asset (example: \"image/png\")"},"source":{"type":"string","description":"source of the asset","enum":["3d23d","3d23d:texture","3d:texture","3d:texture:albedo","3d:texture:metallic","3d:texture:mtl","3d:texture:normal","3d:texture:roughness","audio2audio","background-removal","canvas","canvas-drawing","canvas-export","detection","generative-fill","image-prompt-editing","img23d","img2img","img2video","inference-control-net","inference-control-net-img","inference-control-net-inpainting","inference-control-net-inpainting-ip-adapter","inference-control-net-ip-adapter","inference-control-net-reference","inference-control-net-texture","inference-img","inference-img-ip-adapter","inference-img-texture","inference-in-paint","inference-in-paint-ip-adapter","inference-reference","inference-reference-texture","inference-txt","inference-txt-ip-adapter","inference-txt-texture","patch","pixelization","reframe","restyle","segment","segmentation-image","segmentation-mask","skybox-3d","skybox-base-360","skybox-hdri","texture","texture:albedo","texture:ao","texture:edge","texture:height","texture:metallic","texture:normal","texture:smoothness","txt23d","txt2audio","txt2img","txt2video","unknown","uploaded","uploaded-3d","uploaded-audio","uploaded-avatar","uploaded-video"
,"upscale","upscale-skybox","upscale-texture","upscale-video","vectorization","video2img","video2video"]},"ownerId":{"type":"string","description":"The owner (project) ID (example: \"proj_23tlk332lkht3kl2\" or \"team_dlkhgs23tlk3hlkth32lkht3kl2\" for old teams)"},"createdAt":{"type":"string","description":"The asset creation date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"firstFrame":{"type":"object","description":"The video asset's first frame.\n\nContains the assetId and the url of the first frame.","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"editCapabilities":{"type":"array","description":"List of edit capabilities","items":{"type":"string","enum":["DETECTION","GENERATIVE_FILL","PIXELATE","PROMPT_EDITING","REFINE","REFRAME","REMOVE_BACKGROUND","SEGMENTATION","UPSCALE","UPSCALE_360","VECTORIZATION"]}},"embedding":{"type":"array","description":"The embedding of the asset when requested.\n\nOnly available when an asset can be embedded (ie: not Detection maps)","items":{"type":"number"}},"id":{"type":"string","description":"The asset ID (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"updatedAt":{"type":"string","description":"The asset last update date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"thumbnail":{"type":"object","description":"The asset's thumbnail.\n\nContains the assetId and the url of the thumbnail.","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"nsfw":{"type":"array","description":"The NSFW labels","items":{"type":"string"}},"kind":{"type":"string","description":"The kind of asset","enum":["3d","audio","document","image","image-hdr","json","video"]},"lastFrame":{"type":"object","description":"The video asset's last frame.\n\nContains the assetId and the url of the last 
frame.","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"originalFileUrl":{"type":"string","description":"The original file url.\n\nContains the url of the original file, without any conversion. Only available for some specific video, audio and threeD assets.\nIs only specified if the given asset data has been replaced with a new file during the creation of the asset."},"authorId":{"type":"string","description":"The author user ID (example: \"dcf121faaa1a0a0bbbd9ca1b73d62aea\")"},"url":{"type":"string","description":"Signed URL to get the asset content"},"isHidden":{"type":"boolean","description":"Whether the asset is hidden."},"tags":{"type":"array","description":"The associated tags (example: [\"sci-fi\", \"landscape\"])","items":{"type":"string"}},"outputIndex":{"type":"number","description":"The output index of the asset within a job\nThis index is a positive integer that starts at 0\nIt is used to differentiate between multiple outputs of the same job\nIf the job has only one output, this index is 0"},"collectionIds":{"type":"array","description":"A list of CollectionId this asset belongs to","items":{"type":"string"}},"properties":{"type":"object","description":"The properties of the asset, content may depend on the kind of asset returned","properties":{"transcription":{"type":"object","description":"Transcription of the audio","properties":{"text":{"type":"string"}},"required":["text"]},"hasNormals":{"type":"boolean","description":"Whether the mesh has normal vectors"},"format":{"type":"string","description":"Format of the mesh file (e.g. 
'glb', etc.)"},"description":{"type":"string","description":"Description of the audio"},"bitrate":{"type":"number","description":"Bitrate of the media in bits per second"},"boneCount":{"type":"number","description":"Number of bones if skeleton exists"},"classification":{"type":"string","description":"Classification of the audio","enum":["effect","interview","music","other","sound","speech","text","unknown"]},"sampleRate":{"type":"number","description":"Sample rate of the media in Hz"},"faceCount":{"type":"number","description":"Number of faces/triangles in the mesh"},"vertexCount":{"type":"number","description":"Number of vertices in the mesh"},"animationFrameCount":{"type":"number","description":"Number of animation frames if animations exist"},"duration":{"type":"number","description":"Duration of the media in seconds"},"frameRate":{"type":"number","description":"Frame rate of the video in frames per second"},"nbFrames":{"type":"number","description":"Number of frames in the video"},"channels":{"type":"number","description":"Number of channels of the audio"},"size":{"type":"number"},"hasSkeleton":{"type":"boolean","description":"Whether the mesh has bones/skeleton"},"width":{"type":"number"},"hasAnimations":{"type":"boolean","description":"Whether the mesh has animations"},"codecName":{"type":"string","description":"Codec name of the media"},"hasUVs":{"type":"boolean","description":"Whether the mesh has UV coordinates"},"dimensions":{"type":"array","description":"Bounding box dimensions [width, height, depth]","items":{}},"height":{"type":"number"}},"required":["size"]},"status":{"type":"string","description":"The actual status","enum":["error","pending","success"]}},"required":["authorId","collectionIds","createdAt","editCapabilities","id","kind","metadata","mimeType","ownerId","privacy","properties","source","status","tags","updatedAt","url"]},"job":{"type":"object","description":"The job associated with the 
asset","properties":{"createdAt":{"type":"string","description":"The job creation date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"jobId":{"type":"string","description":"The job ID (example: \"job_ocZCnG1Df35XRL1QyCZSRxAG8\")"},"metadata":{"type":"object","description":"Metadata of the job with some additional information","properties":{"output":{"type":"object","description":"The output of the job","properties":{}},"input":{"type":"object","description":"The inputs for the job","properties":{}},"assetIds":{"type":"array","description":"List of produced assets for this job","items":{"type":"string"}},"flow":{"type":"array","description":"The flow of the job. Only available for workflow jobs.","items":{"type":"object","properties":{"jobId":{"type":"string","description":"If the flow is part of a WorkflowJob, this is the jobId for the node.\njobId is only available for nodes started. A node \"Pending\" for a running workflow job is not started."},"assets":{"type":"array","description":"List of produced assets for this node.","items":{"type":"object","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]}},"logicType":{"type":"string","description":"The type of the logic for the node.\nOnly available for logic nodes.","enum":["if-else"]},"dependsOn":{"type":"array","description":"The nodes that this node depends on.\nOnly available for nodes that have dependencies. Mainly used for user approval nodes.","items":{"type":"string"}},"modelId":{"type":"string","description":"The model id for the node. 
Mainly used for custom model tasks."},"inputs":{"type":"array","description":"The inputs of the node.","items":{"type":"object","description":"This is used to run the flow.","properties":{"parent":{"type":"boolean","description":"Whether this input represents a parent asset to assign to the produced assets.\nOnly available for \\`file\\` and \\`file_array\\` input types.\n\nFor \\`file_array\\`, the parent asset is the first item in the array."},"color":{"type":"boolean","description":"Whether the input is a color or not. Only available for \\`string\\` input type."},"inputs":{"type":"array","description":"The list of inputs which form an object within a container array.\nAll inputs are the same as the current object.\nThis is only available for type inputs_array inputs.","items":{"type":"object","properties":{}}},"minLength":{"type":"number","description":"The minimum allowed length for string inputs. Also applies to each item in \\`string_array\\`."},"backgroundBehavior":{"type":"string","description":"Specifies the background behavior for the input. 
Only available for \\`file\\` and \\`file_array\\`\ninput types with kind \\`image\\`.","enum":["opaque","transparent"]},"description":{"type":"string","description":"Help text displayed in the UI to provide additional information about the input"},"type":{"type":"string","description":"The data type of the input","enum":["boolean","file","file_array","inputs_array","model","model_array","number","number_array","string","string_array"]},"required":{"type":"object","description":"Set of rules that describes when this input is required:\n- \\`always\\`: Input is always required\n- \\`ifNotDefined\\`: Input is required when another specified input is not defined\n- \\`ifDefined\\`: Input is required when another specified input is defined\n- \\`conditionalValues\\`: Input is required when another input has a specific value\n\nBy default, the input is not required.","properties":{"always":{"type":"boolean","description":"Whether the input is always required"},"ifNotDefined":{"type":"object","description":"Makes this input required when another input is not defined:\n- Key: name of the input that must be undefined\n- Value: message to display when this input is required","properties":{}},"ifDefined":{"type":"object","description":"Makes this input required when another input is defined:\n- Key: name of the input that must be defined\n- Value: message to display when this input is required","properties":{}},"conditionalValues":{"type":"object","description":"Makes this input required when another input has a specific value:\n- Key: name of the input to check\n- Value: operation and allowed values that trigger the requirement","properties":{}}}},"modelTypes":{"type":"array","description":"The allowed model types for this input. 
Example: \\`[\"flux.1-lora\"]\\`.\nOnly available for \\`model_array\\` input type.","items":{"type":"string","enum":["custom","flux.1","flux.1-composition","flux.1-kontext-dev","flux.1-kontext-lora","flux.1-krea-dev","flux.1-krea-lora","flux.1-lora","flux.1-pro","flux.1.1-pro-ultra","flux1.1-pro","gpt-image-1","sd-1_5","sd-1_5-composition","sd-1_5-lora","sd-xl","sd-xl-composition","sd-xl-lora"]}},"ref":{"type":"object","description":"The reference to another input or output of the same workflow.\nMust have at least one of node or conditional.","properties":{"equal":{"type":"string","description":"This is the desired node output value if ref is an if/else node."},"node":{"type":"string","description":"The node id or 'workflow' if the source is a workflow input."},"conditional":{"type":"array","description":"The conditional nodes to reference.\nIf the conditional nodes are successful, the node will be successful.\nIf the conditional nodes are skipped, the node will be skipped.\nContains an array of node ids used to check the status of the nodes.","items":{"type":"string"}},"name":{"type":"string","description":"The name of the input or output to reference.\nIf the type is 'workflow', the name is the name of the input of the workflow is required\nIf the type is 'node', the name is not mandatory, except if you want all outputs of the node.\nTo get all outputs of a node, you can use the name 'all'."}}},"min":{"type":"number","description":"The minimum allowed value. Only available for \\`number\\` and array input types."},"promptSpark":{"type":"boolean","description":"Whether the input is used with prompt spark. Only available for \\`string\\` input type."},"placeholder":{"type":"string","description":"Placeholder text for the input. Only available for 'string' input type."},"group":{"type":"string","description":"Used to visually group inputs together in the UI. 
Inputs with the same group value appear\nconsecutively in the UI."},"allowedValues":{"type":"array","description":"The allowed values for the input. For \\`string\\` or \\`number\\` types, creates a single-select\ndropdown.\nFor \\`string_array\\` type, creates a multi-select dropdown.","items":{}},"costImpact":{"type":"boolean","description":"Whether this input affects the model's cost calculation"},"max":{"type":"number","description":"The maximum allowed value. Only available for \\`number\\` and \\`array\\` input types."},"kind":{"type":"string","description":"The asset kind of the input. Only taken into account for \\`file\\` and \\`file_array\\` input types.","enum":["3d","audio","document","image","image-hdr","json","video"]},"maskFrom":{"type":"string","description":"The name of the file input field to use as the mask source"},"label":{"type":"string","description":"The label displayed in the UI for this input"},"hint":{"type":"string","description":"Hint text displayed in the UI as a tooltip to guide the user"},"name":{"type":"string","description":"The name that must be used to call the model through the API"},"step":{"type":"number","description":"The step increment for numeric inputs. Only available for \\`number\\` input type.","minimum":1},"prompt":{"type":"boolean","description":"Whether the input is a prompt. When true, displays as a text area with prompt spark feature.\nOnly available for \\`string\\` input type."},"maxLength":{"type":"number","description":"The maximum allowed length for \\`string\\` inputs. 
Also applies to each item in \\`string_array\\`."}},"required":["name","type"]}},"id":{"type":"string","description":"The id of the node."},"logic":{"type":"object","description":"The logic of the node.\nOnly available for logic nodes.","properties":{"default":{"type":"string","description":"The default case of the logic.\nContains the id/output of the node to execute if no case is matched.\nOnly available for if/else nodes."},"transform":{"type":"string","description":"The transform of the logic.\nOnly available for transform nodes."},"cases":{"type":"array","description":"The cases of the logic.\nOnly available for if/else nodes.","items":{"type":"object","properties":{"condition":{"type":"string"},"value":{"type":"string"}},"required":["condition","value"]}}}},"type":{"type":"string","description":"The type of the job for the node.","enum":["custom-model","generate-prompt","logic","remove-background","transform","user-approval","workflow"]},"workflowId":{"type":"string","description":"The workflow id for the node. Mainly used for workflow tasks."},"status":{"type":"string","description":"The status of the node. 
Only available for WorkflowJob nodes.","enum":["failure","pending","processing","skipped","success"]}},"required":["id","status","type"]}},"workflowId":{"type":"string","description":"The workflow ID of the job if job is part of a workflow."},"workflowJobId":{"type":"string","description":"The workflow job ID of the job if job is part of a workflow job."}}},"statusHistory":{"type":"array","description":"The history of the different statuses the job went through with the ISO string date\nof when the job reached each status.","items":{"type":"object","properties":{"date":{"type":"string"},"status":{"type":"string","enum":["canceled","failure","finalizing","in-progress","pending","queued","success","warming-up"]}},"required":["date","status"]}},"progress":{"type":"number","description":"Progress of the job (between 0 and 1)"},"authorId":{"type":"string","description":"The author user ID (example: \"dcf121faaa1a0a0bbbd9ca1b73d62aea\")"},"jobType":{"type":"string","description":"The type of job","enum":["assets-download","canvas-export","caption","caption-llava","custom","describe-style","detection","embed","flux","flux-model-training","generate-prompt","image-generation","image-prompt-editing","inference","mesh-preview-rendering","model-download","model-import","model-training","openai-image-generation","patch-image","pixelate","reframe","remove-background","repaint","restyle","segment","skybox-3d","skybox-base-360","skybox-hdri","skybox-upscale-360","texture","translate","upload","upscale","upscale-skybox","upscale-texture","vectorize","workflow"]},"ownerId":{"type":"string","description":"The owner ID (example: \"team_U3Qmc8PCdWXwAQJ4Dvw4tV6D\")"},"billing":{"type":"object","description":"The billing of the job","properties":{"cuDiscount":{"type":"number"},"cuCost":{"type":"number"}},"required":["cuCost","cuDiscount"]},"status":{"type":"string","description":"The current status of the 
job","enum":["canceled","failure","finalizing","in-progress","pending","queued","success","warming-up"]},"updatedAt":{"type":"string","description":"The job last update date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"}},"required":["createdAt","jobId","jobType","metadata","progress","status","statusHistory","updatedAt"]}},"required":["asset","modelId"]}}}},"PutModelsDescriptionByModelIdResponse":{"type":"object","required":["description"],"properties":{"description":{"type":"object","properties":{"models":{"type":"array","description":"The list of models referenced by the Markdown `{model}` tag in the description.","items":{"type":"object","properties":{"name":{"type":"string","description":"The model name (example: \"Cinematic Realism\")"},"privacy":{"type":"string","description":"The privacy of the model (default: private)","enum":["private","public","unlisted"],"default":"private"},"id":{"type":"string","description":"The model ID (example: \"model_eyVcnFJcR92BxBkz7N6g5w\")"},"shortDescription":{"type":"string","description":"The model short description (example: \"This model generates highly detailed cinematic scenes.\")"},"authorId":{"type":"string","description":"The author user ID (example: \"user_VFhihHKMRZyDDnZAJwLb2Q\")"},"ownerId":{"type":"string","description":"The owner ID (example: \"team_VFhihHKMRZyDDnZAJwLb2Q\")"},"type":{"type":"string","description":"The model type (example: \"flux.1-lora\")","enum":["custom","flux.1","flux.1-composition","flux.1-kontext-dev","flux.1-kontext-lora","flux.1-krea-dev","flux.1-krea-lora","flux.1-lora","flux.1-pro","flux.1.1-pro-ultra","flux1.1-pro","gpt-image-1","sd-1_5","sd-1_5-composition","sd-1_5-lora","sd-xl","sd-xl-composition","sd-xl-lora"]}},"required":["id","privacy","type"]}},"assets":{"type":"array","description":"The list of assets referenced by the Markdown `{asset}` tag in the description.","items":{"type":"object","properties":{"preview":{"type":"object","description":"The asset's 
preview.\n\nContains the assetId and the url of the preview.","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"thumbnail":{"type":"object","description":"The asset's thumbnail.\n\nContains the assetId and the url of the thumbnail.","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]},"kind":{"type":"string","description":"The kind of asset","enum":["3d","audio","document","image","image-hdr","json","video"]},"privacy":{"type":"string","description":"The privacy of the asset","enum":["private","public","unlisted"]},"id":{"type":"string","description":"The asset ID (example: \"asset_GTrL3mq4SXWyMxkOHRxlpw\")"},"mimeType":{"type":"string","description":"The mime type of the asset (example: \"image/png\")"},"originalFileUrl":{"type":"string","description":"The original file url.\n\nContains the url of the original file, without any conversion. Only available for some specific video, audio and threeD assets.\nIs only specified if the given asset data has been replaced with a new file during the creation of the asset."},"source":{"type":"string","description":"source of the 
asset","enum":["3d23d","3d23d:texture","3d:texture","3d:texture:albedo","3d:texture:metallic","3d:texture:mtl","3d:texture:normal","3d:texture:roughness","audio2audio","background-removal","canvas","canvas-drawing","canvas-export","detection","generative-fill","image-prompt-editing","img23d","img2img","img2video","inference-control-net","inference-control-net-img","inference-control-net-inpainting","inference-control-net-inpainting-ip-adapter","inference-control-net-ip-adapter","inference-control-net-reference","inference-control-net-texture","inference-img","inference-img-ip-adapter","inference-img-texture","inference-in-paint","inference-in-paint-ip-adapter","inference-reference","inference-reference-texture","inference-txt","inference-txt-ip-adapter","inference-txt-texture","patch","pixelization","reframe","restyle","segment","segmentation-image","segmentation-mask","skybox-3d","skybox-base-360","skybox-hdri","texture","texture:albedo","texture:ao","texture:edge","texture:height","texture:metallic","texture:normal","texture:smoothness","txt23d","txt2audio","txt2img","txt2video","unknown","uploaded","uploaded-3d","uploaded-audio","uploaded-avatar","uploaded-video","upscale","upscale-skybox","upscale-texture","upscale-video","vectorization","video2img","video2video"]},"authorId":{"type":"string","description":"The author user ID (example: \"dcf121faaa1a0a0bbbd9ca1b73d62aea\")"},"ownerId":{"type":"string","description":"The owner (project) ID (example: \"proj_23tlk332lkht3kl2\" or \"team_dlkhgs23tlk3hlkth32lkht3kl2\" for old teams)"},"properties":{"type":"object","description":"The properties of the asset, content may depend on the kind of asset returned","properties":{"transcription":{"type":"object","description":"Transcription of the audio","properties":{"text":{"type":"string"}},"required":["text"]},"hasNormals":{"type":"boolean","description":"Whether the mesh has normal vectors"},"format":{"type":"string","description":"Format of the mesh file (e.g. 
'glb', etc.)"},"description":{"type":"string","description":"Description of the audio"},"bitrate":{"type":"number","description":"Bitrate of the media in bits per second"},"boneCount":{"type":"number","description":"Number of bones if skeleton exists"},"classification":{"type":"string","description":"Classification of the audio","enum":["effect","interview","music","other","sound","speech","text","unknown"]},"sampleRate":{"type":"number","description":"Sample rate of the media in Hz"},"faceCount":{"type":"number","description":"Number of faces/triangles in the mesh"},"vertexCount":{"type":"number","description":"Number of vertices in the mesh"},"animationFrameCount":{"type":"number","description":"Number of animation frames if animations exist"},"duration":{"type":"number","description":"Duration of the media in seconds"},"frameRate":{"type":"number","description":"Frame rate of the video in frames per second"},"nbFrames":{"type":"number","description":"Number of frames in the video"},"channels":{"type":"number","description":"Number of channels of the audio"},"size":{"type":"number"},"hasSkeleton":{"type":"boolean","description":"Whether the mesh has bones/skeleton"},"width":{"type":"number"},"hasAnimations":{"type":"boolean","description":"Whether the mesh has animations"},"codecName":{"type":"string","description":"Codec name of the media"},"hasUVs":{"type":"boolean","description":"Whether the mesh has UV coordinates"},"dimensions":{"type":"array","description":"Bounding box dimensions [width, height, depth]","items":{}},"height":{"type":"number"}},"required":["size"]},"url":{"type":"string","description":"Signed URL to get the asset content"}},"required":["authorId","id","kind","mimeType","ownerId","privacy","properties","source","url"]}},"value":{"type":"string","description":"The markdown description of the model (ex: `# My model`).\nWe allow the `{asset:<assetId>}` and `{model:<modelId>}` 
tags."}},"required":["assets","models","value"]}}},"GetProjectWebhookEndpointsResponse":{"type":"object","required":["webhookEndpoints"],"properties":{"webhookEndpoints":{"type":"array","description":"A list of the project webhooks","items":{"type":"object","properties":{"createdAt":{"type":"string","description":"The date and time the webhook endpoint was created"},"nbTotalCalls":{"type":"number","description":"The number of calls to the webhook endpoint"},"enabledEvents":{"type":"array","description":"The events that trigger the webhook. ['*'] indicates that all events are enabled","items":{"type":"string","enum":["*","asset.download.completed","asset.download.created","asset.download.failed","generation.cancelled","generation.completed","generation.created","generation.failed","inference.cancelled","inference.completed","inference.created","inference.failed","model.download.completed","model.download.created","model.download.failed","model.training.cancelled","model.training.completed","model.training.failed","model.training.started"]}},"nbFailedCalls":{"type":"number","description":"The number of calls to the webhook endpoint that have failed"},"description":{"type":"string","description":"A description of the webhook endpoint"},"id":{"type":"string","description":"The ID of the webhook endpoint"},"secret":{"type":"string","description":"The endpoint's secret, used to generate webhook signatures. 
Only returned at creation"},"ownerId":{"type":"string","description":"The ID of the owner of the webhook endpoint"},"enabled":{"type":"boolean","description":"Whether the webhook is enabled"},"url":{"type":"string","description":"The URL of the webhook endpoint"},"updatedAt":{"type":"string","description":"The date and time the webhook endpoint was updated"}},"required":["createdAt","enabled","enabledEvents","id","nbFailedCalls","nbTotalCalls","ownerId","updatedAt","url"]}}}},"PostTextureInferencesRequest":{"type":"object","required":["texture"],"properties":{"defaultParameters":{"type":"boolean","description":"If true, use the default parameters","default":false},"polished":{"type":"number","description":"How polished is the surface? 0 is like a rough surface, 1 is like a mirror","minimum":0,"maximum":1},"angular":{"type":"number","description":"How angular is the surface? 0 is like a sphere, 1 is like a mechanical object","minimum":0,"maximum":1},"invert":{"type":"boolean","description":"To invert the relief","default":false},"saveFlipbook":{"type":"boolean","description":"Save a flipbook of the texture. Deactivated when the input texture is larger than 2048x2048px","default":true},"texture":{"type":"string","description":"The asset to convert in texture maps. Must reference an existing AssetId."},"raised":{"type":"number","description":"How raised is the surface? 0 is flat like water, 1 is like a very rough rock","minimum":0,"maximum":1},"shiny":{"type":"number","description":"How shiny is the surface? 0 is like a matte surface, 1 is like a diamond","minimum":0,"maximum":1}}},"PostTxt2imgTextureInferencesRequest":{"type":"object","required":["modelId","prompt"],"properties":{"seed":{"type":"string","description":"Used to reproduce previous results. Default: randomly generated number."},"modelId":{"type":"string","description":"The model id to use for the inference"},"modelEpoch":{"type":"string","description":"The epoch of the model to use for the inference. 
Only available for Flux Lora Trained models."},"hideResults":{"type":"boolean","description":"If set, generated assets will be hidden and not returned in the list of images of the inference\nor when listing assets (default: false)","default":false},"negativePrompt":{"type":"string","description":"The prompt not to guide the image generation, ignored when guidance < 1 (example: \"((ugly face))\")\nFor Flux based model (not Fast-Flux): requires negativePromptStrength > 0 and active only for inference types txt2img / img2img / controlnet."},"scheduler":{"type":"string","description":"The scheduler to use to override the default configured for the model. See detailed documentation for more details.","enum":["DDIMScheduler","DDPMScheduler","DEISMultistepScheduler","DPMSolverMultistepScheduler","DPMSolverSinglestepScheduler","EulerAncestralDiscreteScheduler","EulerDiscreteScheduler","HeunDiscreteScheduler","KDPM2AncestralDiscreteScheduler","KDPM2DiscreteScheduler","LCMScheduler","LMSDiscreteScheduler","PNDMScheduler","TCDScheduler","UniPCMultistepScheduler"]},"intermediateImages":{"type":"boolean","description":"Enable or disable the intermediate images generation (default: false)","default":false},"concepts":{"type":"array","items":{"type":"object","properties":{"modelId":{"type":"string","description":"The model ID (example: \"model_eyVcnFJcR92BxBkz7N6g5w\")"},"modelEpoch":{"type":"string","description":"The epoch of the model (example: \"000001\")\nOnly available for Flux Lora Trained models"},"scale":{"type":"number","description":"The scale of the model (example: 1.0)\nFor Flux Kontext Prompt Editing, the scale is between 0 and 2.","minimum":-2,"maximum":2}},"required":["modelId","scale"]}},"guidance":{"type":"number","description":"Controls how closely the generated image follows the prompt. Higher values result in stronger adherence to the prompt. 
Default and allowed values depend on the model type:\n- For Flux dev models, the default is 3.5 and allowed values are within [0, 10]\n- For Flux pro models, the default is 3 and allowed values are within [2, 5]\n- For SDXL models, the default is 6 and allowed values are within [0, 20]\n- For SD1.5 models, the default is 7.5 and allowed values are within [0, 20]","minimum":0,"maximum":20},"numInferenceSteps":{"type":"number","description":"The number of denoising steps for each image generation (within [1, 150], default: 30)","minimum":1,"maximum":150},"numSamples":{"type":"number","description":"The number of images to generate (within [1, 128], default: 4)","minimum":1,"maximum":128},"width":{"type":"number","description":"The width of the generated images, must be a 8 multiple (within [64, 2048], default: 512)\nIf model.type is `sd-xl`, `sd-xl-lora`, `sd-xl-composition` the width must be within [512, 2048]\nIf model.type is `sd-1_5`, the width must be within [64, 1024]\nIf model.type is `flux.1.1-pro-ultra`, you can use the aspectRatio parameter instead","minimum":64,"maximum":2048},"negativePromptStrength":{"type":"number","description":"Only applicable for flux-dev based models for `txt2img`, `img2img`, and `controlnet` inference types.\n\nControls the influence of the negative prompt. Default 0 means the negative prompt has no effect. Higher values increase negative prompt influence.\nMust be > 0 if negativePrompt is provided.","minimum":0,"maximum":10},"baseModelId":{"type":"string","description":"The base model to use for the inference. Only Flux LoRA models can use this parameter.\nAllowed values are available in the model's attribute: `compliantModelIds`"},"prompt":{"type":"string","description":"Full text prompt including the model placeholder. 
(example: \"an illustration of phoenix in a fantasy world, flying over a mountain, 8k, bokeh effect\")"},"height":{"type":"number","description":"The height of the generated images, must be a multiple of 8 (within [64, 2048], default: 512)\nIf model.type is `sd-xl`, `sd-xl-lora`, `sd-xl-composition` the height must be within [512, 2048]\nIf model.type is `sd-1_5`, the height must be within [64, 1024]\nIf model.type is `flux.1.1-pro-ultra`, you can use the aspectRatio parameter instead","minimum":64,"maximum":2048}}},"PostTxt2imgTextureInferencesResponse":{"type":"object","required":["inference","job"],"properties":{"creativeUnitsCost":{"type":"number","description":"The Creative Units cost for the request billed"},"creativeUnitsDiscount":{"type":"number","description":"The Creative Units discount for the request billed"},"inference":{"type":"object","properties":{"seed":{"type":"string","description":"Used to reproduce previous results. Default: randomly generated number."},"modelEpoch":{"type":"string","description":"The epoch of the model to use for the inference. Only available for Flux Lora Trained models."},"hideResults":{"type":"boolean","description":"If set, generated assets will be hidden and not returned in the list of images of the inference\nor when listing assets (default: false)","default":false},"type":{"type":"string","description":"The type of inference to use. Example: txt2img, img2img, etc.\n\nSelecting the right type will condition the expected parameters.\n\nNote: if model.type is `sd-xl*` or `sd-1_5*`, when using the `\"inpaint\"` inference type, Scenario determines the best available `baseModel` for a given `modelId`: one of `[\"stable-diffusion-inpainting\", \"stable-diffusion-xl-1.0-inpainting-0.1\"]` will be used.","enum":["txt2img","txt2img_ip_adapter","txt2img_texture","img2img","img2img_ip_adapter","img2img_texture","inpaint","outpaint","inpaint_ip_adapter","controlnet","controlnet_ip_adapter","reference","reference_texture","controlnet_reference","controlnet_img2img","controlnet_inpaint","controlnet_inpaint_ip_adapter","controlnet_texture"]},"negativePrompt":{"type":"string","description":"The prompt not to guide the image generation, ignored when guidance < 1 (example: \"((ugly face))\")\nFor Flux based model (not Fast-Flux): requires negativePromptStrength > 0 and active only for inference types txt2img / img2img / controlnet."},"scheduler":{"type":"string","description":"The scheduler to use to override the default configured for the model. See detailed documentation for more details.","enum":["DDIMScheduler","DDPMScheduler","DEISMultistepScheduler","DPMSolverMultistepScheduler","DPMSolverSinglestepScheduler","EulerAncestralDiscreteScheduler","EulerDiscreteScheduler","HeunDiscreteScheduler","KDPM2AncestralDiscreteScheduler","KDPM2DiscreteScheduler","LCMScheduler","LMSDiscreteScheduler","PNDMScheduler","TCDScheduler","UniPCMultistepScheduler"]},"intermediateImages":{"type":"boolean","description":"Enable or disable the intermediate images generation (default: false)","default":false},"concepts":{"type":"array","items":{"type":"object","properties":{"modelId":{"type":"string","description":"The model ID (example: \"model_eyVcnFJcR92BxBkz7N6g5w\")"},"modelEpoch":{"type":"string","description":"The epoch of the model (example: \"000001\")\nOnly available for Flux Lora Trained models"},"scale":{"type":"number","description":"The scale of the model (example: 1.0)\nFor Flux Kontext Prompt Editing, the scale is between 0 and 2.","minimum":-2,"maximum":2}},"required":["modelId","scale"]}},"guidance":{"type":"number","description":"Controls how closely the generated image follows the prompt. Higher values result in stronger adherence to the prompt. Default and allowed values depend on the model type:\n- For Flux dev models, the default is 3.5 and allowed values are within [0, 10]\n- For Flux pro models, the default is 3 and allowed values are within [2, 5]\n- For SDXL models, the default is 6 and allowed values are within [0, 20]\n- For SD1.5 models, the default is 7.5 and allowed values are within [0, 20]","minimum":0,"maximum":20},"numInferenceSteps":{"type":"number","description":"The number of denoising steps for each image generation (within [1, 150], default: 30)","minimum":1,"maximum":150},"numSamples":{"type":"number","description":"The number of images to generate (within [1, 128], default: 4)","minimum":1,"maximum":128},"width":{"type":"number","description":"The width of the generated images, must be a multiple of 8 (within [64, 2048], default: 512)\nIf model.type is `sd-xl`, `sd-xl-lora`, `sd-xl-composition` the width must be within [512, 2048]\nIf model.type is `sd-1_5`, the width must be within [64, 1024]\nIf model.type is `flux.1.1-pro-ultra`, you can use the aspectRatio parameter instead","minimum":64,"maximum":2048},"negativePromptStrength":{"type":"number","description":"Only applicable for flux-dev based models for `txt2img`, `img2img`, and `controlnet` inference types.\n\nControls the influence of the negative prompt. Default 0 means the negative prompt has no effect. Higher values increase negative prompt influence.\nMust be > 0 if negativePrompt is provided.","minimum":0,"maximum":10},"baseModelId":{"type":"string","description":"The base model to use for the inference. Only Flux LoRA models can use this parameter.\nAllowed values are available in the model's attribute: `compliantModelIds`"},"prompt":{"type":"string","description":"Full text prompt including the model placeholder. (example: \"an illustration of phoenix in a fantasy world, flying over a mountain, 8k, bokeh effect\")"},"height":{"type":"number","description":"The height of the generated images, must be a multiple of 8 (within [64, 2048], default: 512)\nIf model.type is `sd-xl`, `sd-xl-lora`, `sd-xl-composition` the height must be within [512, 2048]\nIf model.type is `sd-1_5`, the height must be within [64, 1024]\nIf model.type is `flux.1.1-pro-ultra`, you can use the aspectRatio parameter instead","minimum":64,"maximum":2048}},"required":["prompt","type"]},"job":{"type":"object","properties":{"createdAt":{"type":"string","description":"The job creation date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"},"jobId":{"type":"string","description":"The job ID (example: \"job_ocZCnG1Df35XRL1QyCZSRxAG8\")"},"metadata":{"type":"object","description":"Metadata of the job with some additional information","properties":{"output":{"type":"object","description":"The output of the job","properties":{}},"input":{"type":"object","description":"The inputs for the job","properties":{}},"assetIds":{"type":"array","description":"List of produced assets for this job","items":{"type":"string"}},"flow":{"type":"array","description":"The flow of the job. Only available for workflow jobs.","items":{"type":"object","properties":{"jobId":{"type":"string","description":"If the flow is part of a WorkflowJob, this is the jobId for the node.\njobId is only available for nodes started. A node \"Pending\" for a running workflow job is not started."},"assets":{"type":"array","description":"List of produced assets for this node.","items":{"type":"object","properties":{"assetId":{"type":"string"},"url":{"type":"string"}},"required":["assetId","url"]}},"logicType":{"type":"string","description":"The type of the logic for the node.\nOnly available for logic nodes.","enum":["if-else"]},"dependsOn":{"type":"array","description":"The nodes that this node depends on.\nOnly available for nodes that have dependencies. Mainly used for user approval nodes.","items":{"type":"string"}},"modelId":{"type":"string","description":"The model id for the node. Mainly used for custom model tasks."},"inputs":{"type":"array","description":"The inputs of the node.","items":{"type":"object","description":"This is used to run the flow.","properties":{"parent":{"type":"boolean","description":"Whether this input represents a parent asset to assign to the produced assets.\nOnly available for \\`file\\` and \\`file_array\\` input types.\n\nFor \\`file_array\\`, the parent asset is the first item in the array."},"color":{"type":"boolean","description":"Whether the input is a color or not. Only available for \\`string\\` input type."},"inputs":{"type":"array","description":"The list of inputs which form an object within a container array.\nAll inputs are the same as the current object.\nThis is only available for type inputs_array inputs.","items":{"type":"object","properties":{}}},"minLength":{"type":"number","description":"The minimum allowed length for string inputs. Also applies to each item in \\`string_array\\`."},"backgroundBehavior":{"type":"string","description":"Specifies the background behavior for the input. Only available for \\`file\\` and \\`file_array\\`\ninput types with kind \\`image\\`.","enum":["opaque","transparent"]},"description":{"type":"string","description":"Help text displayed in the UI to provide additional information about the input"},"type":{"type":"string","description":"The data type of the input","enum":["boolean","file","file_array","inputs_array","model","model_array","number","number_array","string","string_array"]},"required":{"type":"object","description":"Set of rules that describes when this input is required:\n- \\`always\\`: Input is always required\n- \\`ifNotDefined\\`: Input is required when another specified input is not defined\n- \\`ifDefined\\`: Input is required when another specified input is defined\n- \\`conditionalValues\\`: Input is required when another input has a specific value\n\nBy default, the input is not required.","properties":{"always":{"type":"boolean","description":"Whether the input is always required"},"ifNotDefined":{"type":"object","description":"Makes this input required when another input is not defined:\n- Key: name of the input that must be undefined\n- Value: message to display when this input is required","properties":{}},"ifDefined":{"type":"object","description":"Makes this input required when another input is defined:\n- Key: name of the input that must be defined\n- Value: message to display when this input is required","properties":{}},"conditionalValues":{"type":"object","description":"Makes this input required when another input has a specific value:\n- Key: name of the input to check\n- Value: operation and allowed values that trigger the requirement","properties":{}}}},"modelTypes":{"type":"array","description":"The allowed model types for this input. Example: \\`[\"flux.1-lora\"]\\`.\nOnly available for \\`model_array\\` input type.","items":{"type":"string","enum":["custom","flux.1","flux.1-composition","flux.1-kontext-dev","flux.1-kontext-lora","flux.1-krea-dev","flux.1-krea-lora","flux.1-lora","flux.1-pro","flux.1.1-pro-ultra","flux1.1-pro","gpt-image-1","sd-1_5","sd-1_5-composition","sd-1_5-lora","sd-xl","sd-xl-composition","sd-xl-lora"]}},"ref":{"type":"object","description":"The reference to another input or output of the same workflow.\nMust have at least one of node or conditional.","properties":{"equal":{"type":"string","description":"This is the desired node output value if ref is an if/else node."},"node":{"type":"string","description":"The node id or 'workflow' if the source is a workflow input."},"conditional":{"type":"array","description":"The conditional nodes to reference.\nIf the conditional nodes are successful, the node will be successful.\nIf the conditional nodes are skipped, the node will be skipped.\nContains an array of node ids used to check the status of the nodes.","items":{"type":"string"}},"name":{"type":"string","description":"The name of the input or output to reference.\nIf the type is 'workflow', the name is the name of the input of the workflow and is required\nIf the type is 'node', the name is not mandatory, except if you want all outputs of the node.\nTo get all outputs of a node, you can use the name 'all'."}}},"min":{"type":"number","description":"The minimum allowed value. Only available for \\`number\\` and array input types."},"promptSpark":{"type":"boolean","description":"Whether the input is used with prompt spark. Only available for \\`string\\` input type."},"placeholder":{"type":"string","description":"Placeholder text for the input. Only available for 'string' input type."},"group":{"type":"string","description":"Used to visually group inputs together in the UI. Inputs with the same group value appear\nconsecutively in the UI."},"allowedValues":{"type":"array","description":"The allowed values for the input. For \\`string\\` or \\`number\\` types, creates a single-select\ndropdown.\nFor \\`string_array\\` type, creates a multi-select dropdown.","items":{}},"costImpact":{"type":"boolean","description":"Whether this input affects the model's cost calculation"},"max":{"type":"number","description":"The maximum allowed value. Only available for \\`number\\` and \\`array\\` input types."},"kind":{"type":"string","description":"The asset kind of the input. Only taken into account for \\`file\\` and \\`file_array\\` input types.","enum":["3d","audio","document","image","image-hdr","json","video"]},"maskFrom":{"type":"string","description":"The name of the file input field to use as the mask source"},"label":{"type":"string","description":"The label displayed in the UI for this input"},"hint":{"type":"string","description":"Hint text displayed in the UI as a tooltip to guide the user"},"name":{"type":"string","description":"The name that must be used to call the model through the API"},"step":{"type":"number","description":"The step increment for numeric inputs. Only available for \\`number\\` input type.","minimum":1},"prompt":{"type":"boolean","description":"Whether the input is a prompt. When true, displays as a text area with prompt spark feature.\nOnly available for \\`string\\` input type."},"maxLength":{"type":"number","description":"The maximum allowed length for \\`string\\` inputs. Also applies to each item in \\`string_array\\`."}},"required":["name","type"]}},"id":{"type":"string","description":"The id of the node."},"logic":{"type":"object","description":"The logic of the node.\nOnly available for logic nodes.","properties":{"default":{"type":"string","description":"The default case of the logic.\nContains the id/output of the node to execute if no case is matched.\nOnly available for if/else nodes."},"transform":{"type":"string","description":"The transform of the logic.\nOnly available for transform nodes."},"cases":{"type":"array","description":"The cases of the logic.\nOnly available for if/else nodes.","items":{"type":"object","properties":{"condition":{"type":"string"},"value":{"type":"string"}},"required":["condition","value"]}}}},"type":{"type":"string","description":"The type of the job for the node.","enum":["custom-model","generate-prompt","logic","remove-background","transform","user-approval","workflow"]},"workflowId":{"type":"string","description":"The workflow id for the node. Mainly used for workflow tasks."},"status":{"type":"string","description":"The status of the node. Only available for WorkflowJob nodes.","enum":["failure","pending","processing","skipped","success"]}},"required":["id","status","type"]}},"workflowId":{"type":"string","description":"The workflow ID of the job if job is part of a workflow."},"workflowJobId":{"type":"string","description":"The workflow job ID of the job if job is part of a workflow job."}}},"statusHistory":{"type":"array","description":"The history of the different statuses the job went through with the ISO string date\nof when the job reached each status.","items":{"type":"object","properties":{"date":{"type":"string"},"status":{"type":"string","enum":["canceled","failure","finalizing","in-progress","pending","queued","success","warming-up"]}},"required":["date","status"]}},"progress":{"type":"number","description":"Progress of the job (between 0 and 1)"},"authorId":{"type":"string","description":"The author user ID (example: \"dcf121faaa1a0a0bbbd9ca1b73d62aea\")"},"jobType":{"type":"string","description":"The type of job","enum":["assets-download","canvas-export","caption","caption-llava","custom","describe-style","detection","embed","flux","flux-model-training","generate-prompt","image-generation","image-prompt-editing","inference","mesh-preview-rendering","model-download","model-import","model-training","openai-image-generation","patch-image","pixelate","reframe","remove-background","repaint","restyle","segment","skybox-3d","skybox-base-360","skybox-hdri","skybox-upscale-360","texture","translate","upload","upscale","upscale-skybox","upscale-texture","vectorize","workflow"]},"ownerId":{"type":"string","description":"The owner ID (example: \"team_U3Qmc8PCdWXwAQJ4Dvw4tV6D\")"},"billing":{"type":"object","description":"The billing of the job","properties":{"cuDiscount":{"type":"number"},"cuCost":{"type":"number"}},"required":["cuCost","cuDiscount"]},"status":{"type":"string","description":"The current status of the job","enum":["canceled","failure","finalizing","in-progress","pending","queued","success","warming-up"]},"updatedAt":{"type":"string","description":"The job last update date as an ISO string (example: \"2023-02-03T11:19:41.579Z\")"}},"required":["createdAt","jobId","jobType","metadata","progress","status","statusHistory","updatedAt"]}}}}}}