Models

List
client.models.list(query?: ModelListParams { blacklisted, collectionId, collectionIds, 15 more }, options?: RequestOptions): ModelsCursor<ModelListResponse { id, capabilities, collectionIds, 35 more }>
GET /models
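
For example, a minimal sketch of listing models and walking the paginated result (this assumes a `client` instance already constructed from this SDK, and that `ModelsCursor` supports async iteration; the collection ID is a placeholder):

```ts
// List models in a given collection and iterate the paginated cursor.
// `collectionId` is a documented ModelListParams field; the value is a placeholder.
const models = await client.models.list({ collectionId: 'coll_123' });
for await (const model of models) {
  console.log(model.id, model.type, model.status);
}
```
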
Create
client.models.create(params: ModelCreateParams { originalAssets, baseModelId, classSlug, 5 more }, options?: RequestOptions): ModelCreateResponse { model }
POST /models
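
A minimal sketch of creating a model, assuming a constructed `client`; `baseModelId` and `originalAssets` are documented ModelCreateParams fields, and the concrete values are placeholders:

```ts
// Create a LoRA model from previously uploaded training assets.
const { model } = await client.models.create({
  baseModelId: 'flux.1-lora',        // placeholder: base model to train against
  originalAssets: ['asset_abc123'],  // placeholder: training asset IDs
});
console.log(model.id, model.status); // a freshly created model typically reports "new"
```
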
Get Bulk
client.models.getBulk(params: ModelGetBulkParams { originalAssets, allTrainingImages, minimal, 4 more }, options?: RequestOptions): ModelGetBulkResponse { models }
POST /models/get-bulk
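
A sketch of fetching several models at once. The exact name of the field carrying the model IDs is among the params not expanded above, so `modelIds` here is a hypothetical name; `minimal` is documented:

```ts
// Fetch multiple models in a single round trip.
const { models } = await client.models.getBulk({
  modelIds: ['model_a', 'model_b'], // hypothetical field name; values are placeholders
  minimal: true,                    // documented ModelGetBulkParams field
});
```
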
Retrieve
client.models.retrieve(modelID: string, query?: ModelRetrieveParams { originalAssets }, options?: RequestOptions): ModelRetrieveResponse { model }
GET /models/{modelId}
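
A sketch of retrieving a single model (the model ID is the example value used throughout this page; treating `originalAssets` as a boolean flag is an assumption):

```ts
// Retrieve one model by ID.
const { model } = await client.models.retrieve('model_eyVcnFJcR92BxBkz7N6g5w', {
  originalAssets: true, // assumed boolean: include original assets in the response
});
```
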
Update
client.models.update(modelID: string, params: ModelUpdateParams { originalAssets, classSlug, concepts, 8 more }, options?: RequestOptions): ModelUpdateResponse { model }
PUT /models/{modelId}
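
A sketch of updating a composition model's concepts; `concepts` is a documented ModelUpdateParams field, and the concept shape (`modelId`, `scale`) matches the schema below:

```ts
// Re-weight the LoRA concepts of a composition model.
const { model } = await client.models.update('model_eyVcnFJcR92BxBkz7N6g5w', {
  concepts: [
    { modelId: 'model_loraA', scale: 0.8 }, // placeholder ID; scale must be in [-2, 2]
  ],
});
```
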
Delete
client.models.delete(modelID: string, options?: RequestOptions): ModelDeleteResponse
DELETE /models/{modelId}
Copy
client.models.copy(modelID: string, params: ModelCopyParams { originalAssets, copyAsTrained, copyExamples }, options?: RequestOptions): ModelCopyResponse { model }
POST /models/{modelId}/copy
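
A sketch of copying a model; `copyAsTrained` and `copyExamples` are documented params, and treating them as booleans is an assumption:

```ts
// Duplicate a model, keeping its trained weights but not its examples.
const { model: duplicate } = await client.models.copy('model_eyVcnFJcR92BxBkz7N6g5w', {
  copyAsTrained: true,  // assumed boolean
  copyExamples: false,  // assumed boolean
});
```
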
Download
client.models.download(modelID: string, body: ModelDownloadParams { modelEpoch }, options?: RequestOptions): ModelDownloadResponse { jobId }
POST /models/{modelId}/download
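
Download is asynchronous: the call returns a `jobId` rather than the weights themselves. A sketch, with the epoch value as a placeholder:

```ts
// Request a download job for a specific training epoch.
const { jobId } = await client.models.download('model_eyVcnFJcR92BxBkz7N6g5w', {
  modelEpoch: '000001', // documented param; only meaningful for Flux LoRA trained models
});
// Poll the jobs API (not shown here) with jobId to obtain the artifact.
```
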
Delete Images
client.models.deleteImages(modelID: string, params: ModelDeleteImagesParams { ids }, options?: RequestOptions): ModelDeleteImagesResponse
DELETE /models/{modelId}/images
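
A sketch of removing training images; `ids` is the documented param and takes training asset IDs (the ID below is the page's example value):

```ts
// Delete specific training images from a model.
await client.models.deleteImages('model_eyVcnFJcR92BxBkz7N6g5w', {
  ids: ['asset_GTrL3mq4SXWyMxkOHRxlpw'], // placeholder training asset ID
});
```
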
Update Tags
client.models.updateTags(modelID: string, body: ModelUpdateTagsParams { add, _delete, strict }, options?: RequestOptions): ModelUpdateTagsResponse { added, deleted }
PUT /models/{modelId}/tags
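
A sketch of adding and removing tags in one call. Note the documented `_delete` key (underscore-prefixed, presumably because `delete` is a reserved word); treating `strict` as a boolean is an assumption:

```ts
// Add one tag and remove another; the response echoes what changed.
const { added, deleted } = await client.models.updateTags('model_eyVcnFJcR92BxBkz7N6g5w', {
  add: ['sci-fi'],
  _delete: ['landscape'],
  strict: true, // assumed boolean
});
```
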
Transfer
client.models.transfer(modelID: string, body: ModelTransferParams { destinationProjectId, destinationTeamId }, options?: RequestOptions): ModelTransferResponse { model }
POST /models/{modelId}/transfer
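
A sketch of transferring a model; both destination fields are documented, and the IDs are placeholders:

```ts
// Move a model to another project and team.
const { model } = await client.models.transfer('model_eyVcnFJcR92BxBkz7N6g5w', {
  destinationProjectId: 'proj_123', // placeholder
  destinationTeamId: 'team_456',    // placeholder
});
```
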
Models
ModelListResponse { id, capabilities, collectionIds, 35 more }
id: string

The model ID (example: “model_eyVcnFJcR92BxBkz7N6g5w”)

capabilities: Array<"3d23d" | "audio2audio" | "audio2video" | 29 more>

List of model capabilities (example: [“txt2img”, “img2img”, “txt2img_ip_adapter”, …])

One of the following:
"3d23d"
"audio2audio"
"audio2video"
"controlnet"
"controlnet_img2img"
"controlnet_inpaint"
"controlnet_inpaint_ip_adapter"
"controlnet_ip_adapter"
"controlnet_reference"
"controlnet_texture"
"img23d"
"img2img"
"img2img_ip_adapter"
"img2img_texture"
"img2txt"
"img2video"
"inpaint"
"inpaint_ip_adapter"
"outpaint"
"reference"
"reference_texture"
"txt23d"
"txt2audio"
"txt2img"
"txt2img_ip_adapter"
"txt2img_texture"
"txt2txt"
"txt2video"
"video23d"
"video2audio"
"video2img"
"video2video"
collectionIds: Array<string>

A list of collection IDs this model belongs to

createdAt: string

The model creation date as an ISO string (example: “2023-02-03T11:19:41.579Z”)

custom: boolean

Whether the model is a custom model that can only be used with the POST /generate/custom/{modelId} endpoint

exampleAssetIds: Array<string>

List of all example asset IDs set up by the model owner

privacy: "private" | "public" | "unlisted"

The privacy of the model (default: private)

One of the following:
"private"
"public"
"unlisted"
source: "civitai" | "huggingface" | "other" | "scenario"

The source of the model

One of the following:
"civitai"
"huggingface"
"other"
"scenario"
status: "copying" | "failed" | "new" | 3 more

The model status

One of the following:
"copying"
"failed"
"new"
"trained"
"training"
"training-canceled"
tags: Array<string>

The associated tags (example: [“sci-fi”, “landscape”])

trainingImagesNumber: number

The total number of training images

type: "custom" | "elevenlabs-voice" | "flux.1" | 34 more

The model type (example: “flux.1-lora”)

One of the following:
"custom"
"elevenlabs-voice"
"flux.1"
"flux.1-composition"
"flux.1-kontext-dev"
"flux.1-kontext-lora"
"flux.1-krea-dev"
"flux.1-krea-lora"
"flux.1-lora"
"flux.1-pro"
"flux.1.1-pro-ultra"
"flux.2-dev-edit-lora"
"flux.2-dev-lora"
"flux.2-klein-4b-edit-lora"
"flux.2-klein-4b-lora"
"flux.2-klein-9b-edit-lora"
"flux.2-klein-9b-lora"
"flux.2-klein-base-4b-edit-lora"
"flux.2-klein-base-4b-lora"
"flux.2-klein-base-9b-edit-lora"
"flux.2-klein-base-9b-lora"
"flux1.1-pro"
"gpt-image-1"
"qwen-image-2512-lora"
"qwen-image-edit-2509-lora"
"qwen-image-edit-2511-lora"
"qwen-image-edit-lora"
"qwen-image-lora"
"sd-1_5"
"sd-1_5-composition"
"sd-1_5-lora"
"sd-xl"
"sd-xl-composition"
"sd-xl-lora"
"zimage-de-turbo-lora"
"zimage-lora"
"zimage-turbo-lora"
updatedAt: string

The model last update date as an ISO string (example: “2023-02-03T11:19:41.579Z”)

accessRestrictions?: 0 | 100 | 25 | 2 more

The access restrictions of the model. 0: Free plan, 25: Creator plan, 50: Pro plan, 75: Team plan, 100: Enterprise plan

One of the following:
0
100
25
50
75
authorId?: string

The author user ID (example: “user_VFhihHKMRZyDDnZAJwLb2Q”)

class?: Class { category, conceptPrompt, modelId, 5 more }

The class of the model

category: string

The category slug of the class (example: “art-style”)

conceptPrompt: string

The concept prompt of the class (example: “a sks character design”)

modelId: string

The model ID of the class (example: “stable-diffusion-v1-5”)

name: string

The class name (example: “Character Design”)

prompt: string

The class prompt (example: “a character design”)

slug: string

The class slug (example: “art-style-character-design”)

status: "published" | "unpublished"

The class status (only published classes are listed, but unpublished classes can still appear in existing models)

One of the following:
"published"
"unpublished"
thumbnails: Array<string>

Some example image URLs to showcase the class

compliantModelIds?: Array<string>

List of base model IDs compliant with the model (example: [“flux.1-dev”, “flux.1-schnell”]). This attribute is mainly used for Flux LoRA models

concepts?: Array<Concept>

Concepts are required for models of type composition

modelId: string

The model ID (example: “model_eyVcnFJcR92BxBkz7N6g5w”)

scale: number

The scale of the model (example: 1.0). For Flux Kontext Prompt Editing, the scale is between 0 and 2.

maximum: 2
minimum: -2
modelEpoch?: string

The epoch of the model (example: “000001”). Only available for Flux LoRA trained models

epoch?: string

The epoch of the model. Only available for Flux LoRA trained models. If not set, uses the final (latest) model epoch

epochs?: Array<Epoch>

The epochs of the model. Only available for Flux LoRA trained models.

epoch: string

The epoch hash to identify the epoch

assets?: Array<Asset>

The assets of the epoch, if sample prompts have been supplied during training

assetId: string

The AssetId of the image during training (example: “asset_GTrL3mq4SXWyMxkOHRxlpw”)

url: string

The url of the asset

inputs?: Array<Input>

The inputs of the model. Only used for custom models. To retrieve this list, get it by modelId with GET /models/{modelId}

name: string

The name that must be used to call the model through the API

type: "boolean" | "file" | "file_array" | 7 more

The data type of the input

One of the following:
"boolean"
"file"
"file_array"
"inputs_array"
"model"
"model_array"
"number"
"number_array"
"string"
"string_array"
allowedValues?: Array<unknown>

The allowed values for the input. For `string` or `number` types, creates a single-select dropdown. For `string_array` type, creates a multi-select dropdown.

backgroundBehavior?: "opaque" | "transparent"

Specifies the background behavior for the input. Only available for `file` and `file_array` input types with kind `image`.

One of the following:
"opaque"
"transparent"
color?: boolean

Whether the input is a color or not. Only available for `string` input type.

costImpact?: boolean

Whether this input affects the model’s cost calculation

default?: unknown

The default value for the input

description?: string

Help text displayed in the UI to provide additional information about the input

group?: string

Used to visually group inputs together in the UI. Inputs with the same group value appear consecutively in the UI.

hint?: string

Hint text displayed in the UI as a tooltip to guide the user

inputs?: Array<Record<string, unknown>>

The list of inputs which form an object within a container array. All inputs are the same as the current object. This is only available for type inputs_array inputs.

kind?: "3d" | "audio" | "document" | 4 more

The asset kind of the input. Only taken into account for `file` and `file_array` input types. If the model allows multiple kinds, the API will not be able to create the asset on the fly from a data URL that lacks the `data:<kind>,` prefix

One of the following:
"3d"
"audio"
"document"
"image"
"image-hdr"
"json"
"video"
label?: string

The label displayed in the UI for this input

maskFrom?: string

The name of the file input field to use as the mask source

max?: number

The maximum allowed value. Only available for `number` and `array` input types.

maxLength?: number

The maximum allowed length for `string` inputs. Also applies to each item in `string_array`.

maxSize?: number

The maximum allowed file size in bytes. Only applies to `file` and `file_array` input types. Validated against `asset.properties.size` at job creation time.

min?: number

The minimum allowed value. Only available for `number` and array input types.

minLength?: number

The minimum allowed length for string inputs. Also applies to each item in `string_array`.

modelTypes?: Array<"custom" | "elevenlabs-voice" | "flux.1" | 34 more>

The allowed model types for this input. Example: `["flux.1-lora"]`. Only available for `model_array` input type.

One of the following:
"custom"
"elevenlabs-voice"
"flux.1"
"flux.1-composition"
"flux.1-kontext-dev"
"flux.1-kontext-lora"
"flux.1-krea-dev"
"flux.1-krea-lora"
"flux.1-lora"
"flux.1-pro"
"flux.1.1-pro-ultra"
"flux.2-dev-edit-lora"
"flux.2-dev-lora"
"flux.2-klein-4b-edit-lora"
"flux.2-klein-4b-lora"
"flux.2-klein-9b-edit-lora"
"flux.2-klein-9b-lora"
"flux.2-klein-base-4b-edit-lora"
"flux.2-klein-base-4b-lora"
"flux.2-klein-base-9b-edit-lora"
"flux.2-klein-base-9b-lora"
"flux1.1-pro"
"gpt-image-1"
"qwen-image-2512-lora"
"qwen-image-edit-2509-lora"
"qwen-image-edit-2511-lora"
"qwen-image-edit-lora"
"qwen-image-lora"
"sd-1_5"
"sd-1_5-composition"
"sd-1_5-lora"
"sd-xl"
"sd-xl-composition"
"sd-xl-lora"
"zimage-de-turbo-lora"
"zimage-lora"
"zimage-turbo-lora"
parent?: boolean

Whether this input represents a parent asset to assign to the produced assets. Only available for `file` and `file_array` input types.

For `file_array`, the parent asset is the first item in the array.

placeholder?: string

Placeholder text for the input. Only available for `string` input type.

prompt?: boolean

Whether the input is a prompt. When true, displays as a text area with prompt spark feature. Only available for `string` input type.

promptSpark?: boolean

Whether the input is used with prompt spark. Only available for `string` input type.

required?: Required { always, conditionalValues, ifDefined, ifNotDefined }

Set of rules that describes when this input is required:

  • `always`: Input is always required
  • `ifNotDefined`: Input is required when another specified input is not defined
  • `ifDefined`: Input is required when another specified input is defined
  • `conditionalValues`: Input is required when another input has a specific value

By default, the input is not required (see the example input definition after this field list).

always?: boolean

Whether the input is always required

conditionalValues?: unknown

Makes this input required when another input has a specific value:

  • Key: name of the input to check
  • Value: operation and allowed values that trigger the requirement
ifDefined?: unknown

Makes this input required when another input is defined:

  • Key: name of the input that must be defined
  • Value: message to display when this input is required
ifNotDefined?: unknown

Makes this input required when another input is not defined:

  • Key: name of the input that must be undefined
  • Value: message to display when this input is required
step?: number

The step increment for numeric inputs. Only available for `number` input type.

minimum: 1
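
To make the input schema and its required rules concrete, here is a hypothetical input definition using only fields documented above; the `ifNotDefined` payload follows the key/message description, and the referenced `preset` input is invented for illustration:

```ts
// Hypothetical custom-model input: a numeric "steps" control.
const stepsInput = {
  name: 'steps',        // the name used to call the model through the API
  type: 'number',
  label: 'Steps',
  min: 1,
  max: 50,
  step: 1,              // the documented minimum for step is 1
  default: 28,
  costImpact: true,     // this input affects cost calculation
  required: {
    // Required when the (hypothetical) `preset` input is not defined.
    ifNotDefined: { preset: 'steps is required when no preset is selected' },
  },
};
```
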
modelKeyword?: string

The model keyword. This is a legacy parameter; please use conceptPrompt in parameters instead

name?: string

The model name (example: “Cinematic Realism”)

negativePromptEmbedding?: string

Fine-tune the model’s inferences with negative prompt embedding

ownerId?: string

The owner ID (example: “team_VFhihHKMRZyDDnZAJwLb2Q”)

parameters?: Parameters { age, batchSize, classPrompt, 29 more }

The parameters of the model

age?: string

Age group of the voice (for professional cloning)

Only available for ElevenLabs voice training

batchSize?: number

The batch size. A larger batch size means fewer steps and increases the learning rate.

Only available for Flux LoRA training

maximum: 4
minimum: 1
classPrompt?: string

The prompt to specify images in the same class as provided instance images

Only available for SD15 training

cloneType?: string

Type of voice cloning: “instant” (fast) or “professional” (higher quality, requires captcha)

Only available for ElevenLabs voice training

conceptPrompt?: string

The prompt with identifier specifying the instance (or subject) of the class (example: “a daiton dog”)

Default value varies depending on the model type:

  • For SD1.5: “daiton” if no class is associated with the model
  • For SDXL: “daiton”
  • For Flux: ""
gender?: string

Gender of the voice (for professional cloning)

Only available for ElevenLabs voice training

language?: string

Language of the audio samples (ISO 639-1 code)

Only available for ElevenLabs voice training

learningRate?: number

Initial learning rate (after the potential warmup period)

Default value varies depending on the model type:

  • For SD1.5 and SDXL: 0.000005
  • For Flux: 0.0001
minimum: 0 (exclusive)
learningRateTextEncoder?: number

Initial learning rate (after the potential warmup period) for the text encoder

  • Maximum: Flux LoRA: 0.001
  • Default: SDXL: 0.00005 | Flux LoRA: 0.00001
  • Minimum: SDXL: 0 | Flux LoRA: 0.000001

maximum: 0.001
minimum: 0 (exclusive)
learningRateUnet?: number

Initial learning rate (after the potential warmup period) for the UNet

Only available for SDXL LoRA training

minimum: 0 (exclusive)
lrScheduler?: "constant" | "constant-with-warmup" | "cosine" | 3 more

The scheduler type to use (default: “constant”)

Only available for SD15 and SDXL LoRA training

One of the following:
"constant"
"constant-with-warmup"
"cosine"
"cosine-with-restarts"
"linear"
"polynomial"
maxTrainSteps?: number

Maximum number of training steps to execute (default: varies depending on the model type)

For SDXL LoRA training, please use numTextTrainSteps and numUNetTrainSteps instead

Default value varies depending on the model type:

  • For SD1.5: round((number of training images * 225) / 3)
  • For SDXL: number of training images * 175
  • For Flux: number of training images * 100

Maximum value varies depending on the model type:

  • For SD1.5 and SDXL: [0, 40000]
  • For Flux: [0, 10000]
maximum: 40000
minimum: 0
nbEpochs?: number

The number of epochs to train for

Only available for Flux LoRA training

maximum: 30
minimum: 1
nbRepeats?: number

The number of times to repeat the training

Only available for Flux LoRA training

maximum: 30
minimum: 1
numTextTrainSteps?: number

The number of training steps for the text encoder

Only available for SDXL LoRA training

maximum: 40000
minimum: 0
numUNetTrainSteps?: number

The number of training steps for the UNet

Only available for SDXL LoRA training

maximum: 40000
minimum: 0
optimizeFor?: "likeness"

Optimize the model training task for a specific type of input images. The available values are:

  • “likeness”: optimize training for likeness or portrait (targets specific transformer blocks)
  • “all”: train all transformer blocks
  • “none”: train no specific transformer blocks

This parameter controls which double and single transformer blocks are trained during the LoRA training process.

Only available for Flux LoRA training

priorLossWeight?: number

The weight of prior preservation loss

Only available for SD15 and SDXL LoRA training

maximum: 1.7976931348623157
minimum: 0 (exclusive)
randomCrop?: boolean

Whether to random crop or center crop images before resizing to the working resolution

Only available for SD15 and SDXL LoRA training

randomCropRatio?: number

Ratio of random crops

Only available for SD15 and SDXL LoRA training

maximum: 1
minimum: 0
randomCropScale?: number

Scale of random crops

Only available for SD15 and SDXL LoRA training

maximum: 1
minimum: 0
rank?: number

The dimension of the LoRA update matrices

Only available for SDXL (deprecated), Flux LoRA and Musubi training

Default value varies depending on the model type:

  • For SDXL (deprecated): 64
  • For Flux: 16
  • For Musubi: 64

Each trainer enforces its own tighter limit (Flux LoRA: [2; 64], Musubi: [2; 128])

maximum: 128
minimum: 2
removeBackgroundNoise?: boolean

Whether to remove background noise from audio samples before cloning. When enabled, each sample must be at least 5 seconds long.

Only available for ElevenLabs voice training

samplePrompts?: Array<string>

The prompts to use for each epoch. Only available for Flux LoRA training

sampleSourceImages?: Array<string>

The sample prompt images (AssetIds) paired with samplePrompts. Only available for Flux LoRA training. Must be the same length as samplePrompts

scaleLr?: boolean

Whether to scale the learning rate

Note: Legacy parameter, will be ignored

Only available for SD15 and SDXL LoRA training

seed?: number

Used to reproduce previous results. Default: randomly generated number.

Only available for SD15 and SDXL LoRA training

maximum: 9007199254740991
minimum: 0
textEncoderTrainingRatio?: number

Whether to train the text encoder or not

Example: with 100 steps and a value of 0.2, the text encoder is trained for 20 steps, then the UNet for the remaining 80 steps

Note: Legacy parameter, please use numTextTrainSteps and numUNetTrainSteps

Only available for SD15 and SDXL LoRA training

maximum: 0.99
minimum: 0
validationFrequency?: number

Validation frequency. Cannot be greater than maxTrainSteps value

Only available for SD15 and SDXL LoRA training

minimum: 0
validationPrompt?: string

Validation prompt

Only available for SD15 and SDXL LoRA training

voiceDescription?: string

Description of the voice characteristics

Only available for ElevenLabs voice training

wandbKey?: string

The Weights & Biases key to use for logging. The maximum length is 40 characters

parentModelId?: string

The ID of the parent model

performanceStats?: PerformanceStats { variants, default }

Aggregated performance stats

variants: Array<Variant>

Performance metrics per variant

capability: string

The generation capability (example: “txt2img”, “img2video”, “txt2audio”)

computedAt: string

When these stats were last computed (ISO date)

variantKey: string

Unique variant identifier (example: “txt2img:1K”, “img2video:2K”, “txt2audio”)

arenaScore?: ArenaScore { arenaCategory, arenaModelName, fetchedAt, 5 more }

External quality score from arena.ai leaderboard

arenaCategory: string

Arena category (example: “text_to_image”, “image_to_video”)

arenaModelName: string

Model name on arena.ai

fetchedAt: string

When this score was last fetched (ISO date)

rank: number

Rank in the arena category

rating: number

ELO rating

ratingLower: number

ELO rating confidence interval lower bound

ratingUpper: number

ELO rating confidence interval upper bound

votes: number

Number of human votes

costPerAssetMaxCU?: number

Maximum cost per output asset (CU)

costPerAssetMinCU?: number

Minimum cost per output asset (CU)

costPerAssetP50CU?: number

Median cost per output asset (CU)

inferenceLatencyP50Sec?: number

Inference latency P50 per output asset (seconds)

inferenceLatencyP75Sec?: number

Inference latency P75 per output asset (seconds)

resolution?: string

The resolution bucket (example: “0.5K”, “1K”, “2K”, “4K”)

totalLatencyP50Sec?: number

Total latency P50 per output asset, including queue time (seconds)

totalLatencyP75Sec?: number

Total latency P75 per output asset, including queue time (seconds)

default?: string

Default variant key for quick model comparison

promptEmbedding?: string

Fine-tune the model’s inferences with prompt embedding

shortDescription?: string

The model short description (example: “This model generates highly detailed cinematic scenes.”)

softDeletionOn?: string

The date when the model will be soft deleted (only for Free plan)

thumbnail?: Thumbnail { assetId, url }

A thumbnail for your model

assetId: string

The AssetId of the image used as a thumbnail for your model (example: “asset_GTrL3mq4SXWyMxkOHRxlpw”)

url: string

The url of the image used as a thumbnail for your model

trainingImagePairs?: Array<TrainingImagePair>

Array of training image pairs

instruction?: string

The instruction for the image pair, source to target

sourceId?: string

The source asset ID (must be a training asset)

targetId?: string

The target asset ID (must be a training asset)

trainingImages?: Array<TrainingImage>

The URLs of the first 3 training images of the model. To retrieve the full set of images, fetch the model by modelId

id: string

The training image ID (example: “asset_GTrL3mq4SXWyMxkOHRxlpw”)

automaticCaptioning: string

Automatic captioning of the image

createdAt: string

The training image upload date as an ISO string (example: “2023-02-03T11:19:41.579Z”)

description: string

Description for the image

downloadUrl: string

The URL of the image

name: string

The original file name of the image (example: “my-training-image.jpg”)

trainingProgress?: TrainingProgress { stage, updatedAt, position, 3 more }

Additional information about the training progress of the model

stage: "pending" | "pending-captcha" | "queued-for-train" | 2 more

The stage of the request

One of the following:
"pending"
"pending-captcha"
"queued-for-train"
"running-train"
"starting-train"
updatedAt: number

Timestamp in milliseconds of the last time the training progress was updated

position?: number

Position of the job in the queue (i.e. the number of jobs in the queue before this one)

progress?: number

The progress of the job

maximum: 1
minimum: 0
remainingTimeMs?: number

The remaining time in milliseconds

startedAt?: number

The timestamp in milliseconds marking the start of the process

trainingStats?: TrainingStats { endedAt, queueDuration, startedAt, trainDuration }

Additional information about the model’s training

endedAt?: string

The training end time as an ISO date string

queueDuration?: number

The training queue duration in seconds

startedAt?: string

The training start time as an ISO date string

trainDuration?: number

The training duration in seconds

uiConfig?: UiConfig { inputProperties, lorasComponent, presets, 3 more }

The UI configuration for the model

inputProperties?: Record<string, InputProperties>

Configuration for the input properties

collapsed?: boolean
lorasComponent?: LorasComponent { label, modelInput, scaleInput, modelIdInput }

Configuration for the loras component

label: string

The label of the component

modelInput: string

The input name of the model (model_array)

scaleInput: string

The input name of the scale (number_array)

modelIdInput?: string

The input model ID (example: a composition or a single LoRA modelId). If specified, the model ID will be attached to the output asset as metadata. If the model-decomposer parser is specified on it, modelInput and scaleInput will be automatically populated

presets?: Array<Preset>

Configuration for the presets

fields: Array<string>
presets: unknown
resolutionComponent?: ResolutionComponent { heightInput, label, presets, widthInput }

Configuration for the resolution component

heightInput: string

The input name of the height

label: string

The label of the component

presets: Array<Preset>

The resolution presets

height: number
label: string
width: number
widthInput: string

The input name of the width

selects?: Record<string, unknown>

Configuration for the selects

triggerGenerate?: TriggerGenerate { label, after, position }

Configuration for the trigger generate button

label: string
after?: string

The ‘name’ of the input where the trigger generate button will be displayed (after the input). Do not specify both position and after.

position?: "bottom" | "top"

The position of the trigger generate button. If position is specified, the button will be displayed at the specified position. Do not specify both position and after.

One of the following:
"bottom"
"top"
userId?: string

(Deprecated) The user ID (example: “user_VFhihHKMRZyDDnZAJwLb2Q”)

ModelCreateResponse { model }
model: Model { id, capabilities, collectionIds, 35 more }
id: string

The model ID (example: “model_eyVcnFJcR92BxBkz7N6g5w”)

capabilities: Array<"3d23d" | "audio2audio" | "audio2video" | 29 more>

List of model capabilities (example: [“txt2img”, “img2img”, “txt2img_ip_adapter”, …])

One of the following:
"3d23d"
"audio2audio"
"audio2video"
"controlnet"
"controlnet_img2img"
"controlnet_inpaint"
"controlnet_inpaint_ip_adapter"
"controlnet_ip_adapter"
"controlnet_reference"
"controlnet_texture"
"img23d"
"img2img"
"img2img_ip_adapter"
"img2img_texture"
"img2txt"
"img2video"
"inpaint"
"inpaint_ip_adapter"
"outpaint"
"reference"
"reference_texture"
"txt23d"
"txt2audio"
"txt2img"
"txt2img_ip_adapter"
"txt2img_texture"
"txt2txt"
"txt2video"
"video23d"
"video2audio"
"video2img"
"video2video"
collectionIds: Array<string>

A list of collection IDs this model belongs to

createdAt: string

The model creation date as an ISO string (example: “2023-02-03T11:19:41.579Z”)

custom: boolean

Whether the model is a custom model that can only be used with the POST /generate/custom/{modelId} endpoint

exampleAssetIds: Array<string>

List of all example asset IDs set up by the model owner

privacy: "private" | "public" | "unlisted"

The privacy of the model (default: private)

One of the following:
"private"
"public"
"unlisted"
source: "civitai" | "huggingface" | "other" | "scenario"

The source of the model

One of the following:
"civitai"
"huggingface"
"other"
"scenario"
status: "copying" | "failed" | "new" | 3 more

The model status

One of the following:
"copying"
"failed"
"new"
"trained"
"training"
"training-canceled"
tags: Array<string>

The associated tags (example: [“sci-fi”, “landscape”])

trainingImagesNumber: number

The total number of training images

type: "custom" | "elevenlabs-voice" | "flux.1" | 34 more

The model type (example: “flux.1-lora”)

One of the following:
"custom"
"elevenlabs-voice"
"flux.1"
"flux.1-composition"
"flux.1-kontext-dev"
"flux.1-kontext-lora"
"flux.1-krea-dev"
"flux.1-krea-lora"
"flux.1-lora"
"flux.1-pro"
"flux.1.1-pro-ultra"
"flux.2-dev-edit-lora"
"flux.2-dev-lora"
"flux.2-klein-4b-edit-lora"
"flux.2-klein-4b-lora"
"flux.2-klein-9b-edit-lora"
"flux.2-klein-9b-lora"
"flux.2-klein-base-4b-edit-lora"
"flux.2-klein-base-4b-lora"
"flux.2-klein-base-9b-edit-lora"
"flux.2-klein-base-9b-lora"
"flux1.1-pro"
"gpt-image-1"
"qwen-image-2512-lora"
"qwen-image-edit-2509-lora"
"qwen-image-edit-2511-lora"
"qwen-image-edit-lora"
"qwen-image-lora"
"sd-1_5"
"sd-1_5-composition"
"sd-1_5-lora"
"sd-xl"
"sd-xl-composition"
"sd-xl-lora"
"zimage-de-turbo-lora"
"zimage-lora"
"zimage-turbo-lora"
updatedAt: string

The model last update date as an ISO string (example: “2023-02-03T11:19:41.579Z”)

accessRestrictions?: 0 | 100 | 25 | 2 more

The access restrictions of the model. 0: Free plan, 25: Creator plan, 50: Pro plan, 75: Team plan, 100: Enterprise plan

One of the following:
0
100
25
50
75
authorId?: string

The author user ID (example: “user_VFhihHKMRZyDDnZAJwLb2Q”)

class?: Class { category, conceptPrompt, modelId, 5 more }

The class of the model

category: string

The category slug of the class (example: “art-style”)

conceptPrompt: string

The concept prompt of the class (example: “a sks character design”)

modelId: string

The model ID of the class (example: “stable-diffusion-v1-5”)

name: string

The class name (example: “Character Design”)

prompt: string

The class prompt (example: “a character design”)

slug: string

The class slug (example: “art-style-character-design”)

status: "published" | "unpublished"

The class status (only published classes are listed, but unpublished classes can still appear in existing models)

One of the following:
"published"
"unpublished"
thumbnails: Array<string>

Some example image URLs to showcase the class

compliantModelIds?: Array<string>

List of base model IDs compliant with the model (example: [“flux.1-dev”, “flux.1-schnell”]). This attribute is mainly used for Flux LoRA models

concepts?: Array<Concept>

Concepts are required for models of type composition

modelId: string

The model ID (example: “model_eyVcnFJcR92BxBkz7N6g5w”)

scale: number

The scale of the model (example: 1.0). For Flux Kontext Prompt Editing, the scale is between 0 and 2.

maximum: 2
minimum: -2
modelEpoch?: string

The epoch of the model (example: “000001”). Only available for Flux LoRA trained models

epoch?: string

The epoch of the model. Only available for Flux LoRA trained models. If not set, uses the final (latest) model epoch

epochs?: Array<Epoch>

The epochs of the model. Only available for Flux LoRA trained models.

epoch: string

The epoch hash to identify the epoch

assets?: Array<Asset>

The assets of the epoch, if sample prompts have been supplied during training

assetId: string

The AssetId of the image during training (example: “asset_GTrL3mq4SXWyMxkOHRxlpw”)

url: string

The url of the asset

inputs?: Array<Input>

The inputs of the model. Only used for custom models. To retrieve this list, get it by modelId with GET /models/{modelId}

name: string

The name that must be used to call the model through the API

type: "boolean" | "file" | "file_array" | 7 more

The data type of the input

One of the following:
"boolean"
"file"
"file_array"
"inputs_array"
"model"
"model_array"
"number"
"number_array"
"string"
"string_array"
allowedValues?: Array<unknown>

The allowed values for the input. For `string` or `number` types, creates a single-select dropdown. For `string_array` type, creates a multi-select dropdown.

backgroundBehavior?: "opaque" | "transparent"

Specifies the background behavior for the input. Only available for `file` and `file_array` input types with kind `image`.

One of the following:
"opaque"
"transparent"
color?: boolean

Whether the input is a color or not. Only available for `string` input type.

costImpact?: boolean

Whether this input affects the model’s cost calculation

default?: unknown

The default value for the input

description?: string

Help text displayed in the UI to provide additional information about the input

group?: string

Used to visually group inputs together in the UI. Inputs with the same group value appear consecutively in the UI.

hint?: string

Hint text displayed in the UI as a tooltip to guide the user

inputs?: Array<Record<string, unknown>>

The list of inputs which form an object within a container array. All inputs are the same as the current object. This is only available for type inputs_array inputs.

kind?: "3d" | "audio" | "document" | 4 more

The asset kind of the input. Only taken into account for `file` and `file_array` input types. If the model allows multiple kinds, the API will not be able to create the asset on the fly from a data URL that lacks the `data:<kind>,` prefix

One of the following:
"3d"
"audio"
"document"
"image"
"image-hdr"
"json"
"video"
label?: string

The label displayed in the UI for this input

maskFrom?: string

The name of the file input field to use as the mask source

max?: number

The maximum allowed value. Only available for `number` and `array` input types.

maxLength?: number

The maximum allowed length for `string` inputs. Also applies to each item in `string_array`.

maxSize?: number

The maximum allowed file size in bytes. Only applies to `file` and `file_array` input types. Validated against `asset.properties.size` at job creation time.

min?: number

The minimum allowed value. Only available for `number` and array input types.

minLength?: number

The minimum allowed length for string inputs. Also applies to each item in `string_array`.

modelTypes?: Array<"custom" | "elevenlabs-voice" | "flux.1" | 34 more>

The allowed model types for this input. Example: `["flux.1-lora"]`. Only available for `model_array` input type.

One of the following:
"custom"
"elevenlabs-voice"
"flux.1"
"flux.1-composition"
"flux.1-kontext-dev"
"flux.1-kontext-lora"
"flux.1-krea-dev"
"flux.1-krea-lora"
"flux.1-lora"
"flux.1-pro"
"flux.1.1-pro-ultra"
"flux.2-dev-edit-lora"
"flux.2-dev-lora"
"flux.2-klein-4b-edit-lora"
"flux.2-klein-4b-lora"
"flux.2-klein-9b-edit-lora"
"flux.2-klein-9b-lora"
"flux.2-klein-base-4b-edit-lora"
"flux.2-klein-base-4b-lora"
"flux.2-klein-base-9b-edit-lora"
"flux.2-klein-base-9b-lora"
"flux1.1-pro"
"gpt-image-1"
"qwen-image-2512-lora"
"qwen-image-edit-2509-lora"
"qwen-image-edit-2511-lora"
"qwen-image-edit-lora"
"qwen-image-lora"
"sd-1_5"
"sd-1_5-composition"
"sd-1_5-lora"
"sd-xl"
"sd-xl-composition"
"sd-xl-lora"
"zimage-de-turbo-lora"
"zimage-lora"
"zimage-turbo-lora"
parent?: boolean

Whether this input represents a parent asset to assign to the produced assets. Only available for `file` and `file_array` input types.

For `file_array`, the parent asset is the first item in the array.

placeholder?: string

Placeholder text for the input. Only available for `string` input type.

prompt?: boolean

Whether the input is a prompt. When true, displays as a text area with prompt spark feature. Only available for `string` input type.

promptSpark?: boolean

Whether the input is used with prompt spark. Only available for `string` input type.

required?: Required { always, conditionalValues, ifDefined, ifNotDefined }

Set of rules that describes when this input is required:

  • `always`: Input is always required
  • `ifNotDefined`: Input is required when another specified input is not defined
  • `ifDefined`: Input is required when another specified input is defined
  • `conditionalValues`: Input is required when another input has a specific value

By default, the input is not required.

always?: boolean

Whether the input is always required

conditionalValues?: unknown

Makes this input required when another input has a specific value:

  • Key: name of the input to check
  • Value: operation and allowed values that trigger the requirement
ifDefined?: unknown

Makes this input required when another input is defined:

  • Key: name of the input that must be defined
  • Value: message to display when this input is required
ifNotDefined?: unknown

Makes this input required when another input is not defined:

  • Key: name of the input that must be undefined
  • Value: message to display when this input is required
step?: number

The step increment for numeric inputs. Only available for `number` input type.

minimum: 1
modelKeyword?: string

The model keyword. This is a legacy parameter; please use conceptPrompt in parameters instead

name?: string

The model name (example: “Cinematic Realism”)

negativePromptEmbedding?: string

Fine-tune the model’s inferences with negative prompt embedding

ownerId?: string

The owner ID (example: “team_VFhihHKMRZyDDnZAJwLb2Q”)

parameters?: Parameters { age, batchSize, classPrompt, 29 more }

The parameters of the model

age?: string

Age group of the voice (for professional cloning)

Only available for ElevenLabs voice training

batchSize?: number

The batch size. A larger batch size means fewer steps and increases the learning rate.

Only available for Flux LoRA training

maximum: 4
minimum: 1
classPrompt?: string

The prompt to specify images in the same class as provided instance images

Only available for SD15 training

cloneType?: string

Type of voice cloning: “instant” (fast) or “professional” (higher quality, requires captcha)

Only available for ElevenLabs voice training

conceptPrompt?: string

The prompt with identifier specifying the instance (or subject) of the class (example: “a daiton dog”)

Default value varies depending on the model type:

  • For SD1.5: “daiton” if no class is associated with the model
  • For SDXL: “daiton”
  • For Flux: ""
gender?: string

Gender of the voice (for professional cloning)

Only available for ElevenLabs voice training

language?: string

Language of the audio samples (ISO 639-1 code)

Only available for ElevenLabs voice training

learningRate?: number

Initial learning rate (after the potential warmup period)

Default value varies depending on the model type:

  • For SD1.5 and SDXL: 0.000005
  • For Flux: 0.0001
minimum: 0 (exclusive)
learningRateTextEncoder?: number

Initial learning rate (after the potential warmup period) for the text encoder

  • Maximum: Flux LoRA: 0.001
  • Default: SDXL: 0.00005 | Flux LoRA: 0.00001
  • Minimum: SDXL: 0 | Flux LoRA: 0.000001

maximum: 0.001
minimum: 0 (exclusive)
learningRateUnet?: number

Initial learning rate (after the potential warmup period) for the UNet

Only available for SDXL LoRA training

minimum: 0 (exclusive)
lrScheduler?: "constant" | "constant-with-warmup" | "cosine" | 3 more

The scheduler type to use (default: “constant”)

Only available for SD15 and SDXL LoRA training

One of the following:
"constant"
"constant-with-warmup"
"cosine"
"cosine-with-restarts"
"linear"
"polynomial"
maxTrainSteps?: number

Maximum number of training steps to execute (default: varies depending on the model type)

For SDXL LoRA training, please use numTextTrainSteps and numUNetTrainSteps instead

Default value varies depending on the model type:

  • For SD1.5: round((number of training images * 225) / 3)
  • For SDXL: number of training images * 175
  • For Flux: number of training images * 100

Maximum value varies depending on the model type:

  • For SD1.5 and SDXL: [0, 40000]
  • For Flux: [0, 10000]
maximum: 40000
minimum: 0
nbEpochs?: number

The number of epochs to train for

Only available for Flux LoRA training

maximum: 30
minimum: 1
nbRepeats?: number

The number of times to repeat the training

Only available for Flux LoRA training

maximum: 30
minimum: 1
numTextTrainSteps?: number

The number of training steps for the text encoder

Only available for SDXL LoRA training

maximum: 40000
minimum: 0
numUNetTrainSteps?: number

The number of training steps for the UNet

Only available for SDXL LoRA training

maximum: 40000
minimum: 0
optimizeFor?: "likeness"

Optimize the model training task for a specific type of input images. The available values are:

  • “likeness”: optimize training for likeness or portrait (targets specific transformer blocks)
  • “all”: train all transformer blocks
  • “none”: train no specific transformer blocks

This parameter controls which double and single transformer blocks are trained during the LoRA training process.

Only available for Flux LoRA training

priorLossWeight?: number

The weight of prior preservation loss

Only available for SD15 and SDXL LoRA training

maximum: 1.7976931348623157
minimum: 0 (exclusive)
randomCrop?: boolean

Whether to random crop or center crop images before resizing to the working resolution

Only available for SD15 and SDXL LoRA training

randomCropRatio?: number

Ratio of random crops

Only available for SD15 and SDXL LoRA training

maximum: 1
minimum: 0
randomCropScale?: number

Scale of random crops

Only available for SD15 and SDXL LoRA training

maximum: 1
minimum: 0
rank?: number

The dimension of the LoRA update matrices

Only available for SDXL (deprecated), Flux LoRA and Musubi training

Default value varies depending on the model type:

  • For SDXL (deprecated): 64
  • For Flux: 16
  • For Musubi: 64

Each trainer enforces its own tighter limit (Flux LoRA: [2; 64], Musubi: [2; 128])

maximum: 128
minimum: 2
removeBackgroundNoise?: boolean

Whether to remove background noise from audio samples before cloning. When enabled, each sample must be at least 5 seconds long.

Only available for ElevenLabs voice training

samplePrompts?: Array<string>

The prompts to use for each epoch. Only available for Flux LoRA training

sampleSourceImages?: Array<string>

The sample prompt images (AssetIds) paired with samplePrompts. Only available for Flux LoRA training. Must be the same length as samplePrompts

scaleLr?: boolean

Whether to scale the learning rate

Note: Legacy parameter, will be ignored

Only available for SD15 and SDXL LoRA training

seed?: number

Used to reproduce previous results. Default: randomly generated number.

Only available for SD15 and SDXL LoRA training

maximum: 9007199254740991
minimum: 0
textEncoderTrainingRatio?: number

Whether to train the text encoder or not

Example: with 100 steps and a value of 0.2, the text encoder is trained for 20 steps, then the UNet for the remaining 80 steps

Note: Legacy parameter, please use numTextTrainSteps and numUNetTrainSteps

Only available for SD15 and SDXL LoRA training

maximum: 0.99
minimum: 0
validationFrequency?: number

Validation frequency. Cannot be greater than maxTrainSteps value

Only available for SD15 and SDXL LoRA training

minimum: 0
validationPrompt?: string

Validation prompt

Only available for SD15 and SDXL LoRA training

voiceDescription?: string

Description of the voice characteristics

Only available for ElevenLabs voice training

wandbKey?: string

The Weights & Biases key to use for logging. The maximum length is 40 characters

parentModelId?: string

The ID of the parent model

performanceStats?: PerformanceStats { variants, default }

Aggregated performance stats

variants: Array<Variant>

Performance metrics per variant

capability: string

The generation capability (example: “txt2img”, “img2video”, “txt2audio”)

computedAt: string

When these stats were last computed (ISO date)

variantKey: string

Unique variant identifier (example: “txt2img:1K”, “img2video:2K”, “txt2audio”)

arenaScore?: ArenaScore { arenaCategory, arenaModelName, fetchedAt, 5 more }

External quality score from arena.ai leaderboard

arenaCategory: string

Arena category (example: “text_to_image”, “image_to_video”)

arenaModelName: string

Model name on arena.ai

fetchedAt: string

When this score was last fetched (ISO date)

rank: number

Rank in the arena category

rating: number

ELO rating

ratingLower: number

ELO rating confidence interval lower bound

ratingUpper: number

ELO rating confidence interval upper bound

votes: number

Number of human votes

costPerAssetMaxCU?: number

Maximum cost per output asset (CU)

costPerAssetMinCU?: number

Minimum cost per output asset (CU)

costPerAssetP50CU?: number

Median cost per output asset (CU)

inferenceLatencyP50Sec?: number

Inference latency P50 per output asset (seconds)

inferenceLatencyP75Sec?: number

Inference latency P75 per output asset (seconds)

resolution?: string

The resolution bucket (example: “0.5K”, “1K”, “2K”, “4K”)

totalLatencyP50Sec?: number

Total latency P50 per output asset, including queue time (seconds)

totalLatencyP75Sec?: number

Total latency P75 per output asset, including queue time (seconds)

default?: string

Default variant key for quick model comparison

promptEmbedding?: string

Fine-tune the model’s inferences with prompt embedding

shortDescription?: string

The model short description (example: “This model generates highly detailed cinematic scenes.”)

softDeletionOn?: string

The date when the model will be soft deleted (only for Free plan)

thumbnail?: Thumbnail { assetId, url }

A thumbnail for your model

assetId: string

The AssetId of the image used as a thumbnail for your model (example: “asset_GTrL3mq4SXWyMxkOHRxlpw”)

url: string

The url of the image used as a thumbnail for your model

trainingImagePairs?: Array<TrainingImagePair>

Array of training image pairs

instruction?: string

The instruction for the image pair, source to target

sourceId?: string

The source asset ID (must be a training asset)

targetId?: string

The target asset ID (must be a training asset)

trainingImages?: Array<TrainingImage>

The URLs of the first 3 training images of the model. To retrieve the full set of images, fetch the model by modelId

id: string

The training image ID (example: “asset_GTrL3mq4SXWyMxkOHRxlpw”)

automaticCaptioning: string

Automatic captioning of the image

createdAt: string

The training image upload date as an ISO string (example: “2023-02-03T11:19:41.579Z”)

description: string

Description for the image

downloadUrl: string

The URL of the image

name: string

The original file name of the image (example: “my-training-image.jpg”)

trainingProgress?: TrainingProgress { stage, updatedAt, position, 3 more }

Additional information about the training progress of the model

stage: "pending" | "pending-captcha" | "queued-for-train" | 2 more

The stage of the request

One of the following:
"pending"
"pending-captcha"
"queued-for-train"
"running-train"
"starting-train"
updatedAt: number

Timestamp in milliseconds of the last time the training progress was updated

position?: number

Position of the job in the queue (i.e. the number of jobs in the queue before this one)

progress?: number

The progress of the job

maximum: 1
minimum: 0
remainingTimeMs?: number

The remaining time in milliseconds

startedAt?: number

The timestamp in milliseconds marking the start of the process

trainingStats?: TrainingStats { endedAt, queueDuration, startedAt, trainDuration }

Additional information about the model’s training

endedAt?: string

The training end time as an ISO date string

queueDuration?: number

The training queue duration in seconds

startedAt?: string

The training start time as an ISO date string

trainDuration?: number

The training duration in seconds

uiConfig?: UiConfig { inputProperties, lorasComponent, presets, 3 more }

The UI configuration for the model

inputProperties?: Record<string, InputProperties>

Configuration for the input properties

collapsed?: boolean
lorasComponent?: LorasComponent { label, modelInput, scaleInput, modelIdInput }

Configuration for the loras component

label: string

The label of the component

modelInput: string

The input name of the model (model_array)

scaleInput: string

The input name of the scale (number_array)

modelIdInput?: string

The input model ID (example: a composition or a single LoRA modelId). If specified, the model ID will be attached to the output asset as metadata. If the model-decomposer parser is specified on it, modelInput and scaleInput will be automatically populated

presets?: Array<Preset>

Configuration for the presets

fields: Array<string>
presets: unknown
resolutionComponent?: ResolutionComponent { heightInput, label, presets, widthInput }

Configuration for the resolution component

heightInput: string

The input name of the height

label: string

The label of the component

presets: Array<Preset>

The resolution presets

height: number
label: string
width: number
widthInput: string

The input name of the width

selects?: Record<string, unknown>

Configuration for the selects

triggerGenerate?: TriggerGenerate { label, after, position }

Configuration for the trigger generate button

label: string
after?: string

The ‘name’ of the input where the trigger generate button will be displayed (after the input). Do not specify both position and after.

position?: "bottom" | "top"

The position of the trigger generate button. If position is specified, the button will be displayed at the specified position. Do not specify both position and after.

One of the following:
"bottom"
"top"
userId?: string

(Deprecated) The user ID (example: “user_VFhihHKMRZyDDnZAJwLb2Q”)

ModelGetBulkResponse { models }
models: Array<Model>
id: string

The model ID (example: “model_eyVcnFJcR92BxBkz7N6g5w”)

privacy: "private" | "public" | "unlisted"

The privacy of the model (default: private)

One of the following:
"private"
"public"
"unlisted"
type: "custom" | "elevenlabs-voice" | "flux.1" | 34 more

The model type (example: “flux.1-lora”)

One of the following:
"custom"
"elevenlabs-voice"
"flux.1"
"flux.1-composition"
"flux.1-kontext-dev"
"flux.1-kontext-lora"
"flux.1-krea-dev"
"flux.1-krea-lora"
"flux.1-lora"
"flux.1-pro"
"flux.1.1-pro-ultra"
"flux.2-dev-edit-lora"
"flux.2-dev-lora"
"flux.2-klein-4b-edit-lora"
"flux.2-klein-4b-lora"
"flux.2-klein-9b-edit-lora"
"flux.2-klein-9b-lora"
"flux.2-klein-base-4b-edit-lora"
"flux.2-klein-base-4b-lora"
"flux.2-klein-base-9b-edit-lora"
"flux.2-klein-base-9b-lora"
"flux1.1-pro"
"gpt-image-1"
"qwen-image-2512-lora"
"qwen-image-edit-2509-lora"
"qwen-image-edit-2511-lora"
"qwen-image-edit-lora"
"qwen-image-lora"
"sd-1_5"
"sd-1_5-composition"
"sd-1_5-lora"
"sd-xl"
"sd-xl-composition"
"sd-xl-lora"
"zimage-de-turbo-lora"
"zimage-lora"
"zimage-turbo-lora"
accessRestrictions?: 0 | 100 | 25 | 2 more

The access restrictions of the model. 0: Free plan, 25: Creator plan, 50: Pro plan, 75: Team plan, 100: Enterprise plan

One of the following:
0
100
25
50
75
authorId?: string

The author user ID (example: “user_VFhihHKMRZyDDnZAJwLb2Q”)

capabilities?: Array<"3d23d" | "audio2audio" | "audio2video" | 29 more>

List of model capabilities (example: [“txt2img”, “img2img”, “txt2img_ip_adapter”, …])

One of the following:
"3d23d"
"audio2audio"
"audio2video"
"controlnet"
"controlnet_img2img"
"controlnet_inpaint"
"controlnet_inpaint_ip_adapter"
"controlnet_ip_adapter"
"controlnet_reference"
"controlnet_texture"
"img23d"
"img2img"
"img2img_ip_adapter"
"img2img_texture"
"img2txt"
"img2video"
"inpaint"
"inpaint_ip_adapter"
"outpaint"
"reference"
"reference_texture"
"txt23d"
"txt2audio"
"txt2img"
"txt2img_ip_adapter"
"txt2img_texture"
"txt2txt"
"txt2video"
"video23d"
"video2audio"
"video2img"
"video2video"
class?: Class { category, conceptPrompt, modelId, 5 more }

The class of the model

category: string

The category slug of the class (example: “art-style”)

conceptPrompt: string

The concept prompt of the class (example: “a sks character design”)

modelId: string

The model ID of the class (example: “stable-diffusion-v1-5”)

name: string

The class name (example: “Character Design”)

prompt: string

The class prompt (example: “a character design”)

slug: string

The class slug (example: “art-style-character-design”)

status: "published" | "unpublished"

The class status (only published classes are listed, but unpublished classes can still appear in existing models)

One of the following:
"published"
"unpublished"
thumbnails: Array<string>

Some example image URLs to showcase the class

collectionIds?: Array<string>

A list of collection IDs this model belongs to

compliantModelIds?: Array<string>

List of base model IDs compliant with the model (example: [“flux.1-dev”, “flux.1-schnell”]). This attribute is mainly used for Flux LoRA models

concepts?: Array<Concept>

Concepts are required for models of type composition

modelId: string

The model ID (example: “model_eyVcnFJcR92BxBkz7N6g5w”)

scale: number

The scale of the model (example: 1.0). For Flux Kontext Prompt Editing, the scale is between 0 and 2.

maximum: 2
minimum: -2
modelEpoch?: string

The epoch of the model (example: “000001”). Only available for Flux LoRA trained models

createdAt?: string

The model creation date as an ISO string (example: “2023-02-03T11:19:41.579Z”)

custom?: boolean

Whether the model is a custom model that can only be used with the POST /generate/custom/{modelId} endpoint

epoch?: string

The epoch of the model. Only available for Flux LoRA trained models. If not set, uses the final (latest) model epoch

epochs?: Array<Epoch>

The epochs of the model. Only available for Flux LoRA trained models.

epoch: string

The epoch hash to identify the epoch

assets?: Array<Asset>

The assets of the epoch, if sample prompts have been supplied during training

assetId: string

The AssetId of the image during training (example: “asset_GTrL3mq4SXWyMxkOHRxlpw”)

url: string

The url of the asset

exampleAssetIds?: Array<string>

List of all example asset IDs set up by the model owner

inputs?: Array<Input>

The inputs of the model. Only used for custom models. To retrieve this list, get it by modelId with GET /models/{modelId}

name: string

The name that must be used to call the model through the API

type: "boolean" | "file" | "file_array" | 7 more

The data type of the input

One of the following:
"boolean"
"file"
"file_array"
"inputs_array"
"model"
"model_array"
"number"
"number_array"
"string"
"string_array"
allowedValues?: Array<unknown>

The allowed values for the input. For `string` or `number` types, creates a single-select dropdown. For `string_array` type, creates a multi-select dropdown.

backgroundBehavior?: "opaque" | "transparent"

Specifies the background behavior for the input. Only available for `file` and `file_array` input types with kind `image`.

One of the following:
"opaque"
"transparent"
color?: boolean

Whether the input is a color or not. Only available for `string` input type.

costImpact?: boolean

Whether this input affects the model’s cost calculation

default?: unknown

The default value for the input

description?: string

Help text displayed in the UI to provide additional information about the input

group?: string

Used to visually group inputs together in the UI. Inputs with the same group value appear consecutively in the UI.

hint?: string

Hint text displayed in the UI as a tooltip to guide the user

inputs?: Array<Record<string, unknown>>

The list of inputs which form an object within a container array. All inputs are the same as the current object. This is only available for type inputs_array inputs.

kind?: "3d" | "audio" | "document" | 4 more

The asset kind of the input. Only taken into account for `file` and `file_array` input types. If the model allows multiple kinds, the API will not be able to create the asset on the fly from a data URL that lacks the `data:<kind>,` prefix

One of the following:
"3d"
"audio"
"document"
"image"
"image-hdr"
"json"
"video"
label?: string

The label displayed in the UI for this input

maskFrom?: string

The name of the file input field to use as the mask source

max?: number

The maximum allowed value. Only available for `number` and `array` input types.

maxLength?: number

The maximum allowed length for `string` inputs. Also applies to each item in `string_array`.

maxSize?: number

The maximum allowed file size in bytes. Only applies to `file` and `file_array` input types. Validated against `asset.properties.size` at job creation time.

min?: number

The minimum allowed value. Only available for `number` and array input types.

minLength?: number

The minimum allowed length for string inputs. Also applies to each item in `string_array`.

modelTypes?: Array<"custom" | "elevenlabs-voice" | "flux.1" | 34 more>

The allowed model types for this input. Example: `["flux.1-lora"]`. Only available for `model_array` input type.

One of the following:
"custom"
"elevenlabs-voice"
"flux.1"
"flux.1-composition"
"flux.1-kontext-dev"
"flux.1-kontext-lora"
"flux.1-krea-dev"
"flux.1-krea-lora"
"flux.1-lora"
"flux.1-pro"
"flux.1.1-pro-ultra"
"flux.2-dev-edit-lora"
"flux.2-dev-lora"
"flux.2-klein-4b-edit-lora"
"flux.2-klein-4b-lora"
"flux.2-klein-9b-edit-lora"
"flux.2-klein-9b-lora"
"flux.2-klein-base-4b-edit-lora"
"flux.2-klein-base-4b-lora"
"flux.2-klein-base-9b-edit-lora"
"flux.2-klein-base-9b-lora"
"flux1.1-pro"
"gpt-image-1"
"qwen-image-2512-lora"
"qwen-image-edit-2509-lora"
"qwen-image-edit-2511-lora"
"qwen-image-edit-lora"
"qwen-image-lora"
"sd-1_5"
"sd-1_5-composition"
"sd-1_5-lora"
"sd-xl"
"sd-xl-composition"
"sd-xl-lora"
"zimage-de-turbo-lora"
"zimage-lora"
"zimage-turbo-lora"
parent?: boolean

Whether this input represents a parent asset to assign to the produced assets. Only available for `file` and `file_array` input types.

For `file_array`, the parent asset is the first item in the array.

placeholder?: string

Placeholder text for the input. Only available for `string` input type.

prompt?: boolean

Whether the input is a prompt. When true, displays as a text area with prompt spark feature. Only available for `string` input type.

promptSpark?: boolean

Whether the input is used with prompt spark. Only available for `string` input type.

required?: Required { always, conditionalValues, ifDefined, ifNotDefined }

Set of rules that describes when this input is required:

  • `always`: Input is always required
  • `ifNotDefined`: Input is required when another specified input is not defined
  • `ifDefined`: Input is required when another specified input is defined
  • `conditionalValues`: Input is required when another input has a specific value

By default, the input is not required.

always?: boolean

Whether the input is always required

conditionalValues?: unknown

Makes this input required when another input has a specific value:

  • Key: name of the input to check
  • Value: operation and allowed values that trigger the requirement
ifDefined?: unknown

Makes this input required when another input is defined:

  • Key: name of the input that must be defined
  • Value: message to display when this input is required
ifNotDefined?: unknown

Makes this input required when another input is not defined:

  • Key: name of the input that must be undefined
  • Value: message to display when this input is required
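
To make these requirement rules concrete, here is a minimal sketch of a hypothetical input definition that combines them. The input names, messages, and the operation/values shape under conditionalValues are assumptions for illustration, not documented values.

```ts
// Hypothetical input definition illustrating the `required` rule shapes.
// All names, messages, and the conditionalValues shape are illustrative.
const maskInput = {
  name: "mask",
  type: "file",
  required: {
    // Required whenever the `image` input is provided:
    ifDefined: { image: "A mask is required when an image is provided" },
    // Assumed shape: required when `mode` has the value "inpaint":
    conditionalValues: { mode: { operation: "eq", values: ["inpaint"] } },
  },
};
```
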
step?: number

The step increment for numeric inputs. Only available for `number` input type.

Minimum: 1
modelKeyword?: string

The model keyword. This is a legacy parameter; use conceptPrompt in parameters instead.

name?: string

The model name (example: “Cinematic Realism”)

negativePromptEmbedding?: string

Fine-tune the model’s inferences with negative prompt embedding

ownerId?: string

The owner ID (example: “team_VFhihHKMRZyDDnZAJwLb2Q”)

parameters?: Parameters { age, batchSize, classPrompt, 29 more }

The parameters of the model

age?: string

Age group of the voice (for professional cloning)

Only available for ElevenLabs voice training

batchSize?: number

The batch size. A larger batch size results in fewer steps and will increase the learning rate.

Only available for Flux LoRA training

Maximum: 4
Minimum: 1
classPrompt?: string

The prompt to specify images in the same class as provided instance images

Only available for SD15 training

cloneType?: string

Type of voice cloning: “instant” (fast) or “professional” (higher quality, requires captcha)

Only available for ElevenLabs voice training

conceptPrompt?: string

The prompt with identifier specifying the instance (or subject) of the class (example: “a daiton dog”)

Default value varies depending on the model type:

  • For SD1.5: “daiton” if no class is associated with the model
  • For SDXL: “daiton”
  • For Flux: ""
gender?: string

Gender of the voice (for professional cloning)

Only available for ElevenLabs voice training

language?: string

Language of the audio samples (ISO 639-1 code)

Only available for ElevenLabs voice training

learningRate?: number

Initial learning rate (after the potential warmup period)

Default value varies depending on the model type:

  • For SD1.5 and SDXL: 0.000005
  • For Flux: 0.0001
Minimum: 0 (exclusive)
learningRateTextEncoder?: number

Initial learning rate (after the potential warmup period) for the text encoder

Maximum [Flux LoRA: 0.001] Default [SDXL: 0.00005 | Flux LoRA: 0.00001] Minimum [SDXL: 0 | Flux LoRA: 0.000001]

Maximum: 0.001
Minimum: 0 (exclusive)
learningRateUnet?: number

Initial learning rate (after the potential warmup period) for the UNet

Only available for SDXL LoRA training

Minimum: 0 (exclusive)
lrScheduler?: "constant" | "constant-with-warmup" | "cosine" | 3 more

The scheduler type to use (default: “constant”)

Only available for SD15 and SDXL LoRA training

One of the following:
"constant"
"constant-with-warmup"
"cosine"
"cosine-with-restarts"
"linear"
"polynomial"
maxTrainSteps?: number

Maximum number of training steps to execute (default: varies depending on the model type)

For SDXL LoRA training, please use numTextTrainSteps and numUNetTrainSteps instead

Default value varies depending on the model type:

  • For SD1.5: round((number of training images * 225) / 3)
  • For SDXL: number of training images * 175
  • For Flux: number of training images * 100

Maximum value varies depending on the model type:

  • For SD1.5 and SDXL: [0, 40000]
  • For Flux: [0, 10000]
Maximum: 40000
Minimum: 0
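
As a worked example of the defaults above, a small sketch applying the listed formulas; the helper itself is illustrative and not part of the SDK.

```ts
// Illustrative helper computing the default maxTrainSteps described above.
function defaultMaxTrainSteps(
  modelType: "sd-1_5" | "sd-xl" | "flux",
  trainingImages: number,
): number {
  switch (modelType) {
    case "sd-1_5":
      return Math.round((trainingImages * 225) / 3);
    case "sd-xl":
      return trainingImages * 175;
    case "flux":
      return trainingImages * 100;
  }
}

defaultMaxTrainSteps("sd-1_5", 20); // => 1500
defaultMaxTrainSteps("flux", 20); // => 2000
```
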
nbEpochs?: number

The number of epochs to train for

Only available for Flux LoRA training

Maximum: 30
Minimum: 1
nbRepeats?: number

The number of times to repeat the training

Only available for Flux LoRA training

Maximum: 30
Minimum: 1
numTextTrainSteps?: number

The number of training steps for the text encoder

Only available for SDXL LoRA training

Maximum: 40000
Minimum: 0
numUNetTrainSteps?: number

The number of training steps for the UNet

Only available for SDXL LoRA training

Maximum: 40000
Minimum: 0
optimizeFor?: "all" | "likeness" | "none"

Optimize the model training task for a specific type of input images. The available values are:

  • “likeness”: optimize training for likeness or portrait (targets specific transformer blocks)
  • “all”: train all transformer blocks
  • “none”: train no specific transformer blocks

This parameter controls which double and single transformer blocks are trained during the LoRA training process.

Only available for Flux LoRA training

priorLossWeight?: number

The weight of prior preservation loss

Only available for SD15 and SDXL LoRA training

Maximum: 1.7976931348623157
Minimum: 0 (exclusive)
randomCrop?: boolean

Whether to random-crop (instead of center-crop) images before resizing to the working resolution

Only available for SD15 and SDXL LoRA training

randomCropRatio?: number

Ratio of random crops

Only available for SD15 and SDXL LoRA training

Maximum: 1
Minimum: 0
randomCropScale?: number

Scale of random crops

Only available for SD15 and SDXL LoRA training

Maximum: 1
Minimum: 0
rank?: number

The dimension of the LoRA update matrices

Only available for SDXL (deprecated), Flux LoRA and Musubi training

Default value varies depending on the model type:

  • For SDXL (deprecated): 64
  • For Flux: 16
  • For Musubi: 64

Each trainer enforces its own tighter limit (Flux LoRA: [2; 64], Musubi: [2; 128])

Maximum: 128
Minimum: 2
removeBackgroundNoise?: boolean

Whether to remove background noise from audio samples before cloning. When enabled, each sample must be at least 5 seconds long.

Only available for ElevenLabs voice training

samplePrompts?: Array<string>

The prompts to use for each epoch. Only available for Flux LoRA training.

sampleSourceImages?: Array<string>

The sample prompt images (AssetIds) paired with samplePrompts. Must be the same length as samplePrompts. Only available for Flux LoRA training; a pairing sketch follows below.
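
For example, a hypothetical Flux LoRA parameters fragment pairing the two arrays; the prompts and asset IDs are placeholders.

```ts
// Both arrays must have the same length: prompt i is rendered with image i.
const parameters = {
  samplePrompts: ["a daiton dog on a beach", "a daiton dog in the snow"],
  sampleSourceImages: ["asset_AAAAAAAAAAAAAAAAAAAAAA", "asset_BBBBBBBBBBBBBBBBBBBBBB"],
};
```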

scaleLr?: boolean

Whether to scale the learning rate

Note: Legacy parameter, will be ignored

Only available for SD15 and SDXL LoRA training

seed?: number

Used to reproduce previous results. Default: randomly generated number.

Only available for SD15 and SDXL LoRA training

Maximum: 9007199254740991
Minimum: 0
textEncoderTrainingRatio?: number

The ratio of training steps spent on the text encoder (the remainder train the UNet)

Example: for 100 steps and a value of 0.2, the text encoder is trained for 20 steps, then the UNet for the remaining 80 steps

Note: Legacy parameter, please use numTextTrainSteps and numUNetTrainSteps

Only available for SD15 and SDXL LoRA training

Maximum: 0.99
Minimum: 0
validationFrequency?: number

Validation frequency. Cannot be greater than the maxTrainSteps value

Only available for SD15 and SDXL LoRA training

Minimum: 0
validationPrompt?: string

Validation prompt

Only available for SD15 and SDXL LoRA training

voiceDescription?: string

Description of the voice characteristics

Only available for ElevenLabs voice training

wandbKey?: string

The Weights & Biases key to use for logging. The maximum length is 40 characters

parentModelId?: string

The id of the parent model

performanceStats?: PerformanceStats { variants, default }

Aggregated performance stats

variants: Array<Variant>

Performance metrics per variant

capability: string

The generation capability (example: “txt2img”, “img2video”, “txt2audio”)

computedAt: string

When these stats were last computed (ISO date)

variantKey: string

Unique variant identifier (example: “txt2img:1K”, “img2video:2K”, “txt2audio”)

arenaScore?: ArenaScore { arenaCategory, arenaModelName, fetchedAt, 5 more }

External quality score from arena.ai leaderboard

arenaCategory: string

Arena category (example: “text_to_image”, “image_to_video”)

arenaModelName: string

Model name on arena.ai

fetchedAt: string

When this score was last fetched (ISO date)

rank: number

Rank in the arena category

rating: number

ELO rating

ratingLower: number

ELO rating confidence interval lower bound

ratingUpper: number

ELO rating confidence interval upper bound

votes: number

Number of human votes

costPerAssetMaxCU?: number

Maximum cost per output asset (CU)

costPerAssetMinCU?: number

Minimum cost per output asset (CU)

costPerAssetP50CU?: number

Median cost per output asset (CU)

inferenceLatencyP50Sec?: number

Inference latency P50 per output asset (seconds)

inferenceLatencyP75Sec?: number

Inference latency P75 per output asset (seconds)

resolution?: string

The resolution bucket (example: “0.5K”, “1K”, “2K”, “4K”)

totalLatencyP50Sec?: number

Total latency P50 per output asset, including queue time (seconds)

totalLatencyP75Sec?: number

Total latency P75 per output asset, including queue time (seconds)

default?: string

Default variant key for quick model comparison
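
A minimal sketch of consuming these stats, assuming an initialized SDK client and a placeholder model ID; it looks up the variant flagged as the default.

```ts
const { model } = await client.models.retrieve("model_xxxxxxxxxxxxxxxxxxxxxx");
const stats = model.performanceStats;
if (stats) {
  // `default` names the variant intended for quick comparison.
  const v = stats.variants.find((variant) => variant.variantKey === stats.default);
  console.log(v?.capability, v?.resolution, `${v?.totalLatencyP50Sec}s total P50`);
}
```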

promptEmbedding?: string

Fine-tune the model’s inferences with prompt embedding

shortDescription?: string

The model short description (example: “This model generates highly detailed cinematic scenes.”)

softDeletionOn?: string

The date when the model will be soft deleted (only for Free plan)

source?: "civitai" | "huggingface" | "other" | "scenario"

The source of the model

One of the following:
"civitai"
"huggingface"
"other"
"scenario"
status?: "copying" | "failed" | "new" | 3 more

The model status

One of the following:
"copying"
"failed"
"new"
"trained"
"training"
"training-canceled"
tags?: Array<string>

The associated tags (example: [“sci-fi”, “landscape”])
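
Tags are managed through the Update Tags endpoint listed at the top of this page; a minimal usage sketch, assuming an initialized SDK client and a placeholder model ID.

```ts
const { added, deleted } = await client.models.updateTags("model_xxxxxxxxxxxxxxxxxxxxxx", {
  add: ["sci-fi"],
  _delete: ["landscape"],
});
console.log({ added, deleted });
```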

thumbnail?: Thumbnail { assetId, url }

A thumbnail for your model

assetId: string

The AssetId of the image used as a thumbnail for your model (example: “asset_GTrL3mq4SXWyMxkOHRxlpw”)

url: string

The url of the image used as a thumbnail for your model

trainingImagePairs?: Array<TrainingImagePair>

Array of training image pairs

instruction?: string

The instruction for the image pair, source to target

sourceId?: string

The source asset ID (must be a training asset)

targetId?: string

The target asset ID (must be a training asset)

trainingImages?: Array<TrainingImage>

The URLs of the first 3 training images of the model. To retrieve the full set of images, fetch the model by its modelId (GET /models/{modelId})

id: string

The training image ID (example: “asset_GTrL3mq4SXWyMxkOHRxlpw”)

automaticCaptioning: string

Automatic captioning of the image

createdAt: string

The training image upload date as an ISO string (example: “2023-02-03T11:19:41.579Z”)

description: string

Description for the image

downloadUrl: string

The URL of the image

name: string

The original file name of the image (example: “my-training-image.jpg”)

trainingImagesNumber?: number

The total number of training images

trainingProgress?: TrainingProgress { stage, updatedAt, position, 3 more }

Additional information about the training progress of the model

stage: "pending" | "pending-captcha" | "queued-for-train" | 2 more

The stage of the request

One of the following:
"pending"
"pending-captcha"
"queued-for-train"
"running-train"
"starting-train"
updatedAt: number

Timestamp in milliseconds of the last time the training progress was updated

position?: number

Position of the job in the queue (i.e. the number of jobs in the queue before this one)

progress?: number

The progress of the job

Maximum: 1
Minimum: 0
remainingTimeMs?: number

The remaining time in milliseconds

startedAt?: number

The timestamp in milliseconds marking the start of the process
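
A sketch of polling these fields until training settles, assuming an initialized SDK client; the statuses and field names come from this schema, while the 10-second interval is arbitrary.

```ts
async function waitForTraining(modelId: string) {
  for (;;) {
    const { model } = await client.models.retrieve(modelId);
    if (["trained", "failed", "training-canceled"].includes(model.status)) return model.status;
    const p = model.trainingProgress;
    if (p) console.log(p.stage, `${Math.round((p.progress ?? 0) * 100)}%`);
    await new Promise((resolve) => setTimeout(resolve, 10_000)); // arbitrary poll interval
  }
}
```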

trainingStats?: TrainingStats { endedAt, queueDuration, startedAt, trainDuration }

Additional information about the model’s training

endedAt?: string

The training end time as an ISO date string

queueDuration?: number

The training queued duration in seconds

startedAt?: string

The training start time as an ISO date string

trainDuration?: number

The training duration in seconds

uiConfig?: UiConfig { inputProperties, lorasComponent, presets, 3 more }

The UI configuration for the model

inputProperties?: Record<string, InputProperties>

Configuration for the input properties

collapsed?: boolean
lorasComponent?: LorasComponent { label, modelInput, scaleInput, modelIdInput }

Configuration for the loras component

label: string

The label of the component

modelInput: string

The input name of the model (model_array)

scaleInput: string

The input name of the scale (number_array)

modelIdInput?: string

The input model id (example: a composition or a single LoRA modelId). If specified, the model id will be attached to the output asset as metadata. If the model-decomposer parser is specified on it, modelInput and scaleInput will be populated automatically.

presets?: Array<Preset>

Configuration for the presets

fields: Array<string>
presets: unknown
resolutionComponent?: ResolutionComponent { heightInput, label, presets, widthInput }

Configuration for the resolution component

heightInput: string

The input name of the height

label: string

The label of the component

presets: Array<Preset>

The resolution presets

height: number
label: string
width: number
widthInput: string

The input name of the width

selects?: Record<string, unknown>

Configuration for the selects

triggerGenerate?: TriggerGenerate { label, after, position }

Configuration for the trigger generate button

label: string
after?: string

The `name` of the input where the trigger generate button will be displayed (after the input). Do not specify both position and after.

position?: "bottom" | "top"

The position of the trigger generate button. If specified, the button will be displayed at that position. Do not specify both position and after.

One of the following:
"bottom"
"top"
updatedAt?: string

The model last update date as an ISO string (example: “2023-02-03T11:19:41.579Z”)

userId?: string

(Deprecated) The user ID (example: “user_VFhihHKMRZyDDnZAJwLb2Q”)
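
Putting the uiConfig fields above together, a hypothetical configuration for an image model; every input name, label, and preset value below is illustrative.

```ts
const uiConfig = {
  resolutionComponent: {
    label: "Resolution",
    widthInput: "width", // name of the width input
    heightInput: "height", // name of the height input
    presets: [
      { label: "Square 1K", width: 1024, height: 1024 },
      { label: "Portrait", width: 832, height: 1216 },
    ],
  },
  // Use either `position` or `after`, never both.
  triggerGenerate: { label: "Generate", position: "bottom" },
};
```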

ModelRetrieveResponse { model }
model: Model { id, capabilities, collectionIds, 35 more }
id: string

The model ID (example: “model_eyVcnFJcR92BxBkz7N6g5w”)

capabilities: Array<"3d23d" | "audio2audio" | "audio2video" | 29 more>

List of model capabilities (example: [“txt2img”, “img2img”, “txt2img_ip_adapter”, …])

One of the following:
"3d23d"
"audio2audio"
"audio2video"
"controlnet"
"controlnet_img2img"
"controlnet_inpaint"
"controlnet_inpaint_ip_adapter"
"controlnet_ip_adapter"
"controlnet_reference"
"controlnet_texture"
"img23d"
"img2img"
"img2img_ip_adapter"
"img2img_texture"
"img2txt"
"img2video"
"inpaint"
"inpaint_ip_adapter"
"outpaint"
"reference"
"reference_texture"
"txt23d"
"txt2audio"
"txt2img"
"txt2img_ip_adapter"
"txt2img_texture"
"txt2txt"
"txt2video"
"video23d"
"video2audio"
"video2img"
"video2video"
collectionIds: Array<string>

A list of the CollectionIds this model belongs to

createdAt: string

The model creation date as an ISO string (example: “2023-02-03T11:19:41.579Z”)

custom: boolean

Whether the model is a custom model and can be used only with POST /generate/custom/{modelId} endpoint

exampleAssetIds: Array<string>

List of all example asset IDs set up by the model owner

privacy: "private" | "public" | "unlisted"

The privacy of the model (default: private)

One of the following:
"private"
"public"
"unlisted"
source: "civitai" | "huggingface" | "other" | "scenario"

The source of the model

One of the following:
"civitai"
"huggingface"
"other"
"scenario"
status: "copying" | "failed" | "new" | 3 more

The model status

One of the following:
"copying"
"failed"
"new"
"trained"
"training"
"training-canceled"
tags: Array<string>

The associated tags (example: [“sci-fi”, “landscape”])

trainingImagesNumber: number

The total number of training images

type: "custom" | "elevenlabs-voice" | "flux.1" | 34 more

The model type (example: “flux.1-lora”)

One of the following:
"custom"
"elevenlabs-voice"
"flux.1"
"flux.1-composition"
"flux.1-kontext-dev"
"flux.1-kontext-lora"
"flux.1-krea-dev"
"flux.1-krea-lora"
"flux.1-lora"
"flux.1-pro"
"flux.1.1-pro-ultra"
"flux.2-dev-edit-lora"
"flux.2-dev-lora"
"flux.2-klein-4b-edit-lora"
"flux.2-klein-4b-lora"
"flux.2-klein-9b-edit-lora"
"flux.2-klein-9b-lora"
"flux.2-klein-base-4b-edit-lora"
"flux.2-klein-base-4b-lora"
"flux.2-klein-base-9b-edit-lora"
"flux.2-klein-base-9b-lora"
"flux1.1-pro"
"gpt-image-1"
"qwen-image-2512-lora"
"qwen-image-edit-2509-lora"
"qwen-image-edit-2511-lora"
"qwen-image-edit-lora"
"qwen-image-lora"
"sd-1_5"
"sd-1_5-composition"
"sd-1_5-lora"
"sd-xl"
"sd-xl-composition"
"sd-xl-lora"
"zimage-de-turbo-lora"
"zimage-lora"
"zimage-turbo-lora"
updatedAt: string

The model last update date as an ISO string (example: “2023-02-03T11:19:41.579Z”)

accessRestrictions?: 0 | 100 | 25 | 2 more

The access restrictions of the model:

  • 0: Free plan
  • 25: Creator plan
  • 50: Pro plan
  • 75: Team plan
  • 100: Enterprise plan

One of the following:
0
100
25
50
75
authorId?: string

The author user ID (example: “user_VFhihHKMRZyDDnZAJwLb2Q”)

class?: Class { category, conceptPrompt, modelId, 5 more }

The class of the model

category: string

The category slug of the class (example: “art-style”)

conceptPrompt: string

The concept prompt of the class (example: “a sks character design”)

modelId: string

The model ID of the class (example: “stable-diffusion-v1-5”)

name: string

The class name (example: “Character Design”)

prompt: string

The class prompt (example: “a character design”)

slug: string

The class slug (example: “art-style-character-design”)

status: "published" | "unpublished"

The class status (only published classes are listed, but unpublished classes can still appear in existing models)

One of the following:
"published"
"unpublished"
thumbnails: Array<string>

Some example image URLs to showcase the class

compliantModelIds?: Array<string>

List of base model IDs compliant with the model (example: [“flux.1-dev”, “flux.1-schnell”]). This attribute is mainly used for Flux LoRA models.

concepts?: Array<Concept>

The concepts of the model. Required for models of type composition.

modelId: string

The model ID (example: “model_eyVcnFJcR92BxBkz7N6g5w”)

scale: number

The scale of the model (example: 1.0). For Flux Kontext Prompt Editing, the scale is between 0 and 2.

Maximum: 2
Minimum: -2
modelEpoch?: string

The epoch of the model (example: “000001”). Only available for Flux LoRA trained models.

epoch?: string

The epoch of the model. Only available for Flux LoRA trained models. If not set, uses the final model epoch (latest)

epochs?: Array<Epoch>

The epochs of the model. Only available for Flux LoRA trained models.

epoch: string

The epoch hash to identify the epoch

assets?: Array<Asset>

The assets of the epoch, if sample prompts have been supplied during training

assetId: string

The AssetId of the image during training (example: “asset_GTrL3mq4SXWyMxkOHRxlpw”)

url: string

The url of the asset
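
Epochs pair naturally with the Download endpoint listed at the top of this page; a sketch requesting a download job for a model's first epoch, assuming an initialized SDK client and a placeholder model ID.

```ts
const { model } = await client.models.retrieve("model_xxxxxxxxxxxxxxxxxxxxxx");
const firstEpoch = model.epochs?.[0]?.epoch;
if (firstEpoch) {
  const { jobId } = await client.models.download(model.id, { modelEpoch: firstEpoch });
  console.log("download job:", jobId);
}
```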

inputs?: Array<Input>

The inputs of the model. Only used for custom models. To retrieve this list, fetch the model by its modelId with GET /models/{modelId}

name: string

The name that must be used to call the model through the API

type: "boolean" | "file" | "file_array" | 7 more

The data type of the input

One of the following:
"boolean"
"file"
"file_array"
"inputs_array"
"model"
"model_array"
"number"
"number_array"
"string"
"string_array"
allowedValues?: Array<unknown>

The allowed values for the input. For `string` or `number` types, creates a single-select dropdown. For `string_array` type, creates a multi-select dropdown.

backgroundBehavior?: "opaque" | "transparent"

Specifies the background behavior for the input. Only available for `file` and `file_array` input types with kind `image`.

One of the following:
"opaque"
"transparent"
color?: boolean

Whether the input is a color or not. Only available for `string` input type.

costImpact?: boolean

Whether this input affects the model’s cost calculation

default?: unknown

The default value for the input

description?: string

Help text displayed in the UI to provide additional information about the input

group?: string

Used to visually group inputs together in the UI. Inputs with the same group value appear consecutively in the UI.

hint?: string

Hint text displayed in the UI as a tooltip to guide the user

inputs?: Array<Record<string, unknown>>

The list of inputs that form an object within a container array. Each nested input follows the same schema as this object. Only available for the `inputs_array` input type.

kind?: "3d" | "audio" | "document" | 4 more

The asset kind of the input. Only taken into account for `file` and `file_array` input types. If the model accepts multiple kinds, the API cannot create the asset on the fly from a data URL unless the URL carries an explicit `data:<kind>,` prefix.

One of the following:
"3d"
"audio"
"document"
"image"
"image-hdr"
"json"
"video"
label?: string

The label displayed in the UI for this input

maskFrom?: string

The name of the file input field to use as the mask source

max?: number

The maximum allowed value. Only available for `number` and `array` input types.

maxLength?: number

The maximum allowed length for `string` inputs. Also applies to each item in `string_array`.

maxSize?: number

The maximum allowed file size in bytes. Only applies to `file` and `file_array` input types. Validated against `asset.properties.size` at job creation time.

min?: number

The minimum allowed value. Only available for `number` and `array` input types.

minLength?: number

The minimum allowed length for `string` inputs. Also applies to each item in `string_array`.

modelTypes?: Array<"custom" | "elevenlabs-voice" | "flux.1" | 34 more>

The allowed model types for this input. Example: `[“flux.1-lora”]`. Only available for `model_array` input type.

One of the following:
"custom"
"elevenlabs-voice"
"flux.1"
"flux.1-composition"
"flux.1-kontext-dev"
"flux.1-kontext-lora"
"flux.1-krea-dev"
"flux.1-krea-lora"
"flux.1-lora"
"flux.1-pro"
"flux.1.1-pro-ultra"
"flux.2-dev-edit-lora"
"flux.2-dev-lora"
"flux.2-klein-4b-edit-lora"
"flux.2-klein-4b-lora"
"flux.2-klein-9b-edit-lora"
"flux.2-klein-9b-lora"
"flux.2-klein-base-4b-edit-lora"
"flux.2-klein-base-4b-lora"
"flux.2-klein-base-9b-edit-lora"
"flux.2-klein-base-9b-lora"
"flux1.1-pro"
"gpt-image-1"
"qwen-image-2512-lora"
"qwen-image-edit-2509-lora"
"qwen-image-edit-2511-lora"
"qwen-image-edit-lora"
"qwen-image-lora"
"sd-1_5"
"sd-1_5-composition"
"sd-1_5-lora"
"sd-xl"
"sd-xl-composition"
"sd-xl-lora"
"zimage-de-turbo-lora"
"zimage-lora"
"zimage-turbo-lora"
parent?: boolean

Whether this input represents a parent asset to assign to the produced assets. Only available for `file` and `file_array` input types.

For `file_array`, the parent asset is the first item in the array.

placeholder?: string

Placeholder text for the input. Only available for `string` input type.

prompt?: boolean

Whether the input is a prompt. When true, displays as a text area with the prompt spark feature. Only available for `string` input type.

promptSpark?: boolean

Whether the input is used with prompt spark. Only available for `string` input type.

required?: Required { always, conditionalValues, ifDefined, ifNotDefined }

Set of rules that describes when this input is required:

  • `always`: Input is always required
  • `ifNotDefined`: Input is required when another specified input is not defined
  • `ifDefined`: Input is required when another specified input is defined
  • `conditionalValues`: Input is required when another input has a specific value

By default, the input is not required.

always?: boolean

Whether the input is always required

conditionalValues?: unknown

Makes this input required when another input has a specific value:

  • Key: name of the input to check
  • Value: operation and allowed values that trigger the requirement
ifDefined?: unknown

Makes this input required when another input is defined:

  • Key: name of the input that must be defined
  • Value: message to display when this input is required
ifNotDefined?: unknown

Makes this input required when another input is not defined:

  • Key: name of the input that must be undefined
  • Value: message to display when this input is required
step?: number

The step increment for numeric inputs. Only available for `number` input type.

Minimum: 1
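
A sketch of inspecting a custom model's inputs after retrieving it, e.g. to drive a dynamic form; only a few of the fields above are used, and the model ID is a placeholder.

```ts
const { model } = await client.models.retrieve("model_xxxxxxxxxxxxxxxxxxxxxx");
for (const input of model.inputs ?? []) {
  // `name` is the key to send when calling the model through the API.
  const requiredFlag = input.required?.always ? " (required)" : "";
  console.log(`${input.name}: ${input.type}${requiredFlag}`);
  if (input.type === "number") {
    console.log(`  min=${input.min} max=${input.max} step=${input.step}`);
  }
}
```
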
modelKeyword?: string

The model keyword. This is a legacy parameter; use conceptPrompt in parameters instead.

name?: string

The model name (example: “Cinematic Realism”)

negativePromptEmbedding?: string

Fine-tune the model’s inferences with negative prompt embedding

ownerId?: string

The owner ID (example: “team_VFhihHKMRZyDDnZAJwLb2Q”)

parameters?: Parameters { age, batchSize, classPrompt, 29 more }

The parameters of the model

age?: string

Age group of the voice (for professional cloning)

Only available for ElevenLabs voice training

batchSize?: number

The batch size. A larger batch size results in fewer steps and will increase the learning rate.

Only available for Flux LoRA training

Maximum: 4
Minimum: 1
classPrompt?: string

The prompt to specify images in the same class as provided instance images

Only available for SD15 training

cloneType?: string

Type of voice cloning: “instant” (fast) or “professional” (higher quality, requires captcha)

Only available for ElevenLabs voice training

conceptPrompt?: string

The prompt with identifier specifying the instance (or subject) of the class (example: “a daiton dog”)

Default value varies depending on the model type:

  • For SD1.5: “daiton” if no class is associated with the model
  • For SDXL: “daiton”
  • For Flux: ""
gender?: string

Gender of the voice (for professional cloning)

Only available for ElevenLabs voice training

language?: string

Language of the audio samples (ISO 639-1 code)

Only available for ElevenLabs voice training

learningRate?: number

Initial learning rate (after the potential warmup period)

Default value varies depending on the model type:

  • For SD1.5 and SDXL: 0.000005
  • For Flux: 0.0001
Minimum: 0 (exclusive)
learningRateTextEncoder?: number

Initial learning rate (after the potential warmup period) for the text encoder

Maximum [Flux LoRA: 0.001] Default [SDXL: 0.00005 | Flux LoRA: 0.00001] Minimum [SDXL: 0 | Flux LoRA: 0.000001]

Maximum: 0.001
Minimum: 0 (exclusive)
learningRateUnet?: number

Initial learning rate (after the potential warmup period) for the UNet

Only available for SDXL LoRA training

Minimum: 0 (exclusive)
lrScheduler?: "constant" | "constant-with-warmup" | "cosine" | 3 more

The scheduler type to use (default: “constant”)

Only available for SD15 and SDXL LoRA training

One of the following:
"constant"
"constant-with-warmup"
"cosine"
"cosine-with-restarts"
"linear"
"polynomial"
maxTrainSteps?: number

Maximum number of training steps to execute (default: varies depending on the model type)

For SDXL LoRA training, please use numTextTrainSteps and numUNetTrainSteps instead

Default value varies depending on the model type:

  • For SD1.5: round((number of training images * 225) / 3)
  • For SDXL: number of training images * 175
  • For Flux: number of training images * 100

Maximum value varies depending on the model type:

  • For SD1.5 and SDXL: [0, 40000]
  • For Flux: [0, 10000]
Maximum: 40000
Minimum: 0
nbEpochs?: number

The number of epochs to train for

Only available for Flux LoRA training

Maximum: 30
Minimum: 1
nbRepeats?: number

The number of times to repeat the training

Only available for Flux LoRA training

Maximum: 30
Minimum: 1
numTextTrainSteps?: number

The number of training steps for the text encoder

Only available for SDXL LoRA training

Maximum: 40000
Minimum: 0
numUNetTrainSteps?: number

The number of training steps for the UNet

Only available for SDXL LoRA training

Maximum: 40000
Minimum: 0
optimizeFor?: "all" | "likeness" | "none"

Optimize the model training task for a specific type of input images. The available values are:

  • “likeness”: optimize training for likeness or portrait (targets specific transformer blocks)
  • “all”: train all transformer blocks
  • “none”: train no specific transformer blocks

This parameter controls which double and single transformer blocks are trained during the LoRA training process.

Only available for Flux LoRA training

priorLossWeight?: number

The weight of prior preservation loss

Only available for SD15 and SDXL LoRA training

Maximum: 1.7976931348623157
Minimum: 0 (exclusive)
randomCrop?: boolean

Whether to random-crop (instead of center-crop) images before resizing to the working resolution

Only available for SD15 and SDXL LoRA training

randomCropRatio?: number

Ratio of random crops

Only available for SD15 and SDXL LoRA training

Maximum: 1
Minimum: 0
randomCropScale?: number

Scale of random crops

Only available for SD15 and SDXL LoRA training

Maximum: 1
Minimum: 0
rank?: number

The dimension of the LoRA update matrices

Only available for SDXL (deprecated), Flux LoRA and Musubi training

Default value varies depending on the model type:

  • For SDXL (deprecated): 64
  • For Flux: 16
  • For Musubi: 64

Each trainer enforces its own tighter limit (Flux LoRA: [2; 64], Musubi: [2; 128])

Maximum: 128
Minimum: 2
removeBackgroundNoise?: boolean

Whether to remove background noise from audio samples before cloning. When enabled, each sample must be at least 5 seconds long.

Only available for ElevenLabs voice training

samplePrompts?: Array<string>

The prompts to use for each epoch. Only available for Flux LoRA training.

sampleSourceImages?: Array<string>

The sample prompt images (AssetIds) paired with samplePrompts. Must be the same length as samplePrompts. Only available for Flux LoRA training.

scaleLr?: boolean

Whether to scale the learning rate

Note: Legacy parameter, will be ignored

Only available for SD15 and SDXL LoRA training

seed?: number

Used to reproduce previous results. Default: randomly generated number.

Only available for SD15 and SDXL LoRA training

Maximum: 9007199254740991
Minimum: 0
textEncoderTrainingRatio?: number

The ratio of training steps spent on the text encoder (the remainder train the UNet)

Example: for 100 steps and a value of 0.2, the text encoder is trained for 20 steps, then the UNet for the remaining 80 steps

Note: Legacy parameter, please use numTextTrainSteps and numUNetTrainSteps

Only available for SD15 and SDXL LoRA training

Maximum: 0.99
Minimum: 0
validationFrequency?: number

Validation frequency. Cannot be greater than the maxTrainSteps value

Only available for SD15 and SDXL LoRA training

Minimum: 0
validationPrompt?: string

Validation prompt

Only available for SD15 and SDXL LoRA training

voiceDescription?: string

Description of the voice characteristics

Only available for ElevenLabs voice training

wandbKey?: string

The Weights & Biases key to use for logging. The maximum length is 40 characters

parentModelId?: string

The id of the parent model

performanceStats?: PerformanceStats { variants, default }

Aggregated performance stats

variants: Array<Variant>

Performance metrics per variant

capability: string

The generation capability (example: “txt2img”, “img2video”, “txt2audio”)

computedAt: string

When these stats were last computed (ISO date)

variantKey: string

Unique variant identifier (example: “txt2img:1K”, “img2video:2K”, “txt2audio”)

arenaScore?: ArenaScore { arenaCategory, arenaModelName, fetchedAt, 5 more }

External quality score from arena.ai leaderboard

arenaCategory: string

Arena category (example: “text_to_image”, “image_to_video”)

arenaModelName: string

Model name on arena.ai

fetchedAt: string

When this score was last fetched (ISO date)

rank: number

Rank in the arena category

rating: number

ELO rating

ratingLower: number

ELO rating confidence interval lower bound

ratingUpper: number

ELO rating confidence interval upper bound

votes: number

Number of human votes

costPerAssetMaxCU?: number

Maximum cost per output asset (CU)

costPerAssetMinCU?: number

Minimum cost per output asset (CU)

costPerAssetP50CU?: number

Median cost per output asset (CU)

inferenceLatencyP50Sec?: number

Inference latency P50 per output asset (seconds)

inferenceLatencyP75Sec?: number

Inference latency P75 per output asset (seconds)

resolution?: string

The resolution bucket (example: “0.5K”, “1K”, “2K”, “4K”)

totalLatencyP50Sec?: number

Total latency P50 per output asset, including queue time (seconds)

totalLatencyP75Sec?: number

Total latency P75 per output asset, including queue time (seconds)

default?: string

Default variant key for quick model comparison

promptEmbedding?: string

Fine-tune the model’s inferences with prompt embedding

shortDescription?: string

The model short description (example: “This model generates highly detailed cinematic scenes.”)

softDeletionOn?: string

The date when the model will be soft deleted (only for Free plan)

thumbnail?: Thumbnail { assetId, url }

A thumbnail for your model

assetId: string

The AssetId of the image used as a thumbnail for your model (example: “asset_GTrL3mq4SXWyMxkOHRxlpw”)

url: string

The url of the image used as a thumbnail for your model

trainingImagePairs?: Array<TrainingImagePair>

Array of training image pairs

instruction?: string

The instruction for the image pair, source to target

sourceId?: string

The source asset ID (must be a training asset)

targetId?: string

The target asset ID (must be a training asset)

trainingImages?: Array<TrainingImage>

The URLs of the first 3 training images of the model. To retrieve the full set of images, fetch the model by its modelId (GET /models/{modelId})

id: string

The training image ID (example: “asset_GTrL3mq4SXWyMxkOHRxlpw”)

automaticCaptioning: string

Automatic captioning of the image

createdAt: string

The training image upload date as an ISO string (example: “2023-02-03T11:19:41.579Z”)

description: string

Description for the image

downloadUrl: string

The URL of the image

name: string

The original file name of the image (example: “my-training-image.jpg”)

trainingProgress?: TrainingProgress { stage, updatedAt, position, 3 more }

Additional information about the training progress of the model

stage: "pending" | "pending-captcha" | "queued-for-train" | 2 more

The stage of the request

One of the following:
"pending"
"pending-captcha"
"queued-for-train"
"running-train"
"starting-train"
updatedAt: number

Timestamp in milliseconds of the last time the training progress was updated

position?: number

Position of the job in the queue (i.e. the number of jobs in the queue before this one)

progress?: number

The progress of the job

Maximum: 1
Minimum: 0
remainingTimeMs?: number

The remaining time in milliseconds

startedAt?: number

The timestamp in milliseconds marking the start of the process

trainingStats?: TrainingStats { endedAt, queueDuration, startedAt, trainDuration }

Additional information about the model’s training

endedAt?: string

The training end time as an ISO date string

queueDuration?: number

The training queued duration in seconds

startedAt?: string

The training start time as an ISO date string

trainDuration?: number

The training duration in seconds

uiConfig?: UiConfig { inputProperties, lorasComponent, presets, 3 more }

The UI configuration for the model

inputProperties?: Record<string, InputProperties>

Configuration for the input properties

collapsed?: boolean
lorasComponent?: LorasComponent { label, modelInput, scaleInput, modelIdInput }

Configuration for the loras component

label: string

The label of the component

modelInput: string

The input name of the model (model_array)

scaleInput: string

The input name of the scale (number_array)

modelIdInput?: string

The input model id (example: a composition or a single LoRA modelId). If specified, the model id will be attached to the output asset as metadata. If the model-decomposer parser is specified on it, modelInput and scaleInput will be populated automatically.

presets?: Array<Preset>

Configuration for the presets

fields: Array<string>
presets: unknown
resolutionComponent?: ResolutionComponent { heightInput, label, presets, widthInput }

Configuration for the resolution component

heightInput: string

The input name of the height

label: string

The label of the component

presets: Array<Preset>

The resolution presets

height: number
label: string
width: number
widthInput: string

The input name of the width

selects?: Record<string, unknown>

Configuration for the selects

triggerGenerate?: TriggerGenerate { label, after, position }

Configuration for the trigger generate button

label: string
after?: string

The `name` of the input where the trigger generate button will be displayed (after the input). Do not specify both position and after.

position?: "bottom" | "top"

The position of the trigger generate button. If specified, the button will be displayed at that position. Do not specify both position and after.

One of the following:
"bottom"
"top"
userId?: string

(Deprecated) The user ID (example: “user_VFhihHKMRZyDDnZAJwLb2Q”)

ModelUpdateResponse { model }
model: Model { id, capabilities, collectionIds, 35 more }
id: string

The model ID (example: “model_eyVcnFJcR92BxBkz7N6g5w”)

capabilities: Array<"3d23d" | "audio2audio" | "audio2video" | 29 more>

List of model capabilities (example: [“txt2img”, “img2img”, “txt2img_ip_adapter”, …])

One of the following:
"3d23d"
"audio2audio"
"audio2video"
"controlnet"
"controlnet_img2img"
"controlnet_inpaint"
"controlnet_inpaint_ip_adapter"
"controlnet_ip_adapter"
"controlnet_reference"
"controlnet_texture"
"img23d"
"img2img"
"img2img_ip_adapter"
"img2img_texture"
"img2txt"
"img2video"
"inpaint"
"inpaint_ip_adapter"
"outpaint"
"reference"
"reference_texture"
"txt23d"
"txt2audio"
"txt2img"
"txt2img_ip_adapter"
"txt2img_texture"
"txt2txt"
"txt2video"
"video23d"
"video2audio"
"video2img"
"video2video"
collectionIds: Array<string>

A list of the CollectionIds this model belongs to

createdAt: string

The model creation date as an ISO string (example: “2023-02-03T11:19:41.579Z”)

custom: boolean

Whether the model is a custom model and can be used only with POST /generate/custom/{modelId} endpoint

exampleAssetIds: Array<string>

List of all example asset IDs set up by the model owner

privacy: "private" | "public" | "unlisted"

The privacy of the model (default: private)

One of the following:
"private"
"public"
"unlisted"
source: "civitai" | "huggingface" | "other" | "scenario"

The source of the model

One of the following:
"civitai"
"huggingface"
"other"
"scenario"
status: "copying" | "failed" | "new" | 3 more

The model status

One of the following:
"copying"
"failed"
"new"
"trained"
"training"
"training-canceled"
tags: Array<string>

The associated tags (example: [“sci-fi”, “landscape”])

trainingImagesNumber: number

The total number of training images

type: "custom" | "elevenlabs-voice" | "flux.1" | 34 more

The model type (example: “flux.1-lora”)

One of the following:
"custom"
"elevenlabs-voice"
"flux.1"
"flux.1-composition"
"flux.1-kontext-dev"
"flux.1-kontext-lora"
"flux.1-krea-dev"
"flux.1-krea-lora"
"flux.1-lora"
"flux.1-pro"
"flux.1.1-pro-ultra"
"flux.2-dev-edit-lora"
"flux.2-dev-lora"
"flux.2-klein-4b-edit-lora"
"flux.2-klein-4b-lora"
"flux.2-klein-9b-edit-lora"
"flux.2-klein-9b-lora"
"flux.2-klein-base-4b-edit-lora"
"flux.2-klein-base-4b-lora"
"flux.2-klein-base-9b-edit-lora"
"flux.2-klein-base-9b-lora"
"flux1.1-pro"
"gpt-image-1"
"qwen-image-2512-lora"
"qwen-image-edit-2509-lora"
"qwen-image-edit-2511-lora"
"qwen-image-edit-lora"
"qwen-image-lora"
"sd-1_5"
"sd-1_5-composition"
"sd-1_5-lora"
"sd-xl"
"sd-xl-composition"
"sd-xl-lora"
"zimage-de-turbo-lora"
"zimage-lora"
"zimage-turbo-lora"
updatedAt: string

The model last update date as an ISO string (example: “2023-02-03T11:19:41.579Z”)

accessRestrictions?: 0 | 100 | 25 | 2 more

The access restrictions of the model:

  • 0: Free plan
  • 25: Creator plan
  • 50: Pro plan
  • 75: Team plan
  • 100: Enterprise plan

One of the following:
0
100
25
50
75
authorId?: string

The author user ID (example: “user_VFhihHKMRZyDDnZAJwLb2Q”)

class?: Class { category, conceptPrompt, modelId, 5 more }

The class of the model

category: string

The category slug of the class (example: “art-style”)

conceptPrompt: string

The concept prompt of the class (example: “a sks character design”)

modelId: string

The model ID of the class (example: “stable-diffusion-v1-5”)

name: string

The class name (example: “Character Design”)

prompt: string

The class prompt (example: “a character design”)

slug: string

The class slug (example: “art-style-character-design”)

status: "published" | "unpublished"

The class status (only published classes are listed, but unpublished classes can still appear in existing models)

One of the following:
"published"
"unpublished"
thumbnails: Array<string>

Some example image URLs to showcase the class

compliantModelIds?: Array<string>

List of base model IDs compliant with the model (example: [“flux.1-dev”, “flux.1-schnell”]). This attribute is mainly used for Flux LoRA models.

concepts?: Array<Concept>

The concepts of the model. Required for models of type composition; a usage sketch follows the constraints below.

modelId: string

The model ID (example: “model_eyVcnFJcR92BxBkz7N6g5w”)

scale: number

The scale of the model (example: 1.0). For Flux Kontext Prompt Editing, the scale is between 0 and 2.

Maximum: 2
Minimum: -2
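
Because concepts is also accepted by the Update endpoint listed at the top of this page, a sketch that sets the concept mix on a hypothetical composition model; all IDs and scales are placeholders.

```ts
const { model } = await client.models.update("model_xxxxxxxxxxxxxxxxxxxxxx", {
  concepts: [
    { modelId: "model_aaaaaaaaaaaaaaaaaaaaaa", scale: 0.8 },
    { modelId: "model_bbbbbbbbbbbbbbbbbbbbbb", scale: 0.5 },
  ],
});
```
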
modelEpoch?: string

The epoch of the model (example: “000001”). Only available for Flux LoRA trained models.

epoch?: string

The epoch of the model. Only available for Flux LoRA trained models. If not set, uses the final model epoch (latest)

epochs?: Array<Epoch>

The epochs of the model. Only available for Flux LoRA trained models.

epoch: string

The epoch hash to identify the epoch

assets?: Array<Asset>

The assets of the epoch, if sample prompts have been supplied during training

assetId: string

The AssetId of the image during training (example: “asset_GTrL3mq4SXWyMxkOHRxlpw”)

url: string

The url of the asset

inputs?: Array<Input>

The inputs of the model. Only used for custom models. To retrieve this list, fetch the model by its modelId with GET /models/{modelId}

name: string

The name that must be used to call the model through the API

type: "boolean" | "file" | "file_array" | 7 more

The data type of the input

One of the following:
"boolean"
"file"
"file_array"
"inputs_array"
"model"
"model_array"
"number"
"number_array"
"string"
"string_array"
allowedValues?: Array<unknown>

The allowed values for the input. For `string` or `number` types, creates a single-select dropdown. For `string_array` type, creates a multi-select dropdown.

backgroundBehavior?: "opaque" | "transparent"

Specifies the background behavior for the input. Only available for `file` and `file_array` input types with kind `image`.

One of the following:
"opaque"
"transparent"
color?: boolean

Whether the input is a color or not. Only available for `string` input type.

costImpact?: boolean

Whether this input affects the model’s cost calculation

default?: unknown

The default value for the input

description?: string

Help text displayed in the UI to provide additional information about the input

group?: string

Used to visually group inputs together in the UI. Inputs with the same group value appear consecutively in the UI.

hint?: string

Hint text displayed in the UI as a tooltip to guide the user

inputs?: Array<Record<string, unknown>>

The list of inputs that form an object within a container array. Each nested input follows the same schema as this object. Only available for the `inputs_array` input type.

kind?: "3d" | "audio" | "document" | 4 more

The asset kind of the input. Only taken into account for `file` and `file_array` input types. If the model accepts multiple kinds, the API cannot create the asset on the fly from a data URL unless the URL carries an explicit `data:<kind>,` prefix.

One of the following:
"3d"
"audio"
"document"
"image"
"image-hdr"
"json"
"video"
label?: string

The label displayed in the UI for this input

maskFrom?: string

The name of the file input field to use as the mask source

max?: number

The maximum allowed value. Only available for `number` and `array` input types.

maxLength?: number

The maximum allowed length for `string` inputs. Also applies to each item in `string_array`.

maxSize?: number

The maximum allowed file size in bytes. Only applies to `file` and `file_array` input types. Validated against `asset.properties.size` at job creation time.

min?: number

The minimum allowed value. Only available for `number` and `array` input types.

minLength?: number

The minimum allowed length for `string` inputs. Also applies to each item in `string_array`.

modelTypes?: Array<"custom" | "elevenlabs-voice" | "flux.1" | 34 more>

The allowed model types for this input. Example: `[“flux.1-lora”]`. Only available for `model_array` input type.

One of the following:
"custom"
"elevenlabs-voice"
"flux.1"
"flux.1-composition"
"flux.1-kontext-dev"
"flux.1-kontext-lora"
"flux.1-krea-dev"
"flux.1-krea-lora"
"flux.1-lora"
"flux.1-pro"
"flux.1.1-pro-ultra"
"flux.2-dev-edit-lora"
"flux.2-dev-lora"
"flux.2-klein-4b-edit-lora"
"flux.2-klein-4b-lora"
"flux.2-klein-9b-edit-lora"
"flux.2-klein-9b-lora"
"flux.2-klein-base-4b-edit-lora"
"flux.2-klein-base-4b-lora"
"flux.2-klein-base-9b-edit-lora"
"flux.2-klein-base-9b-lora"
"flux1.1-pro"
"gpt-image-1"
"qwen-image-2512-lora"
"qwen-image-edit-2509-lora"
"qwen-image-edit-2511-lora"
"qwen-image-edit-lora"
"qwen-image-lora"
"sd-1_5"
"sd-1_5-composition"
"sd-1_5-lora"
"sd-xl"
"sd-xl-composition"
"sd-xl-lora"
"zimage-de-turbo-lora"
"zimage-lora"
"zimage-turbo-lora"
parent?: boolean

Whether this input represents a parent asset to assign to the produced assets. Only available for `file` and `file_array` input types.

For `file_array`, the parent asset is the first item in the array.

placeholder?: string

Placeholder text for the input. Only available for `string` input type.

prompt?: boolean

Whether the input is a prompt. When true, displays as a text area with the prompt spark feature. Only available for `string` input type.

promptSpark?: boolean

Whether the input is used with prompt spark. Only available for `string` input type.

required?: Required { always, conditionalValues, ifDefined, ifNotDefined }

Set of rules that describes when this input is required:

  • `always`: Input is always required
  • `ifNotDefined`: Input is required when another specified input is not defined
  • `ifDefined`: Input is required when another specified input is defined
  • `conditionalValues`: Input is required when another input has a specific value

By default, the input is not required.

always?: boolean

Whether the input is always required

conditionalValues?: unknown

Makes this input required when another input has a specific value:

  • Key: name of the input to check
  • Value: operation and allowed values that trigger the requirement
ifDefined?: unknown

Makes this input required when another input is defined:

  • Key: name of the input that must be defined
  • Value: message to display when this input is required
ifNotDefined?: unknown

Makes this input required when another input is not defined:

  • Key: name of the input that must be undefined
  • Value: message to display when this input is required
step?: number

The step increment for numeric inputs. Only available for `number` input type.

Minimum: 1
modelKeyword?: string

The model keyword. This is a legacy parameter; use conceptPrompt in parameters instead.

name?: string

The model name (example: “Cinematic Realism”)

negativePromptEmbedding?: string

Fine-tune the model’s inferences with negative prompt embedding

ownerId?: string

The owner ID (example: “team_VFhihHKMRZyDDnZAJwLb2Q”)

parameters?: Parameters { age, batchSize, classPrompt, 29 more }

The parameters of the model

age?: string

Age group of the voice (for professional cloning)

Only available for ElevenLabs voice training

batchSize?: number

The batch size. A larger batch size results in fewer steps and will increase the learning rate.

Only available for Flux LoRA training

Maximum: 4
Minimum: 1
classPrompt?: string

The prompt to specify images in the same class as provided instance images

Only available for SD15 training

cloneType?: string

Type of voice cloning: “instant” (fast) or “professional” (higher quality, requires captcha)

Only available for ElevenLabs voice training

conceptPrompt?: string

The prompt with identifier specifying the instance (or subject) of the class (example: “a daiton dog”)

Default value varies depending on the model type:

  • For SD1.5: “daiton” if no class is associated with the model
  • For SDXL: “daiton”
  • For Flux: ""
gender?: string

Gender of the voice (for professional cloning)

Only available for ElevenLabs voice training

language?: string

Language of the audio samples (ISO 639-1 code)

Only available for ElevenLabs voice training

learningRate?: number

Initial learning rate (after the potential warmup period)

Default value varies depending on the model type:

  • For SD1.5 and SDXL: 0.000005
  • For Flux: 0.0001
Minimum: 0 (exclusive)
learningRateTextEncoder?: number

Initial learning rate (after the potential warmup period) for the text encoder

Maximum [Flux LoRA: 0.001] Default [SDXL: 0.00005 | Flux LoRA: 0.00001] Minimum [SDXL: 0 | Flux LoRA: 0.000001]

Maximum: 0.001
Minimum: 0 (exclusive)
learningRateUnet?: number

Initial learning rate (after the potential warmup period) for the UNet

Only available for SDXL LoRA training

Minimum: 0 (exclusive)
lrScheduler?: "constant" | "constant-with-warmup" | "cosine" | 3 more

The scheduler type to use (default: “constant”)

Only available for SD15 and SDXL LoRA training

One of the following:
"constant"
"constant-with-warmup"
"cosine"
"cosine-with-restarts"
"linear"
"polynomial"
maxTrainSteps?: number

Maximum number of training steps to execute (default: varies depending on the model type)

For SDXL LoRA training, please use numTextTrainSteps and numUNetTrainSteps instead

Default value varies depending on the model type:

  • For SD1.5: round((number of training images * 225) / 3)
  • For SDXL: number of training images * 175
  • For Flux: number of training images * 100

Maximum value varies depending on the model type:

  • For SD1.5 and SDXL: [0, 40000]
  • For Flux: [0, 10000]
Maximum: 40000
Minimum: 0
nbEpochs?: number

The number of epochs to train for

Only available for Flux LoRA training

Maximum: 30
Minimum: 1
nbRepeats?: number

The number of times to repeat the training

Only available for Flux LoRA training

Maximum: 30
Minimum: 1
numTextTrainSteps?: number

The number of training steps for the text encoder

Only available for SDXL LoRA training

Maximum: 40000
Minimum: 0
numUNetTrainSteps?: number

The number of training steps for the UNet

Only available for SDXL LoRA training

Maximum: 40000
Minimum: 0
optimizeFor?: "all" | "likeness" | "none"

Optimize the model training task for a specific type of input images. The available values are:

  • “likeness”: optimize training for likeness or portrait (targets specific transformer blocks)
  • “all”: train all transformer blocks
  • “none”: train no specific transformer blocks

This parameter controls which double and single transformer blocks are trained during the LoRA training process.

Only available for Flux LoRA training

priorLossWeight?: number

The weight of prior preservation loss

Only available for SD15 and SDXL LoRA training

Maximum: 1.7976931348623157
Minimum: 0 (exclusive)
randomCrop?: boolean

Whether to random-crop (instead of center-crop) images before resizing to the working resolution

Only available for SD15 and SDXL LoRA training

randomCropRatio?: number

Ratio of random crops

Only available for SD15 and SDXL LoRA training

Maximum: 1
Minimum: 0
randomCropScale?: number

Scale of random crops

Only available for SD15 and SDXL LoRA training

Maximum: 1
Minimum: 0
rank?: number

The dimension of the LoRA update matrices

Only available for SDXL (deprecated), Flux LoRA and Musubi training

Default value varies depending on the model type:

  • For SDXL (deprecated): 64
  • For Flux: 16
  • For Musubi: 64

Each trainer enforces its own tighter limit (Flux LoRA: [2; 64], Musubi: [2; 128])

Maximum: 128
Minimum: 2
removeBackgroundNoise?: boolean

Whether to remove background noise from audio samples before cloning. When enabled, each sample must be at least 5 seconds long.

Only available for ElevenLabs voice training

samplePrompts?: Array<string>

The prompts to use for each epoch. Only available for Flux LoRA training.

sampleSourceImages?: Array<string>

The sample prompt images (AssetIds) paired with samplePrompts. Must be the same length as samplePrompts. Only available for Flux LoRA training.

scaleLr?: boolean

Whether to scale the learning rate

Note: Legacy parameter, will be ignored

Only available for SD15 and SDXL LoRA training

seed?: number

Used to reproduce previous results. Default: randomly generated number.

Only available for SD15 and SDXL LoRA training

Maximum: 9007199254740991
Minimum: 0
textEncoderTrainingRatio?: number

The ratio of training steps spent on the text encoder (the remainder train the UNet)

Example: for 100 steps and a value of 0.2, the text encoder is trained for 20 steps, then the UNet for the remaining 80 steps

Note: Legacy parameter, please use numTextTrainSteps and numUNetTrainSteps

Only available for SD15 and SDXL LoRA training

Maximum: 0.99
Minimum: 0
validationFrequency?: number

Validation frequency. Cannot be greater than the maxTrainSteps value

Only available for SD15 and SDXL LoRA training

Minimum: 0
validationPrompt?: string

Validation prompt

Only available for SD15 and SDXL LoRA training

voiceDescription?: string

Description of the voice characteristics

Only available for ElevenLabs voice training

wandbKey?: string

The Weights & Biases key to use for logging. The maximum length is 40 characters

parentModelId?: string

The id of the parent model

performanceStats?: PerformanceStats { variants, default }

Aggregated performance stats

variants: Array<Variant>

Performance metrics per variant

capability: string

The generation capability (example: “txt2img”, “img2video”, “txt2audio”)

computedAt: string

When these stats were last computed (ISO date)

variantKey: string

Unique variant identifier (example: “txt2img:1K”, “img2video:2K”, “txt2audio”)

arenaScore?: ArenaScore { arenaCategory, arenaModelName, fetchedAt, 5 more }

External quality score from arena.ai leaderboard

arenaCategory: string

Arena category (example: “text_to_image”, “image_to_video”)

arenaModelName: string

Model name on arena.ai

fetchedAt: string

When this score was last fetched (ISO date)

rank: number

Rank in the arena category

rating: number

ELO rating

ratingLower: number

ELO rating confidence interval lower bound

ratingUpper: number

ELO rating confidence interval upper bound

votes: number

Number of human votes

costPerAssetMaxCU?: number

Maximum cost per output asset (CU)

costPerAssetMinCU?: number

Minimum cost per output asset (CU)

costPerAssetP50CU?: number

Median cost per output asset (CU)

inferenceLatencyP50Sec?: number

Inference latency P50 per output asset (seconds)

inferenceLatencyP75Sec?: number

Inference latency P75 per output asset (seconds)

resolution?: string

The resolution bucket (example: “0.5K”, “1K”, “2K”, “4K”)

totalLatencyP50Sec?: number

Total latency P50 per output asset, including queue time (seconds)

totalLatencyP75Sec?: number

Total latency P75 per output asset, including queue time (seconds)

default?: string

Default variant key for quick model comparison

promptEmbedding?: string

Fine-tune the model’s inferences with prompt embedding

shortDescription?: string

The model short description (example: “This model generates highly detailed cinematic scenes.”)

softDeletionOn?: string

The date when the model will be soft deleted (only for Free plan)

thumbnail?: Thumbnail { assetId, url }

A thumbnail for your model

assetId: string

The AssetId of the image used as a thumbnail for your model (example: “asset_GTrL3mq4SXWyMxkOHRxlpw”)

url: string

The url of the image used as a thumbnail for your model

trainingImagePairs?: Array<TrainingImagePair>

Array of training image pairs

instruction?: string

The instruction for the image pair, source to target

sourceId?: string

The source asset ID (must be a training asset)

targetId?: string

The target asset ID (must be a training asset)

trainingImages?: Array<TrainingImage>

The URLs of the first 3 training images of the model. To retrieve the full set of images, fetch the model by its modelId (GET /models/{modelId})

id: string

The training image ID (example: “asset_GTrL3mq4SXWyMxkOHRxlpw”)

automaticCaptioning: string

Automatic captioning of the image

createdAt: string

The training image upload date as an ISO string (example: “2023-02-03T11:19:41.579Z”)

description: string

Description for the image

downloadUrl: string

The URL of the image

name: string

The original file name of the image (example: “my-training-image.jpg”)

trainingProgress?: TrainingProgress { stage, updatedAt, position, 3 more }

Additional information about the training progress of the model

stage: "pending" | "pending-captcha" | "queued-for-train" | 2 more

The stage of the request

One of the following:
"pending"
"pending-captcha"
"queued-for-train"
"running-train"
"starting-train"
updatedAt: number

Timestamp in milliseconds of the last time the training progress was updated

position?: number

Position of the job in the queue (i.e. the number of jobs in the queue before this one)

progress?: number

The progress of the job

maximum1
minimum0
remainingTimeMs?: number

The remaining time in milliseconds

startedAt?: number

The timestamp in milliseconds marking the start of the process
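
These fields support client-side progress reporting while a model trains. A sketch of a polling loop (client construction is assumed and not shown here; the poll interval is arbitrary):

```ts
declare const client: any; // assumed: an initialized SDK client (construction not shown in this reference)

// Sketch: poll GET /models/{modelId} until training reaches a terminal status,
// logging the queue position and progress fields documented above.
async function waitForTraining(modelId: string) {
  for (;;) {
    const { model } = await client.models.retrieve(modelId);
    if (["trained", "failed", "training-canceled"].includes(model.status)) return model;
    const p = model.trainingProgress;
    if (p) {
      console.log(`stage=${p.stage} position=${p.position ?? "?"} progress=${Math.round((p.progress ?? 0) * 100)}%`);
    }
    await new Promise((r) => setTimeout(r, 10_000)); // arbitrary 10 s poll interval
  }
}
```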

trainingStats?: TrainingStats { endedAt, queueDuration, startedAt, trainDuration }

Additional information about the model’s training

endedAt?: string

The training end time as an ISO date string

queueDuration?: number

The training queued duration in seconds

startedAt?: string

The training start time as an ISO date string

trainDuration?: number

The training duration in seconds

uiConfig?: UiConfig { inputProperties, lorasComponent, presets, 3 more }

The UI configuration for the model

inputProperties?: Record<string, InputProperties>

Configuration for the input properties

collapsed?: boolean
lorasComponent?: LorasComponent { label, modelInput, scaleInput, modelIdInput }

Configuration for the loras component

label: string

The label of the component

modelInput: string

The input name of the model (model_array)

scaleInput: string

The input name of the scale (number_array)

modelIdInput?: string

The input model ID (example: a composition or a single LoRA modelId). If specified, the model ID will be attached to the output asset as metadata. If the model-decomposer parser is specified on it, modelInput and scaleInput will be populated automatically
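
As an illustration, a lorasComponent wiring a model_array input to a number_array input might look like this (the input names are hypothetical):

```ts
// Hypothetical lorasComponent configuration; "loras" and "loraScales" are made-up input names.
const lorasComponent = {
  label: "LoRAs",
  modelInput: "loras",      // name of a model_array input on the model
  scaleInput: "loraScales", // name of a number_array input holding one scale per LoRA
};
```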

presets?: Array<Preset>

Configuration for the presets

fields: Array<string>
presets: unknown
resolutionComponent?: ResolutionComponent { heightInput, label, presets, widthInput }

Configuration for the resolution component

heightInput: string

The input name of the height

label: string

The label of the component

presets: Array<Preset>

The resolution presets

height: number
label: string
width: number
widthInput: string

The input name of the width

selects?: Record<string, unknown>

Configuration for the selects

triggerGenerate?: TriggerGenerate { label, after, position }

Configuration for the trigger generate button

label: string
after?: string

The ‘name’ of the input after which the trigger generate button will be displayed. Do not specify both position and after.

position?: "bottom" | "top"

The position of the trigger generate button. If position is specified, the button will be displayed at that position. Do not specify both position and after.

One of the following:
"bottom"
"top"
userId?: string

(Deprecated) The user ID (example: “user_VFhihHKMRZyDDnZAJwLb2Q”)

ModelDeleteResponse = unknown
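
The delete endpoint returns no structured body, so callers typically just await the call. A minimal sketch (client setup assumed):

```ts
declare const client: any; // assumed: an initialized SDK client

// Sketch: delete a model by ID; the response body is untyped (unknown).
await client.models.delete("model_eyVcnFJcR92BxBkz7N6g5w");
```
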
ModelCopyResponse { model }
model: Model { id, capabilities, collectionIds, 35 more }
id: string

The model ID (example: “model_eyVcnFJcR92BxBkz7N6g5w”)

capabilities: Array<"3d23d" | "audio2audio" | "audio2video" | 29 more>

List of model capabilities (example: [“txt2img”, “img2img”, “txt2img_ip_adapter”, …])

One of the following:
"3d23d"
"audio2audio"
"audio2video"
"controlnet"
"controlnet_img2img"
"controlnet_inpaint"
"controlnet_inpaint_ip_adapter"
"controlnet_ip_adapter"
"controlnet_reference"
"controlnet_texture"
"img23d"
"img2img"
"img2img_ip_adapter"
"img2img_texture"
"img2txt"
"img2video"
"inpaint"
"inpaint_ip_adapter"
"outpaint"
"reference"
"reference_texture"
"txt23d"
"txt2audio"
"txt2img"
"txt2img_ip_adapter"
"txt2img_texture"
"txt2txt"
"txt2video"
"video23d"
"video2audio"
"video2img"
"video2video"
collectionIds: Array<string>

A list of CollectionId this model belongs to

createdAt: string

The model creation date as an ISO string (example: “2023-02-03T11:19:41.579Z”)

custom: boolean

Whether the model is a custom model and can be used only with POST /generate/custom/{modelId} endpoint

exampleAssetIds: Array<string>

List of all example asset IDs setup by the model owner

privacy: "private" | "public" | "unlisted"

The privacy of the model (default: private)

One of the following:
"private"
"public"
"unlisted"
source: "civitai" | "huggingface" | "other" | "scenario"

The source of the model

One of the following:
"civitai"
"huggingface"
"other"
"scenario"
status: "copying" | "failed" | "new" | 3 more

The model status

One of the following:
"copying"
"failed"
"new"
"trained"
"training"
"training-canceled"
tags: Array<string>

The associated tags (example: [“sci-fi”, “landscape”])

trainingImagesNumber: number

The total number of training images

type: "custom" | "elevenlabs-voice" | "flux.1" | 34 more

The model type (example: “flux.1-lora”)

One of the following:
"custom"
"elevenlabs-voice"
"flux.1"
"flux.1-composition"
"flux.1-kontext-dev"
"flux.1-kontext-lora"
"flux.1-krea-dev"
"flux.1-krea-lora"
"flux.1-lora"
"flux.1-pro"
"flux.1.1-pro-ultra"
"flux.2-dev-edit-lora"
"flux.2-dev-lora"
"flux.2-klein-4b-edit-lora"
"flux.2-klein-4b-lora"
"flux.2-klein-9b-edit-lora"
"flux.2-klein-9b-lora"
"flux.2-klein-base-4b-edit-lora"
"flux.2-klein-base-4b-lora"
"flux.2-klein-base-9b-edit-lora"
"flux.2-klein-base-9b-lora"
"flux1.1-pro"
"gpt-image-1"
"qwen-image-2512-lora"
"qwen-image-edit-2509-lora"
"qwen-image-edit-2511-lora"
"qwen-image-edit-lora"
"qwen-image-lora"
"sd-1_5"
"sd-1_5-composition"
"sd-1_5-lora"
"sd-xl"
"sd-xl-composition"
"sd-xl-lora"
"zimage-de-turbo-lora"
"zimage-lora"
"zimage-turbo-lora"
updatedAt: string

The model last update date as an ISO string (example: “2023-02-03T11:19:41.579Z”)

accessRestrictions?: 0 | 100 | 25 | 2 more

The access restrictions of the model. 0: Free plan, 25: Creator plan, 50: Pro plan, 75: Team plan, 100: Enterprise plan

One of the following:
0
100
25
50
75
authorId?: string

The author user ID (example: “user_VFhihHKMRZyDDnZAJwLb2Q”)

class?: Class { category, conceptPrompt, modelId, 5 more }

The class of the model

category: string

The category slug of the class (example: “art-style”)

conceptPrompt: string

The concept prompt of the class (example: “a sks character design”)

modelId: string

The model ID of the class (example: “stable-diffusion-v1-5”)

name: string

The class name (example: “Character Design”)

prompt: string

The class prompt (example: “a character design”)

slug: string

The class slug (example: “art-style-character-design”)

status: "published" | "unpublished"

The class status (only published classes are listed, but unpublished classes can still appear in existing models)

One of the following:
"published"
"unpublished"
thumbnails: Array<string>

Some example images URLs to showcase the class

compliantModelIds?: Array<string>

List of base model IDs compliant with the model (example: [“flux.1-dev”, “flux.1-schnell”]). This attribute is mainly used for Flux LoRA models

concepts?: Array<Concept>

Concepts are required for composition model types; see the sketch after the field descriptions below

modelId: string

The model ID (example: “model_eyVcnFJcR92BxBkz7N6g5w”)

scale: number

The scale of the model (example: 1.0). For Flux Kontext Prompt Editing, the scale is between 0 and 2.

maximum2
minimum-2
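
For example, a composition model's concepts array pairs member model IDs with scales in the documented range (the IDs below are placeholders):

```ts
// Hypothetical concepts list for a composition-type model; scales must stay within [-2, 2].
const concepts = [
  { modelId: "model_aaaaaaaaaaaaaaaaaaaaaa", scale: 1.0 },
  { modelId: "model_bbbbbbbbbbbbbbbbbbbbbb", scale: 0.6 },
];
```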
modelEpoch?: string

The epoch of the model (example: “000001”). Only available for Flux LoRA trained models

epoch?: string

The epoch of the model. Only available for Flux LoRA trained models. If not set, the final (latest) model epoch is used

epochs?: Array<Epoch>

The epochs of the model. Only available for Flux LoRA trained models.

epoch: string

The epoch hash to identify the epoch

assets?: Array<Asset>

The assets of the epoch, if sample prompts have been supplied during training

assetId: string

The AssetId of the image during training (example: “asset_GTrL3mq4SXWyMxkOHRxlpw”)

url: string

The url of the asset

inputs?: Array<Input>

The inputs of the model. Only used for custom models. To retrieve this list, fetch the model by modelId with GET /models/{modelId}

name: string

The name that must be used to call the model through the API

type: "boolean" | "file" | "file_array" | 7 more

The data type of the input

One of the following:
"boolean"
"file"
"file_array"
"inputs_array"
"model"
"model_array"
"number"
"number_array"
"string"
"string_array"
allowedValues?: Array<unknown>

The allowed values for the input. For `string` or `number` types, creates a single-select dropdown. For `string_array` type, creates a multi-select dropdown.
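
As a sketch, a custom-model string input restricted to a fixed set of values (all names below are hypothetical):

```ts
// Hypothetical input definition: allowedValues on a string input renders as a single-select dropdown.
const styleInput = {
  name: "style",
  type: "string",
  allowedValues: ["anime", "photoreal", "pixel-art"],
  default: "photoreal", // optional; see the `default` field below
};
```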

backgroundBehavior?: "opaque" | "transparent"

Specifies the background behavior for the input. Only available for `file` and `file_array` input types with kind `image`.

One of the following:
"opaque"
"transparent"
color?: boolean

Whether the input is a color or not. Only available for `string` input type.

costImpact?: boolean

Whether this input affects the model’s cost calculation

default?: unknown

The default value for the input

description?: string

Help text displayed in the UI to provide additional information about the input

group?: string

Used to visually group inputs together in the UI. Inputs with the same group value appear consecutively in the UI.

hint?: string

Hint text displayed in the UI as a tooltip to guide the user

inputs?: Array<Record<string, unknown>>

The list of inputs which form an object within a container array. All inputs are the same as the current object. This is only available for type inputs_array inputs.

kind?: "3d" | "audio" | "document" | 4 more

The asset kind of the input. Only taken into account for `file` and `file_array` input types. If the model accepts multiple kinds, the API cannot create the asset on the fly from a data URL unless it includes the `data:<kind>,` prefix

One of the following:
"3d"
"audio"
"document"
"image"
"image-hdr"
"json"
"video"
label?: string

The label displayed in the UI for this input

maskFrom?: string

The name of the file input field to use as the mask source

max?: number

The maximum allowed value. Only available for `number` and `array` input types.

maxLength?: number

The maximum allowed length for `string` inputs. Also applies to each item in `string_array`.

maxSize?: number

The maximum allowed file size in bytes. Only applies to `file` and `file_array` input types. Validated against `asset.properties.size` at job creation time.

min?: number

The minimum allowed value. Only available for `number` and array input types.

minLength?: number

The minimum allowed length for string inputs. Also applies to each item in `string_array`.

modelTypes?: Array<"custom" | "elevenlabs-voice" | "flux.1" | 34 more>

The allowed model types for this input. Example: `[“flux.1-lora”]`. Only available for `model_array` input type.

One of the following:
"custom"
"elevenlabs-voice"
"flux.1"
"flux.1-composition"
"flux.1-kontext-dev"
"flux.1-kontext-lora"
"flux.1-krea-dev"
"flux.1-krea-lora"
"flux.1-lora"
"flux.1-pro"
"flux.1.1-pro-ultra"
"flux.2-dev-edit-lora"
"flux.2-dev-lora"
"flux.2-klein-4b-edit-lora"
"flux.2-klein-4b-lora"
"flux.2-klein-9b-edit-lora"
"flux.2-klein-9b-lora"
"flux.2-klein-base-4b-edit-lora"
"flux.2-klein-base-4b-lora"
"flux.2-klein-base-9b-edit-lora"
"flux.2-klein-base-9b-lora"
"flux1.1-pro"
"gpt-image-1"
"qwen-image-2512-lora"
"qwen-image-edit-2509-lora"
"qwen-image-edit-2511-lora"
"qwen-image-edit-lora"
"qwen-image-lora"
"sd-1_5"
"sd-1_5-composition"
"sd-1_5-lora"
"sd-xl"
"sd-xl-composition"
"sd-xl-lora"
"zimage-de-turbo-lora"
"zimage-lora"
"zimage-turbo-lora"
parent?: boolean

Whether this input represents a parent asset to assign to the produced assets. Only available for `file` and `file_array` input types.

For `file_array`, the parent asset is the first item in the array.

placeholder?: string

Placeholder text for the input. Only available for ‘string’ input type.

prompt?: boolean

Whether the input is a prompt. When true, displays as a text area with prompt spark feature. Only available for `string` input type.

promptSpark?: boolean

Whether the input is used with prompt spark. Only available for `string` input type.

required?: Required { always, conditionalValues, ifDefined, ifNotDefined }

Set of rules that describes when this input is required:

  • `always`: Input is always required
  • `ifNotDefined`: Input is required when another specified input is not defined
  • `ifDefined`: Input is required when another specified input is defined
  • `conditionalValues`: Input is required when another input has a specific value

By default, the input is not required. A sketch combining these rules follows the field descriptions below.

always?: boolean

Whether the input is always required

conditionalValues?: unknown

Makes this input required when another input has a specific value:

  • Key: name of the input to check
  • Value: operation and allowed values that trigger the requirement
ifDefined?: unknown

Makes this input required when another input is defined:

  • Key: name of the input that must be defined
  • Value: message to display when this input is required
ifNotDefined?: unknown

Makes this input required when another input is not defined:

  • Key: name of the input that must be undefined
  • Value: message to display when this input is required
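
Putting the four rules together, a hypothetical `required` configuration might look as follows. Note that the SDK types these values as `unknown`, so the `{ operation, values }` shape under `conditionalValues` is an assumption, not a documented contract:

```ts
// Hypothetical required-rules object; keys name other inputs on the same model.
const required = {
  ifNotDefined: {
    image: "An image is required when no reference asset is provided", // key: input name, value: message
  },
  conditionalValues: {
    // The SDK types this value as unknown; the { operation, values } shape below is an assumption.
    mode: { operation: "in", values: ["inpaint"] },
  },
};
```
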
step?: number

The step increment for numeric inputs. Only available for `number` input type.

minimum1
modelKeyword?: string

The model keyword. This is a legacy parameter; please use conceptPrompt in parameters instead

name?: string

The model name (example: “Cinematic Realism”)

negativePromptEmbedding?: string

Fine-tune the model’s inferences with negative prompt embedding

ownerId?: string

The owner ID (example: “team_VFhihHKMRZyDDnZAJwLb2Q”)

parameters?: Parameters { age, batchSize, classPrompt, 29 more }

The parameters of the model

age?: string

Age group of the voice (for professional cloning)

Only available for ElevenLabs voice training

batchSize?: number

The batch size. A larger batch size results in fewer steps and increases the learning rate

Only available for Flux LoRA training

maximum4
minimum1
classPrompt?: string

The prompt to specify images in the same class as provided instance images

Only available for SD15 training

cloneType?: string

Type of voice cloning: “instant” (fast) or “professional” (higher quality, requires captcha)

Only available for ElevenLabs voice training

conceptPrompt?: string

The prompt with identifier specifying the instance (or subject) of the class (example: “a daiton dog”)

Default value varies depending on the model type:

  • For SD1.5: “daiton” if no class is associated with the model
  • For SDXL: “daiton”
  • For Flux: ""
gender?: string

Gender of the voice (for professional cloning)

Only available for ElevenLabs voice training

language?: string

Language of the audio samples (ISO 639-1 code)

Only available for ElevenLabs voice training

learningRate?: number

Initial learning rate (after the potential warmup period)

Default value varies depending on the model type:

  • For SD1.5 and SDXL: 0.000005
  • For Flux: 0.0001
exclusiveMinimum
minimum0
learningRateTextEncoder?: number

Initial learning rate (after the potential warmup period) for the text encoder

Maximum [Flux LoRA: 0.001] Default [SDXL: 0.00005 | Flux LoRA: 0.00001] Minimum [SDXL: 0 | Flux LoRA: 0.000001]

exclusiveMinimum
maximum0.001
minimum0
learningRateUnet?: number

Initial learning rate (after the potential warmup period) for the UNet

Only available for SDXL LoRA training

exclusiveMinimum
minimum0
lrScheduler?: "constant" | "constant-with-warmup" | "cosine" | 3 more

The scheduler type to use (default: “constant”)

Only available for SD15 and SDXL LoRA training

One of the following:
"constant"
"constant-with-warmup"
"cosine"
"cosine-with-restarts"
"linear"
"polynomial"
maxTrainSteps?: number

Maximum number of training steps to execute (default: varies depending on the model type)

For SDXL LoRA training, please use numTextTrainSteps and numUNetTrainSteps instead

Default value varies depending on the model type:

  • For SD1.5: round((number of training images * 225) / 3)
  • For SDXL: number of training images * 175
  • For Flux: number of training images * 100

Maximum value varies depending on the model type:

  • For SD1.5 and SDXL: [0, 40000]
  • For Flux: [0, 10000]
maximum40000
minimum0
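
The defaults above are simple functions of the training-set size; a sketch reproducing them:

```ts
// Sketch: the documented default maxTrainSteps per model family, given the number of training images.
function defaultMaxTrainSteps(family: "sd1.5" | "sdxl" | "flux", nImages: number): number {
  switch (family) {
    case "sd1.5":
      return Math.round((nImages * 225) / 3);
    case "sdxl":
      return nImages * 175;
    case "flux":
      return nImages * 100;
  }
}
```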
nbEpochs?: number

The number of epochs to train for

Only available for Flux LoRA training

maximum30
minimum1
nbRepeats?: number

The number of times to repeat the training

Only available for Flux LoRA training

maximum30
minimum1
numTextTrainSteps?: number

The number of training steps for the text encoder

Only available for SDXL LoRA training

maximum40000
minimum0
numUNetTrainSteps?: number

The number of training steps for the UNet

Only available for SDXL LoRA training

maximum40000
minimum0
optimizeFor?: "likeness"

Optimize the model training task for a specific type of input images. The available values are:

  • “likeness”: optimize training for likeness or portrait (targets specific transformer blocks)
  • “all”: train all transformer blocks
  • “none”: train no specific transformer blocks

This parameter controls which double and single transformer blocks are trained during the LoRA training process.

Only available for Flux LoRA training

priorLossWeight?: number

The weight of prior preservation loss

Only available for SD15 and SDXL LoRA training

exclusiveMinimum
maximum1.7976931348623157
minimum0
randomCrop?: boolean

Whether to random crop or center crop images before resizing to the working resolution

Only available for SD15 and SDXL LoRA training

randomCropRatio?: number

Ratio of random crops

Only available for SD15 and SDXL LoRA training

maximum1
minimum0
randomCropScale?: number

Scale of random crops

Only available for SD15 and SDXL LoRA training

maximum1
minimum0
rank?: number

The dimension of the LoRA update matrices

Only available for SDXL (deprecated), Flux LoRA and Musubi training

Default value varies depending on the model type:

  • For SDXL (deprecated): 64
  • For Flux: 16
  • For Musubi: 64

Each trainer enforces its own tighter limit (Flux LoRA: [2; 64], Musubi: [2; 128])

maximum128
minimum2
removeBackgroundNoise?: boolean

Whether to remove background noise from audio samples before cloning. When enabled, each sample must be at least 5 seconds long.

Only available for ElevenLabs voice training

samplePrompts?: Array<string>

The prompts to use for each epoch

Only available for Flux LoRA training

sampleSourceImages?: Array<string>

The sample prompt images (AssetIds) paired with samplePrompts. Must be the same length as samplePrompts

Only available for Flux LoRA training

scaleLr?: boolean

Whether to scale the learning rate

Note: Legacy parameter, will be ignored

Only available for SD15 and SDXL LoRA training

seed?: number

Used to reproduce previous results. Default: randomly generated number.

Only available for SD15 and SDXL LoRA training

maximum9007199254740991
minimum0
textEncoderTrainingRatio?: number

The ratio of training steps allocated to the text encoder

Example: for 100 steps and a value of 0.2, the text encoder will be trained for 20 steps and then the UNet for 80 steps

Note: Legacy parameter, please use numTextTrainSteps and numUNetTrainSteps

Only available for SD15 and SDXL LoRA training

maximum0.99
minimum0
validationFrequency?: number

Validation frequency. Cannot be greater than the maxTrainSteps value

Only available for SD15 and SDXL LoRA training

minimum0
validationPrompt?: string

Validation prompt

Only available for SD15 and SDXL LoRA training

voiceDescription?: string

Description of the voice characteristics

Only available for ElevenLabs voice training

wandbKey?: string

The Weights & Biases key to use for logging. The maximum length is 40 characters

parentModelId?: string

The id of the parent model

performanceStats?: PerformanceStats { variants, default }

Aggregated performance stats

variants: Array<Variant>

Performance metrics per variant

capability: string

The generation capability (example: “txt2img”, “img2video”, “txt2audio”)

computedAt: string

When these stats were last computed (ISO date)

variantKey: string

Unique variant identifier (example: “txt2img:1K”, “img2video:2K”, “txt2audio”)

arenaScore?: ArenaScore { arenaCategory, arenaModelName, fetchedAt, 5 more }

External quality score from arena.ai leaderboard

arenaCategory: string

Arena category (example: “text_to_image”, “image_to_video”)

arenaModelName: string

Model name on arena.ai

fetchedAt: string

When this score was last fetched (ISO date)

rank: number

Rank in the arena category

rating: number

ELO rating

ratingLower: number

ELO rating confidence interval lower bound

ratingUpper: number

ELO rating confidence interval upper bound

votes: number

Number of human votes

costPerAssetMaxCU?: number

Maximum cost per output asset (CU)

costPerAssetMinCU?: number

Minimum cost per output asset (CU)

costPerAssetP50CU?: number

Median cost per output asset (CU)

inferenceLatencyP50Sec?: number

Inference latency P50 per output asset (seconds)

inferenceLatencyP75Sec?: number

Inference latency P75 per output asset (seconds)

resolution?: string

The resolution bucket (example: “0.5K”, “1K”, “2K”, “4K”)

totalLatencyP50Sec?: number

Total latency P50 per output asset, including queue time (seconds)

totalLatencyP75Sec?: number

Total latency P75 per output asset, including queue time (seconds)

default?: string

Default variant key for quick model comparison

promptEmbedding?: string

Fine-tune the model’s inferences with prompt embedding

shortDescription?: string

The model short description (example: “This model generates highly detailed cinematic scenes.”)

softDeletionOn?: string

The date when the model will be soft deleted (only for Free plan)

thumbnail?: Thumbnail { assetId, url }

A thumbnail for your model

assetId: string

The AssetId of the image used as a thumbnail for your model (example: “asset_GTrL3mq4SXWyMxkOHRxlpw”)

url: string

The url of the image used as a thumbnail for your model

trainingImagePairs?: Array<TrainingImagePair>

Array of training image pairs

instruction?: string

The instruction for the image pair (source to target)

sourceId?: string

The source asset ID (must be a training asset)

targetId?: string

The target asset ID (must be a training asset)
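
For instance, an edit-style training set pairs each source asset with its edited target plus an instruction (the asset IDs below are placeholders):

```ts
// Hypothetical training image pairs; both IDs must reference training assets.
const trainingImagePairs = [
  {
    instruction: "turn the line-art sketch into a painted render",
    sourceId: "asset_aaaaaaaaaaaaaaaaaaaaaa",
    targetId: "asset_bbbbbbbbbbbbbbbbbbbbbb",
  },
];
```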

trainingImages?: Array<TrainingImage>

The URLs of the first 3 training images of the model. To retrieve the full set of images, fetch the model by modelId

id: string

The training image ID (example: “asset_GTrL3mq4SXWyMxkOHRxlpw”)

automaticCaptioning: string

Automatic captioning of the image

createdAt: string

The training image upload date as an ISO string (example: “2023-02-03T11:19:41.579Z”)

description: string

Description for the image

downloadUrl: string

The URL of the image

name: string

The original file name of the image (example: “my-training-image.jpg”)

trainingProgress?: TrainingProgress { stage, updatedAt, position, 3 more }

Additional information about the training progress of the model

stage: "pending" | "pending-captcha" | "queued-for-train" | 2 more

The stage of the request

One of the following:
"pending"
"pending-captcha"
"queued-for-train"
"running-train"
"starting-train"
updatedAt: number

Timestamp in milliseconds of the last time the training progress was updated

position?: number

Position of the job in the queue (i.e. the number of jobs in the queue before this one)

progress?: number

The progress of the job

maximum1
minimum0
remainingTimeMs?: number

The remaining time in milliseconds

startedAt?: number

The timestamp in milliseconds marking the start of the process

trainingStats?: TrainingStats { endedAt, queueDuration, startedAt, trainDuration }

Additional information about the model’s training

endedAt?: string

The training end time as an ISO date string

queueDuration?: number

The training queued duration in seconds

startedAt?: string

The training start time as an ISO date string

trainDuration?: number

The training duration in seconds

uiConfig?: UiConfig { inputProperties, lorasComponent, presets, 3 more }

The UI configuration for the model

inputProperties?: Record<string, InputProperties>

Configuration for the input properties

collapsed?: boolean
lorasComponent?: LorasComponent { label, modelInput, scaleInput, modelIdInput }

Configuration for the loras component

label: string

The label of the component

modelInput: string

The input name of the model (model_array)

scaleInput: string

The input name of the scale (number_array)

modelIdInput?: string

The input model ID (example: a composition or a single LoRA modelId). If specified, the model ID will be attached to the output asset as metadata. If the model-decomposer parser is specified on it, modelInput and scaleInput will be populated automatically

presets?: Array<Preset>

Configuration for the presets

fields: Array<string>
presets: unknown
resolutionComponent?: ResolutionComponent { heightInput, label, presets, widthInput }

Configuration for the resolution component

heightInput: string

The input name of the height

label: string

The label of the component

presets: Array<Preset>

The resolution presets

height: number
label: string
width: number
widthInput: string

The input name of the width

selects?: Record<string, unknown>

Configuration for the selects

triggerGenerate?: TriggerGenerate { label, after, position }

Configuration for the trigger generate button

label: string
after?: string

The ‘name’ of the input after which the trigger generate button will be displayed. Do not specify both position and after.

position?: "bottom" | "top"

The position of the trigger generate button. If position is specified, the button will be displayed at that position. Do not specify both position and after.

One of the following:
"bottom"
"top"
userId?: string

(Deprecated) The user ID (example: “user_VFhihHKMRZyDDnZAJwLb2Q”)
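
A minimal sketch of the copy call that produces this response (client setup assumed; the parameters follow the endpoint index):

```ts
declare const client: any; // assumed: an initialized SDK client

// Sketch: copy a model, keeping its trained weights and example assets.
const { model: copied } = await client.models.copy("model_eyVcnFJcR92BxBkz7N6g5w", {
  copyAsTrained: true,
  copyExamples: true,
});
```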

ModelDownloadResponse { jobId }
jobId: string

The job id associated with the download request
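
The download call is asynchronous: it returns a job ID rather than the file itself. A sketch (the epoch value is a placeholder; polling the job is out of scope for this section):

```ts
declare const client: any; // assumed: an initialized SDK client

// Sketch: request a model download for a specific epoch; the response only carries a job ID.
const { jobId } = await client.models.download("model_eyVcnFJcR92BxBkz7N6g5w", {
  modelEpoch: "000001",
});
```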

ModelDeleteImagesResponse = unknown
ModelUpdateTagsResponse { added, deleted }
added: Array<string>

The list of added tags

deleted: Array<string>

The list of deleted tags
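
A sketch of the corresponding call (note the `_delete` spelling from the endpoint index; `strict` is omitted here since its behavior is not detailed in this section):

```ts
declare const client: any; // assumed: an initialized SDK client

// Sketch: add and remove tags in one call; the response echoes what actually changed.
const { added, deleted } = await client.models.updateTags("model_eyVcnFJcR92BxBkz7N6g5w", {
  add: ["sci-fi"],
  _delete: ["landscape"],
});
```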

ModelTransferResponse { model }
model: Model { id, capabilities, collectionIds, 35 more }
id: string

The model ID (example: “model_eyVcnFJcR92BxBkz7N6g5w”)

capabilities: Array<"3d23d" | "audio2audio" | "audio2video" | 29 more>

List of model capabilities (example: [“txt2img”, “img2img”, “txt2img_ip_adapter”, …])

One of the following:
"3d23d"
"audio2audio"
"audio2video"
"controlnet"
"controlnet_img2img"
"controlnet_inpaint"
"controlnet_inpaint_ip_adapter"
"controlnet_ip_adapter"
"controlnet_reference"
"controlnet_texture"
"img23d"
"img2img"
"img2img_ip_adapter"
"img2img_texture"
"img2txt"
"img2video"
"inpaint"
"inpaint_ip_adapter"
"outpaint"
"reference"
"reference_texture"
"txt23d"
"txt2audio"
"txt2img"
"txt2img_ip_adapter"
"txt2img_texture"
"txt2txt"
"txt2video"
"video23d"
"video2audio"
"video2img"
"video2video"
collectionIds: Array<string>

A list of CollectionId this model belongs to

createdAt: string

The model creation date as an ISO string (example: “2023-02-03T11:19:41.579Z”)

custom: boolean

Whether the model is a custom model and can be used only with POST /generate/custom/{modelId} endpoint

exampleAssetIds: Array<string>

List of all example asset IDs setup by the model owner

privacy: "private" | "public" | "unlisted"

The privacy of the model (default: private)

One of the following:
"private"
"public"
"unlisted"
source: "civitai" | "huggingface" | "other" | "scenario"

The source of the model

One of the following:
"civitai"
"huggingface"
"other"
"scenario"
status: "copying" | "failed" | "new" | 3 more

The model status

One of the following:
"copying"
"failed"
"new"
"trained"
"training"
"training-canceled"
tags: Array<string>

The associated tags (example: [“sci-fi”, “landscape”])

trainingImagesNumber: number

The total number of training images

type: "custom" | "elevenlabs-voice" | "flux.1" | 34 more

The model type (example: “flux.1-lora”)

One of the following:
"custom"
"elevenlabs-voice"
"flux.1"
"flux.1-composition"
"flux.1-kontext-dev"
"flux.1-kontext-lora"
"flux.1-krea-dev"
"flux.1-krea-lora"
"flux.1-lora"
"flux.1-pro"
"flux.1.1-pro-ultra"
"flux.2-dev-edit-lora"
"flux.2-dev-lora"
"flux.2-klein-4b-edit-lora"
"flux.2-klein-4b-lora"
"flux.2-klein-9b-edit-lora"
"flux.2-klein-9b-lora"
"flux.2-klein-base-4b-edit-lora"
"flux.2-klein-base-4b-lora"
"flux.2-klein-base-9b-edit-lora"
"flux.2-klein-base-9b-lora"
"flux1.1-pro"
"gpt-image-1"
"qwen-image-2512-lora"
"qwen-image-edit-2509-lora"
"qwen-image-edit-2511-lora"
"qwen-image-edit-lora"
"qwen-image-lora"
"sd-1_5"
"sd-1_5-composition"
"sd-1_5-lora"
"sd-xl"
"sd-xl-composition"
"sd-xl-lora"
"zimage-de-turbo-lora"
"zimage-lora"
"zimage-turbo-lora"
updatedAt: string

The model last update date as an ISO string (example: “2023-02-03T11:19:41.579Z”)

accessRestrictions?: 0 | 100 | 25 | 2 more

The access restrictions of the model. 0: Free plan, 25: Creator plan, 50: Pro plan, 75: Team plan, 100: Enterprise plan

One of the following:
0
100
25
50
75
authorId?: string

The author user ID (example: “user_VFhihHKMRZyDDnZAJwLb2Q”)

class?: Class { category, conceptPrompt, modelId, 5 more }

The class of the model

category: string

The category slug of the class (example: “art-style”)

conceptPrompt: string

The concept prompt of the class (example: “a sks character design”)

modelId: string

The model ID of the class (example: “stable-diffusion-v1-5”)

name: string

The class name (example: “Character Design”)

prompt: string

The class prompt (example: “a character design”)

slug: string

The class slug (example: “art-style-character-design”)

status: "published" | "unpublished"

The class status (only published classes are listed, but unpublished classes can still appear in existing models)

One of the following:
"published"
"unpublished"
thumbnails: Array<string>

Some example images URLs to showcase the class

compliantModelIds?: Array<string>

List of base model IDs compliant with the model (example: [“flux.1-dev”, “flux.1-schnell”]). This attribute is mainly used for Flux LoRA models

concepts?: Array<Concept>

Concepts are required for composition model types

modelId: string

The model ID (example: “model_eyVcnFJcR92BxBkz7N6g5w”)

scale: number

The scale of the model (example: 1.0). For Flux Kontext Prompt Editing, the scale is between 0 and 2.

maximum2
minimum-2
modelEpoch?: string

The epoch of the model (example: “000001”). Only available for Flux LoRA trained models

epoch?: string

The epoch of the model. Only available for Flux LoRA trained models. If not set, the final (latest) model epoch is used

epochs?: Array<Epoch>

The epochs of the model. Only available for Flux LoRA trained models.

epoch: string

The epoch hash to identify the epoch

assets?: Array<Asset>

The assets of the epoch, if sample prompts have been supplied during training

assetId: string

The AssetId of the image during training (example: “asset_GTrL3mq4SXWyMxkOHRxlpw”)

url: string

The url of the asset

inputs?: Array<Input>

The inputs of the model. Only used for custom models. To retrieve this list, fetch the model by modelId with GET /models/{modelId}

name: string

The name that must be used to call the model through the API

type: "boolean" | "file" | "file_array" | 7 more

The data type of the input

One of the following:
"boolean"
"file"
"file_array"
"inputs_array"
"model"
"model_array"
"number"
"number_array"
"string"
"string_array"
allowedValues?: Array<unknown>

The allowed values for the input. For `string` or `number` types, creates a single-select dropdown. For `string_array` type, creates a multi-select dropdown.

backgroundBehavior?: "opaque" | "transparent"

Specifies the background behavior for the input. Only available for `file` and `file_array` input types with kind `image`.

One of the following:
"opaque"
"transparent"
color?: boolean

Whether the input is a color or not. Only available for `string` input type.

costImpact?: boolean

Whether this input affects the model’s cost calculation

default?: unknown

The default value for the input

description?: string

Help text displayed in the UI to provide additional information about the input

group?: string

Used to visually group inputs together in the UI. Inputs with the same group value appear consecutively in the UI.

hint?: string

Hint text displayed in the UI as a tooltip to guide the user

inputs?: Array<Record<string, unknown>>

The list of inputs which form an object within a container array. All inputs are the same as the current object. This is only available for type inputs_array inputs.

kind?: "3d" | "audio" | "document" | 4 more

The asset kind of the input. Only taken into account for `file` and `file_array` input types. If the model accepts multiple kinds, the API cannot create the asset on the fly from a data URL unless it includes the `data:<kind>,` prefix

One of the following:
"3d"
"audio"
"document"
"image"
"image-hdr"
"json"
"video"
label?: string

The label displayed in the UI for this input

maskFrom?: string

The name of the file input field to use as the mask source

max?: number

The maximum allowed value. Only available for `number` and `array` input types.

maxLength?: number

The maximum allowed length for `string` inputs. Also applies to each item in `string_array`.

maxSize?: number

The maximum allowed file size in bytes. Only applies to `file` and `file_array` input types. Validated against `asset.properties.size` at job creation time.

min?: number

The minimum allowed value. Only available for `number` and array input types.

minLength?: number

The minimum allowed length for string inputs. Also applies to each item in `string_array`.

modelTypes?: Array<"custom" | "elevenlabs-voice" | "flux.1" | 34 more>

The allowed model types for this input. Example: `[“flux.1-lora”]`. Only available for `model_array` input type.

One of the following:
"custom"
"elevenlabs-voice"
"flux.1"
"flux.1-composition"
"flux.1-kontext-dev"
"flux.1-kontext-lora"
"flux.1-krea-dev"
"flux.1-krea-lora"
"flux.1-lora"
"flux.1-pro"
"flux.1.1-pro-ultra"
"flux.2-dev-edit-lora"
"flux.2-dev-lora"
"flux.2-klein-4b-edit-lora"
"flux.2-klein-4b-lora"
"flux.2-klein-9b-edit-lora"
"flux.2-klein-9b-lora"
"flux.2-klein-base-4b-edit-lora"
"flux.2-klein-base-4b-lora"
"flux.2-klein-base-9b-edit-lora"
"flux.2-klein-base-9b-lora"
"flux1.1-pro"
"gpt-image-1"
"qwen-image-2512-lora"
"qwen-image-edit-2509-lora"
"qwen-image-edit-2511-lora"
"qwen-image-edit-lora"
"qwen-image-lora"
"sd-1_5"
"sd-1_5-composition"
"sd-1_5-lora"
"sd-xl"
"sd-xl-composition"
"sd-xl-lora"
"zimage-de-turbo-lora"
"zimage-lora"
"zimage-turbo-lora"
parent?: boolean

Whether this input represents a parent asset to assign to the produced assets. Only available for `file` and `file_array` input types.

For `file_array`, the parent asset is the first item in the array.

placeholder?: string

Placeholder text for the input. Only available for ‘string’ input type.

prompt?: boolean

Whether the input is a prompt. When true, displays as a text area with prompt spark feature. Only available for `string` input type.

promptSpark?: boolean

Whether the input is used with prompt spark. Only available for `string` input type.

required?: Required { always, conditionalValues, ifDefined, ifNotDefined }

Set of rules that describes when this input is required:

  • `always`: Input is always required
  • `ifNotDefined`: Input is required when another specified input is not defined
  • `ifDefined`: Input is required when another specified input is defined
  • `conditionalValues`: Input is required when another input has a specific value

By default, the input is not required.

always?: boolean

Whether the input is always required

conditionalValues?: unknown

Makes this input required when another input has a specific value:

  • Key: name of the input to check
  • Value: operation and allowed values that trigger the requirement
ifDefined?: unknown

Makes this input required when another input is defined:

  • Key: name of the input that must be defined
  • Value: message to display when this input is required
ifNotDefined?: unknown

Makes this input required when another input is not defined:

  • Key: name of the input that must be undefined
  • Value: message to display when this input is required
step?: number

The step increment for numeric inputs. Only available for `number` input type.

minimum1
modelKeyword?: string

The model keyword. This is a legacy parameter; please use conceptPrompt in parameters instead

name?: string

The model name (example: “Cinematic Realism”)

negativePromptEmbedding?: string

Fine-tune the model’s inferences with negative prompt embedding

ownerId?: string

The owner ID (example: “team_VFhihHKMRZyDDnZAJwLb2Q”)

parameters?: Parameters { age, batchSize, classPrompt, 29 more }

The parameters of the model

age?: string

Age group of the voice (for professional cloning)

Only available for ElevenLabs voice training

batchSize?: number

The batch size. A larger batch size results in fewer steps and increases the learning rate

Only available for Flux LoRA training

maximum4
minimum1
classPrompt?: string

The prompt to specify images in the same class as provided instance images

Only available for SD15 training

cloneType?: string

Type of voice cloning: “instant” (fast) or “professional” (higher quality, requires captcha)

Only available for ElevenLabs voice training

conceptPrompt?: string

The prompt with identifier specifying the instance (or subject) of the class (example: “a daiton dog”)

Default value varies depending on the model type:

  • For SD1.5: “daiton” if no class is associated with the model
  • For SDXL: “daiton”
  • For Flux: ""
gender?: string

Gender of the voice (for professional cloning)

Only available for ElevenLabs voice training

language?: string

Language of the audio samples (ISO 639-1 code)

Only available for ElevenLabs voice training

learningRate?: number

Initial learning rate (after the potential warmup period)

Default value varies depending on the model type:

  • For SD1.5 and SDXL: 0.000005
  • For Flux: 0.0001
exclusiveMinimum
minimum0
learningRateTextEncoder?: number

Initial learning rate (after the potential warmup period) for the text encoder

Maximum [Flux LoRA: 0.001] Default [SDXL: 0.00005 | Flux LoRA: 0.00001] Minimum [SDXL: 0 | Flux LoRA: 0.000001]

exclusiveMinimum
maximum0.001
minimum0
learningRateUnet?: number

Initial learning rate (after the potential warmup period) for the UNet

Only available for SDXL LoRA training

exclusiveMinimum
minimum0
lrScheduler?: "constant" | "constant-with-warmup" | "cosine" | 3 more

The scheduler type to use (default: “constant”)

Only available for SD15 and SDXL LoRA training

One of the following:
"constant"
"constant-with-warmup"
"cosine"
"cosine-with-restarts"
"linear"
"polynomial"
maxTrainSteps?: number

Maximum number of training steps to execute (default: varies depending on the model type)

For SDXL LoRA training, please use numTextTrainSteps and numUNetTrainSteps instead

Default value varies depending on the model type:

  • For SD1.5: round((number of training images * 225) / 3)
  • For SDXL: number of training images * 175
  • For Flux: number of training images * 100

Maximum value varies depending on the model type:

  • For SD1.5 and SDXL: [0, 40000]
  • For Flux: [0, 10000]
maximum40000
minimum0
nbEpochs?: number

The number of epochs to train for

Only available for Flux LoRA training

maximum30
minimum1
nbRepeats?: number

The number of times to repeat the training

Only available for Flux LoRA training

maximum30
minimum1
numTextTrainSteps?: number

The number of training steps for the text encoder

Only available for SDXL LoRA training

maximum40000
minimum0
numUNetTrainSteps?: number

The number of training steps for the UNet

Only available for SDXL LoRA training

maximum40000
minimum0
optimizeFor?: "likeness"

Optimize the model training task for a specific type of input images. The available values are:

  • “likeness”: optimize training for likeness or portrait (targets specific transformer blocks)
  • “all”: train all transformer blocks
  • “none”: train no specific transformer blocks

This parameter controls which double and single transformer blocks are trained during the LoRA training process.

Only available for Flux LoRA training

priorLossWeight?: number

The weight of prior preservation loss

Only available for SD15 and SDXL LoRA training

exclusiveMinimum
maximum1.7976931348623157
minimum0
randomCrop?: boolean

Whether to random crop or center crop images before resizing to the working resolution

Only available for SD15 and SDXL LoRA training

randomCropRatio?: number

Ratio of random crops

Only available for SD15 and SDXL LoRA training

maximum1
minimum0
randomCropScale?: number

Scale of random crops

Only available for SD15 and SDXL LoRA training

maximum1
minimum0
rank?: number

The dimension of the LoRA update matrices

Only available for SDXL (deprecated), Flux LoRA and Musubi training

Default value varies depending on the model type:

  • For SDXL (deprecated): 64
  • For Flux: 16
  • For Musubi: 64

Each trainer enforces its own tighter limit (Flux LoRA: [2; 64], Musubi: [2; 128])

maximum128
minimum2
removeBackgroundNoise?: boolean

Whether to remove background noise from audio samples before cloning. When enabled, each sample must be at least 5 seconds long.

Only available for ElevenLabs voice training

samplePrompts?: Array<string>

The prompts to use for each epoch

Only available for Flux LoRA training

sampleSourceImages?: Array<string>

The sample prompt images (AssetIds) paired with samplePrompts. Must be the same length as samplePrompts

Only available for Flux LoRA training

scaleLr?: boolean

Whether to scale the learning rate

Note: Legacy parameter, will be ignored

Only available for SD15 and SDXL LoRA training

seed?: number

Used to reproduce previous results. Default: randomly generated number.

Only available for SD15 and SDXL LoRA training

maximum9007199254740991
minimum0
textEncoderTrainingRatio?: number

The ratio of training steps allocated to the text encoder

Example: for 100 steps and a value of 0.2, the text encoder will be trained for 20 steps and then the UNet for 80 steps

Note: Legacy parameter, please use numTextTrainSteps and numUNetTrainSteps

Only available for SD15 and SDXL LoRA training

maximum0.99
minimum0
validationFrequency?: number

Validation frequency. Cannot be greater than the maxTrainSteps value

Only available for SD15 and SDXL LoRA training

minimum0
validationPrompt?: string

Validation prompt

Only available for SD15 and SDXL LoRA training

voiceDescription?: string

Description of the voice characteristics

Only available for ElevenLabs voice training

wandbKey?: string

The Weights & Biases key to use for logging. The maximum length is 40 characters

parentModelId?: string

The id of the parent model

performanceStats?: PerformanceStats { variants, default }

Aggregated performance stats

variants: Array<Variant>

Performance metrics per variant

capability: string

The generation capability (example: “txt2img”, “img2video”, “txt2audio”)

computedAt: string

When these stats were last computed (ISO date)

variantKey: string

Unique variant identifier (example: “txt2img:1K”, “img2video:2K”, “txt2audio”)

arenaScore?: ArenaScore { arenaCategory, arenaModelName, fetchedAt, 5 more }

External quality score from arena.ai leaderboard

arenaCategory: string

Arena category (example: “text_to_image”, “image_to_video”)

arenaModelName: string

Model name on arena.ai

fetchedAt: string

When this score was last fetched (ISO date)

rank: number

Rank in the arena category

rating: number

ELO rating

ratingLower: number

ELO rating confidence interval lower bound

ratingUpper: number

ELO rating confidence interval upper bound

votes: number

Number of human votes

costPerAssetMaxCU?: number

Maximum cost per output asset (CU)

costPerAssetMinCU?: number

Minimum cost per output asset (CU)

costPerAssetP50CU?: number

Median cost per output asset (CU)

inferenceLatencyP50Sec?: number

Inference latency P50 per output asset (seconds)

inferenceLatencyP75Sec?: number

Inference latency P75 per output asset (seconds)

resolution?: string

The resolution bucket (example: “0.5K”, “1K”, “2K”, “4K”)

totalLatencyP50Sec?: number

Total latency P50 per output asset, including queue time (seconds)

totalLatencyP75Sec?: number

Total latency P75 per output asset, including queue time (seconds)

default?: string

Default variant key for quick model comparison

promptEmbedding?: string

Fine-tune the model’s inferences with prompt embedding

shortDescription?: string

The model short description (example: “This model generates highly detailed cinematic scenes.”)

softDeletionOn?: string

The date when the model will be soft deleted (only for Free plan)

thumbnail?: Thumbnail { assetId, url }

A thumbnail for your model

assetId: string

The AssetId of the image used as a thumbnail for your model (example: “asset_GTrL3mq4SXWyMxkOHRxlpw”)

url: string

The url of the image used as a thumbnail for your model

trainingImagePairs?: Array<TrainingImagePair>

Array of training image pairs

instruction?: string

The instruction for the image pair (source to target)

sourceId?: string

The source asset ID (must be a training asset)

targetId?: string

The target asset ID (must be a training asset)

trainingImages?: Array<TrainingImage>

The URLs of the first 3 training images of the model. To retrieve the full set of images, fetch the model by modelId

id: string

The training image ID (example: “asset_GTrL3mq4SXWyMxkOHRxlpw”)

automaticCaptioning: string

Automatic captioning of the image

createdAt: string

The training image upload date as an ISO string (example: “2023-02-03T11:19:41.579Z”)

description: string

Description for the image

downloadUrl: string

The URL of the image

name: string

The original file name of the image (example: “my-training-image.jpg”)

trainingProgress?: TrainingProgress { stage, updatedAt, position, 3 more }

Additional information about the training progress of the model

stage: "pending" | "pending-captcha" | "queued-for-train" | 2 more

The stage of the request

One of the following:
"pending"
"pending-captcha"
"queued-for-train"
"running-train"
"starting-train"
updatedAt: number

Timestamp in milliseconds of the last time the training progress was updated

position?: number

Position of the job in the queue (i.e. the number of jobs in the queue before this one)

progress?: number

The progress of the job

maximum1
minimum0
remainingTimeMs?: number

The remaining time in milliseconds

startedAt?: number

The timestamp in milliseconds marking the start of the process

trainingStats?: TrainingStats { endedAt, queueDuration, startedAt, trainDuration }

Additional information about the model’s training

endedAt?: string

The training end time as an ISO date string

queueDuration?: number

The training queued duration in seconds

startedAt?: string

The training start time as an ISO date string

trainDuration?: number

The training duration in seconds

uiConfig?: UiConfig { inputProperties, lorasComponent, presets, 3 more }

The UI configuration for the model

inputProperties?: Record<string, InputProperties>

Configuration for the input properties

collapsed?: boolean
lorasComponent?: LorasComponent { label, modelInput, scaleInput, modelIdInput }

Configuration for the loras component

label: string

The label of the component

modelInput: string

The input name of the model (model_array)

scaleInput: string

The input name of the scale (number_array)

modelIdInput?: string

The input model ID (example: a composition or a single LoRA modelId). If specified, the model ID will be attached to the output asset as metadata. If the model-decomposer parser is specified on it, modelInput and scaleInput will be populated automatically

presets?: Array<Preset>

Configuration for the presets

fields: Array<string>
presets: unknown
resolutionComponent?: ResolutionComponent { heightInput, label, presets, widthInput }

Configuration for the resolution component

heightInput: string

The input name of the height

label: string

The label of the component

presets: Array<Preset>

The resolution presets

height: number
label: string
width: number
widthInput: string

The input name of the width

selects?: Record<string, unknown>

Configuration for the selects

triggerGenerate?: TriggerGenerate { label, after, position }

Configuration for the trigger generate button

label: string
after?: string

The ‘name’ of the input after which the trigger generate button will be displayed. Do not specify both position and after.

position?: "bottom" | "top"

The position of the trigger generate button. If position is specified, the button will be displayed at that position. Do not specify both position and after.

One of the following:
"bottom"
"top"
userId?: string

(Deprecated) The user ID (example: “user_VFhihHKMRZyDDnZAJwLb2Q”)
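
A minimal sketch of the transfer call that yields this response (the destination is given as a project or team ID per the endpoint index; the IDs below are placeholders):

```ts
declare const client: any; // assumed: an initialized SDK client

// Sketch: transfer a model to another project.
const { model } = await client.models.transfer("model_eyVcnFJcR92BxBkz7N6g5w", {
  destinationProjectId: "proj_23tlk332lkht3kl2",
});
```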

ModelsDescription

Retrieve
client.models.description.retrieve(stringmodelID, DescriptionRetrieveParams { originalAssets } query?, RequestOptionsoptions?): DescriptionRetrieveResponse { description }
GET/models/{modelId}/description
Update
client.models.description.update(stringmodelID, DescriptionUpdateParams { description, originalAssets } params, RequestOptionsoptions?): DescriptionUpdateResponse { description }
PUT/models/{modelId}/description
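
A sketch pairing the two calls: read the current description, then save an updated one that embeds an asset tag. The update payload is assumed to take the Markdown string in its `description` field; the IDs are placeholders:

```ts
declare const client: any; // assumed: an initialized SDK client

// Sketch: round-trip a model description; {asset:<assetId>} tags are resolved in the response.
const modelId = "model_eyVcnFJcR92BxBkz7N6g5w";
const { description } = await client.models.description.retrieve(modelId);
await client.models.description.update(modelId, {
  description: `${description.value}\n\n{asset:asset_GTrL3mq4SXWyMxkOHRxlpw}`,
});
```
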
Models
DescriptionRetrieveResponse { description }
description: Description { assets, models, value }
assets: Array<Asset>

The list of assets referenced by the Markdown {asset} tag in the description.

id: string

The asset ID (example: “asset_GTrL3mq4SXWyMxkOHRxlpw”)

authorId: string

The author user ID (example: “dcf121faaa1a0a0bbbd9ca1b73d62aea”)

kind: "3d" | "audio" | "document" | 4 more

The kind of asset

One of the following:
"3d"
"audio"
"document"
"image"
"image-hdr"
"json"
"video"
mimeType: string

The mime type of the asset (example: “image/png”)

ownerId: string

The owner (project) ID (example: “proj_23tlk332lkht3kl2” or “team_dlkhgs23tlk3hlkth32lkht3kl2” for old teams)

privacy: "private" | "public" | "unlisted"

The privacy of the asset

One of the following:
"private"
"public"
"unlisted"
properties: Properties { size, animationFrameCount, bitrate, 20 more }

The properties of the asset, content may depend on the kind of asset returned

size: number
animationFrameCount?: number

Number of animation frames if animations exist

bitrate?: number

Bitrate of the media in bits per second

boneCount?: number

Number of bones if skeleton exists

channels?: number

Number of channels of the audio

classification?: "effect" | "interview" | "music" | 5 more

Classification of the audio

One of the following:
"effect"
"interview"
"music"
"other"
"sound"
"speech"
"text"
"unknown"
codecName?: string

Codec name of the media

description?: string

Description of the audio

dimensions?: Array<number>

Bounding box dimensions [width, height, depth]

duration?: number

Duration of the media in seconds

faceCount?: number

Number of faces/triangles in the mesh

format?: string

Format of the mesh file (e.g. ‘glb’)

frameRate?: number

Frame rate of the video in frames per second

hasAnimations?: boolean

Whether the mesh has animations

hasNormals?: boolean

Whether the mesh has normal vectors

hasSkeleton?: boolean

Whether the mesh has bones/skeleton

hasUVs?: boolean

Whether the mesh has UV coordinates

height?: number
nbFrames?: number

Number of frames in the video

sampleRate?: number

Sample rate of the media in Hz

transcription?: Transcription { text }

Transcription of the audio

text: string
vertexCount?: number

Number of vertices in the mesh

width?: number
source: "3d23d" | "3d23d:texture" | "3d:texture" | 72 more

source of the asset

One of the following:
"3d23d"
"3d23d:texture"
"3d:texture"
"3d:texture:albedo"
"3d:texture:metallic"
"3d:texture:mtl"
"3d:texture:normal"
"3d:texture:roughness"
"audio2audio"
"audio2video"
"background-removal"
"canvas"
"canvas-drawing"
"canvas-export"
"detection"
"generative-fill"
"image-prompt-editing"
"img23d"
"img2img"
"img2video"
"inference-control-net"
"inference-control-net-img"
"inference-control-net-inpainting"
"inference-control-net-inpainting-ip-adapter"
"inference-control-net-ip-adapter"
"inference-control-net-reference"
"inference-control-net-texture"
"inference-img"
"inference-img-ip-adapter"
"inference-img-texture"
"inference-in-paint"
"inference-in-paint-ip-adapter"
"inference-reference"
"inference-reference-texture"
"inference-txt"
"inference-txt-ip-adapter"
"inference-txt-texture"
"patch"
"pixelization"
"reframe"
"restyle"
"segment"
"segmentation-image"
"segmentation-mask"
"skybox-3d"
"skybox-base-360"
"skybox-hdri"
"texture"
"texture:albedo"
"texture:ao"
"texture:edge"
"texture:height"
"texture:metallic"
"texture:normal"
"texture:smoothness"
"txt23d"
"txt2audio"
"txt2img"
"txt2video"
"unknown"
"uploaded"
"uploaded-3d"
"uploaded-audio"
"uploaded-avatar"
"uploaded-video"
"upscale"
"upscale-skybox"
"upscale-texture"
"upscale-video"
"vectorization"
"video23d"
"video2audio"
"video2img"
"video2video"
"voice-clone"
url: string

Signed URL to get the asset content

originalFileUrl?: string

The original file URL.

Contains the URL of the original file, without any conversion. Only available for some specific video, audio and 3D assets. Only specified if the given asset data has been replaced with a new file during the creation of the asset.

preview?: Preview { assetId, url }

The asset’s preview.

Contains the assetId and the url of the preview.

assetId: string
url: string
thumbnail?: Thumbnail { assetId, url }

The asset’s thumbnail.

Contains the assetId and the url of the thumbnail.

assetId: string
url: string
models: Array<Model>

The list of models referenced by the Markdown {model} tag in the description.

id: string

The model ID (example: “model_eyVcnFJcR92BxBkz7N6g5w”)

privacy: "private" | "public" | "unlisted"

The privacy of the model (default: private)

One of the following:
"private"
"public"
"unlisted"
type: "custom" | "elevenlabs-voice" | "flux.1" | 34 more

The model type (example: “flux.1-lora”)

One of the following:
"custom"
"elevenlabs-voice"
"flux.1"
"flux.1-composition"
"flux.1-kontext-dev"
"flux.1-kontext-lora"
"flux.1-krea-dev"
"flux.1-krea-lora"
"flux.1-lora"
"flux.1-pro"
"flux.1.1-pro-ultra"
"flux.2-dev-edit-lora"
"flux.2-dev-lora"
"flux.2-klein-4b-edit-lora"
"flux.2-klein-4b-lora"
"flux.2-klein-9b-edit-lora"
"flux.2-klein-9b-lora"
"flux.2-klein-base-4b-edit-lora"
"flux.2-klein-base-4b-lora"
"flux.2-klein-base-9b-edit-lora"
"flux.2-klein-base-9b-lora"
"flux1.1-pro"
"gpt-image-1"
"qwen-image-2512-lora"
"qwen-image-edit-2509-lora"
"qwen-image-edit-2511-lora"
"qwen-image-edit-lora"
"qwen-image-lora"
"sd-1_5"
"sd-1_5-composition"
"sd-1_5-lora"
"sd-xl"
"sd-xl-composition"
"sd-xl-lora"
"zimage-de-turbo-lora"
"zimage-lora"
"zimage-turbo-lora"
authorId?: string

The author user ID (example: “user_VFhihHKMRZyDDnZAJwLb2Q”)

name?: string

The model name (example: “Cinematic Realism”)

ownerId?: string

The owner ID (example: “team_VFhihHKMRZyDDnZAJwLb2Q”)

shortDescription?: string

The model short description (example: “This model generates highly detailed cinematic scenes.”)

value: string

The Markdown description of the model (example: # My model). We allow the {asset:<assetId>} and {model:<modelId>} tags.

DescriptionUpdateResponse { description }
description: Description { assets, models, value }
assets: Array<Asset>

The list of assets referenced by the Markdown {asset} tag in the description.

id: string

The asset ID (example: “asset_GTrL3mq4SXWyMxkOHRxlpw”)

authorId: string

The author user ID (example: “dcf121faaa1a0a0bbbd9ca1b73d62aea”)

kind: "3d" | "audio" | "document" | 4 more

The kind of asset

One of the following:
"3d"
"audio"
"document"
"image"
"image-hdr"
"json"
"video"
mimeType: string

The mime type of the asset (example: “image/png”)

ownerId: string

The owner (project) ID (example: “proj_23tlk332lkht3kl2” or “team_dlkhgs23tlk3hlkth32lkht3kl2” for old teams)

privacy: "private" | "public" | "unlisted"

The privacy of the asset

One of the following:
"private"
"public"
"unlisted"
properties: Properties { size, animationFrameCount, bitrate, 20 more }

The properties of the asset, content may depend on the kind of asset returned

size: number
animationFrameCount?: number

Number of animation frames if animations exist

bitrate?: number

Bitrate of the media in bits per second

boneCount?: number

Number of bones if skeleton exists

channels?: number

Number of channels of the audio

classification?: "effect" | "interview" | "music" | 5 more

Classification of the audio

One of the following:
"effect"
"interview"
"music"
"other"
"sound"
"speech"
"text"
"unknown"
codecName?: string

Codec name of the media

description?: string

Description of the audio

dimensions?: Array<number>

Bounding box dimensions [width, height, depth]

duration?: number

Duration of the media in seconds

faceCount?: number

Number of faces/triangles in the mesh

format?: string

Format of the mesh file (e.g. ‘glb’, etc.)

frameRate?: number

Frame rate of the video in frames per second

hasAnimations?: boolean

Whether the mesh has animations

hasNormals?: boolean

Whether the mesh has normal vectors

hasSkeleton?: boolean

Whether the mesh has bones/skeleton

hasUVs?: boolean

Whether the mesh has UV coordinates

height?: number
nbFrames?: number

Number of frames in the video

sampleRate?: number

Sample rate of the media in Hz

transcription?: Transcription { text }

Transcription of the audio

text: string
vertexCount?: number

Number of vertices in the mesh

width?: number
source: "3d23d" | "3d23d:texture" | "3d:texture" | 72 more

Source of the asset

One of the following:
"3d23d"
"3d23d:texture"
"3d:texture"
"3d:texture:albedo"
"3d:texture:metallic"
"3d:texture:mtl"
"3d:texture:normal"
"3d:texture:roughness"
"audio2audio"
"audio2video"
"background-removal"
"canvas"
"canvas-drawing"
"canvas-export"
"detection"
"generative-fill"
"image-prompt-editing"
"img23d"
"img2img"
"img2video"
"inference-control-net"
"inference-control-net-img"
"inference-control-net-inpainting"
"inference-control-net-inpainting-ip-adapter"
"inference-control-net-ip-adapter"
"inference-control-net-reference"
"inference-control-net-texture"
"inference-img"
"inference-img-ip-adapter"
"inference-img-texture"
"inference-in-paint"
"inference-in-paint-ip-adapter"
"inference-reference"
"inference-reference-texture"
"inference-txt"
"inference-txt-ip-adapter"
"inference-txt-texture"
"patch"
"pixelization"
"reframe"
"restyle"
"segment"
"segmentation-image"
"segmentation-mask"
"skybox-3d"
"skybox-base-360"
"skybox-hdri"
"texture"
"texture:albedo"
"texture:ao"
"texture:edge"
"texture:height"
"texture:metallic"
"texture:normal"
"texture:smoothness"
"txt23d"
"txt2audio"
"txt2img"
"txt2video"
"unknown"
"uploaded"
"uploaded-3d"
"uploaded-audio"
"uploaded-avatar"
"uploaded-video"
"upscale"
"upscale-skybox"
"upscale-texture"
"upscale-video"
"vectorization"
"video23d"
"video2audio"
"video2img"
"video2video"
"voice-clone"
url: string

Signed URL to get the asset content

originalFileUrl?: string

The original file url.

Contains the URL of the original file, without any conversion. Only available for some specific video, audio, and 3D assets. It is only set if the given asset data was replaced with a new file during the creation of the asset.

preview?: Preview { assetId, url }

The asset’s preview.

Contains the assetId and the url of the preview.

assetId: string
url: string
thumbnail?: Thumbnail { assetId, url }

The asset’s thumbnail.

Contains the assetId and the url of the thumbnail.

assetId: string
url: string
models: Array<Model>

The list of models referenced by the Markdown {model} tag in the description.

id: string

The model ID (example: “model_eyVcnFJcR92BxBkz7N6g5w”)

privacy: "private" | "public" | "unlisted"

The privacy of the model (default: private)

One of the following:
"private"
"public"
"unlisted"
type: "custom" | "elevenlabs-voice" | "flux.1" | 34 more

The model type (example: “flux.1-lora”)

One of the following:
"custom"
"elevenlabs-voice"
"flux.1"
"flux.1-composition"
"flux.1-kontext-dev"
"flux.1-kontext-lora"
"flux.1-krea-dev"
"flux.1-krea-lora"
"flux.1-lora"
"flux.1-pro"
"flux.1.1-pro-ultra"
"flux.2-dev-edit-lora"
"flux.2-dev-lora"
"flux.2-klein-4b-edit-lora"
"flux.2-klein-4b-lora"
"flux.2-klein-9b-edit-lora"
"flux.2-klein-9b-lora"
"flux.2-klein-base-4b-edit-lora"
"flux.2-klein-base-4b-lora"
"flux.2-klein-base-9b-edit-lora"
"flux.2-klein-base-9b-lora"
"flux1.1-pro"
"gpt-image-1"
"qwen-image-2512-lora"
"qwen-image-edit-2509-lora"
"qwen-image-edit-2511-lora"
"qwen-image-edit-lora"
"qwen-image-lora"
"sd-1_5"
"sd-1_5-composition"
"sd-1_5-lora"
"sd-xl"
"sd-xl-composition"
"sd-xl-lora"
"zimage-de-turbo-lora"
"zimage-lora"
"zimage-turbo-lora"
authorId?: string

The author user ID (example: “user_VFhihHKMRZyDDnZAJwLb2Q”)

name?: string

The model name (example: “Cinematic Realism”)

ownerId?: string

The owner ID (example: “team_VFhihHKMRZyDDnZAJwLb2Q”)

shortDescription?: string

The model short description (example: “This model generates highly detailed cinematic scenes.”)

value: string

The markdown description of the model (ex: # My model). We allow the {asset:<assetId>} and {model:<modelId>} tags.

ModelsExamples

List
client.models.examples.list(stringmodelID, ExampleListParams { originalAssets } query?, RequestOptionsoptions?): ExampleListResponse { examples }
GET/models/{modelId}/examples
Update
client.models.examples.update(stringmodelID, ExampleUpdateParams { assetIds, originalAssets } params, RequestOptionsoptions?): ExampleUpdateResponse { examples }
PUT/models/{modelId}/examples
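
A short usage sketch of the two endpoints above, assuming `client` is an already-configured SDK client; the IDs are placeholders:

```ts
// List the examples currently attached to a model, then replace them.
const modelId = "model_eyVcnFJcR92BxBkz7N6g5w"; // placeholder

// GET /models/{modelId}/examples
const { examples } = await client.models.examples.list(modelId);

// PUT /models/{modelId}/examples — sets the example assets for the model
await client.models.examples.update(modelId, {
  assetIds: ["asset_GTrL3mq4SXWyMxkOHRxlpw"], // placeholder asset IDs
});
```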
ModelsExpand Collapse
ExampleListResponse { examples }
examples: Array<Example>
asset: Asset { id, authorId, collectionIds, 24 more }

Asset generated by the inference

id: string

The asset ID (example: “asset_GTrL3mq4SXWyMxkOHRxlpw”)

authorId: string

The author user ID (example: “dcf121faaa1a0a0bbbd9ca1b73d62aea”)

collectionIds: Array<string>

A list of CollectionId this asset belongs to

createdAt: string

The asset creation date as an ISO string (example: “2023-02-03T11:19:41.579Z”)

editCapabilities: Array<"DETECTION" | "GENERATIVE_FILL" | "PIXELATE" | 8 more>

List of edit capabilities

One of the following:
"DETECTION"
"GENERATIVE_FILL"
"PIXELATE"
"PROMPT_EDITING"
"REFINE"
"REFRAME"
"REMOVE_BACKGROUND"
"SEGMENTATION"
"UPSCALE"
"UPSCALE_360"
"VECTORIZATION"
kind: "3d" | "audio" | "document" | 4 more

The kind of asset

One of the following:
"3d"
"audio"
"document"
"image"
"image-hdr"
"json"
"video"
metadata: Metadata { kind, type, angular, 106 more }

Metadata of the asset with some additional information

kind: "3d" | "audio" | "document" | 4 more
One of the following:
"3d"
"audio"
"document"
"image"
"image-hdr"
"json"
"video"
type: "3d-texture" | "3d-texture-albedo" | "3d-texture-metallic" | 72 more

The type of the asset. Ex: ‘inference-txt2img’ represents an asset generated from a text-to-image model

One of the following:
"3d-texture"
"3d-texture-albedo"
"3d-texture-metallic"
"3d-texture-mtl"
"3d-texture-normal"
"3d-texture-roughness"
"3d23d"
"3d23d-texture"
"audio2audio"
"audio2video"
"background-removal"
"canvas"
"canvas-drawing"
"canvas-export"
"detection"
"generative-fill"
"image-prompt-editing"
"img23d"
"img2img"
"img2video"
"inference-controlnet"
"inference-controlnet-img2img"
"inference-controlnet-inpaint"
"inference-controlnet-inpaint-ip-adapter"
"inference-controlnet-ip-adapter"
"inference-controlnet-reference"
"inference-controlnet-texture"
"inference-img2img"
"inference-img2img-ip-adapter"
"inference-img2img-texture"
"inference-inpaint"
"inference-inpaint-ip-adapter"
"inference-reference"
"inference-reference-texture"
"inference-txt2img"
"inference-txt2img-ip-adapter"
"inference-txt2img-texture"
"patch"
"pixelization"
"reframe"
"restyle"
"segment"
"segmentation-image"
"segmentation-mask"
"skybox-3d"
"skybox-base-360"
"skybox-hdri"
"texture"
"texture-albedo"
"texture-ao"
"texture-edge"
"texture-height"
"texture-metallic"
"texture-normal"
"texture-smoothness"
"txt23d"
"txt2audio"
"txt2img"
"txt2video"
"unknown"
"uploaded"
"uploaded-3d"
"uploaded-audio"
"uploaded-avatar"
"uploaded-video"
"upscale"
"upscale-skybox"
"upscale-texture"
"upscale-video"
"vectorization"
"video23d"
"video2audio"
"video2img"
"video2video"
"voice-clone"
angular?: number

How angular is the surface? 0 is like a sphere, 1 is like a mechanical object

maximum1
minimum0
aspectRatio?: string

The optional aspect ratio given for the generation, only applicable for some models

backgroundOpacity?: number

Integer between 0 and 255 setting the opacity of the background in the result images.

maximum255
minimum0
baseModelId?: string

The baseModelId, which may be changed at inference time

bbox?: Array<number>

A bounding box around the object of interest, in the format [x1, y1, x2, y2].

betterQuality?: boolean

Remove small dark spots (i.e. “pepper”) and connect small bright cracks.

cannyStructureImage?: string

The control image already processed by canny detector. Must reference an existing AssetId.

clustering?: boolean

Activate clustering.

colorCorrection?: boolean

Ensure upscaled tiles have the same color histogram as the original tile.

colorMode?: string
colorPrecision?: number
concepts?: Array<Concept>

Flux Kontext LoRA to style the image. For Flux Kontext Prompt Editing.

modelId: string

The model ID (example: “model_eyVcnFJcR92BxBkz7N6g5w”)

scale: number

The scale of the model (example: 1.0). For Flux Kontext Prompt Editing, the scale is between 0 and 2.

maximum2
minimum-2
modelEpoch?: string

The epoch of the model (example: “000001”). Only available for Flux Lora Trained models

contours?: Array<Array<Array<Array<number>>>>
controlEnd?: number

End step for control.

copiedAt?: string

The date when the asset was copied to a project

cornerThreshold?: number
creativity?: number

Allow the generation of “hallucinations” during the upscale process, which adds additional details and deviates from the original image. Default: optimized for your preset and style.

maximum100
minimum0
creativityDecay?: number

Amount of decay in creativity over the upscale process. The lower the value, the less creativity is preserved over the upscale process.

maximum100
minimum0
defaultParameters?: boolean

If true, use the default parameters

depthFidelity?: number

The depth fidelity if a depth image is provided

maximum100
minimum0
depthImage?: string

The control image processed by depth estimator. Must reference an existing AssetId.

detailsLevel?: number

Amount of details to remove or add

maximum50
minimum-50
dilate?: number

The number of pixels to dilate the result masks.

maximum30
minimum0
factor?: number

Contrast factor for Grayscale detector

filterSpeckle?: number
fractality?: number

Determine the scale at which the upscale process works.

  • With a small value, the upscale works at the largest scale, resulting in fewer added details and more coherent images. Ideal for portraits, for example.
  • With a large value, the upscale works at the smallest scale, resulting in more added details and more hallucinations. Ideal for landscapes, for example.

Note: a small value is slower and more expensive to run.

maximum100
minimum0
geometryEnforcement?: number

Apply extra control to the Skybox 360 geometry. The higher the value, the more the 360 geometry will influence the generated skybox image.

Use with caution. Default is adapted to the other parameters.

maximum100
minimum0
guidance?: number

The guidance used to generate this asset

halfMode?: boolean
hdr?: number
height?: number
highThreshold?: number

High threshold for Canny detector

horizontalExpansionRatio?: number

(deprecated) Horizontal expansion ratio.

maximum2
minimum1
image?: string

The input image to process. Must reference an existing AssetId or be a data URL.

imageFidelity?: number

Strengthen the similarity to the original image during the upscale. Default: optimized for your preset and style.

maximum100
minimum0
imageType?: "seamfull" | "skybox" | "texture"

Preserve the seamless properties of skybox or texture images. Input has to be of the same (seamless) type.

One of the following:
"seamfull"
"skybox"
"texture"
inferenceId?: string

The id of the Inference describing how this image was generated

inputFidelity?: "high" | "low"

When set to high, better preserves details from the input images in the output. This is especially useful when using images that contain elements like faces or logos that require accurate preservation in the generated image.

You can provide multiple input images that will all be preserved with high fidelity, but keep in mind that the first image will be preserved with richer textures and finer details, so if you include elements such as faces, consider placing them in the first image.

Only available for the gpt-image-1 model.

One of the following:
"high"
"low"
inputLocation?: "bottom" | "left" | "middle" | 2 more

Location of the input image in the output.

One of the following:
"bottom"
"left"
"middle"
"right"
"top"
invert?: boolean

To invert the relief

keypointThreshold?: number

How polished is the surface? 0 is like a rough surface, 1 is like a mirror

maximum1
minimum0
layerDifference?: number
lengthThreshold?: number
lockExpiresAt?: string

The ISO timestamp when the lock on the canvas will expire

lowThreshold?: number

Low threshold for Canny detector

mask?: string

The mask used for the asset generation or editing

maxIterations?: number
maxThreshold?: number

Maximum threshold for Grayscale conversion

minThreshold?: number

Minimum threshold for Grayscale conversion

modality?: "canny" | "depth" | "grayscale" | 7 more

Modality to detect

One of the following:
"canny"
"depth"
"grayscale"
"lineart_anime"
"mlsd"
"normal"
"pose"
"scribble"
"segmentation"
"sketch"
mode?: string
modelId?: string

The modelId used to generate this asset

modelType?: "custom" | "elevenlabs-voice" | "flux.1" | 34 more

The type of the generator used

One of the following:
"custom"
"elevenlabs-voice"
"flux.1"
"flux.1-composition"
"flux.1-kontext-dev"
"flux.1-kontext-lora"
"flux.1-krea-dev"
"flux.1-krea-lora"
"flux.1-lora"
"flux.1-pro"
"flux.1.1-pro-ultra"
"flux.2-dev-edit-lora"
"flux.2-dev-lora"
"flux.2-klein-4b-edit-lora"
"flux.2-klein-4b-lora"
"flux.2-klein-9b-edit-lora"
"flux.2-klein-9b-lora"
"flux.2-klein-base-4b-edit-lora"
"flux.2-klein-base-4b-lora"
"flux.2-klein-base-9b-edit-lora"
"flux.2-klein-base-9b-lora"
"flux1.1-pro"
"gpt-image-1"
"qwen-image-2512-lora"
"qwen-image-edit-2509-lora"
"qwen-image-edit-2511-lora"
"qwen-image-edit-lora"
"qwen-image-lora"
"sd-1_5"
"sd-1_5-composition"
"sd-1_5-lora"
"sd-xl"
"sd-xl-composition"
"sd-xl-lora"
"zimage-de-turbo-lora"
"zimage-lora"
"zimage-turbo-lora"
name?: string
nbMasks?: number
negativePrompt?: string

The negative prompt used to generate this asset

negativePromptStrength?: number

Controls the influence of the negative prompt. Default 0 means the negative prompt has no effect. Higher values increase negative prompt influence. Must be > 0 if negativePrompt is provided.

maximum10
minimum0
numInferenceSteps?: number

The number of denoising steps for each image generation.

maximum50
minimum5
numOutputs?: number

The number of outputs to generate.

maximum8
minimum1
originalAssetId?: string
outputIndex?: number
overlapPercentage?: number

Overlap percentage for the output image.

maximum0.5
minimum0
overrideEmbeddings?: boolean

Override the embeddings of the model. Only your prompt and negativePrompt will be used. Use with caution.

parentId?: string
parentJobId?: string
pathPrecision?: number
points?: Array<Array<number>>

List of points (label, x, y) in the image where label = 0 for background and 1 for object.

polished?: number

How polished is the surface? 0 is like a rough surface, 1 is like a mirror

maximum1
minimum0
preset?: string
progressPercent?: number
prompt?: string

The prompt that guided the asset generation or editing

promptFidelity?: number

Increase the fidelity to the prompt during upscale. Default: optimized for your preset and style.

maximum100
minimum0
raised?: number

How raised is the surface? 0 is flat like water, 1 is like a very rough rock

maximum1
minimum0
referenceImages?: Array<string>

The reference images used for the asset generation or editing

refinementSteps?: number

Additional refinement steps before scaling.

If scalingFactor == 1, the refinement process will be applied (1 + refinementSteps) times. If scalingFactor > 1, the refinement process will be applied refinementSteps times. For example, with refinementSteps = 2, refinement runs 3 times when scalingFactor == 1 and twice when scalingFactor > 1.

maximum4
minimum0
removeBackground?: boolean

Remove background for Grayscale detector

resizeOption?: number

Size proportion of the input image in the output.

maximum1
minimum0.1
resultContours?: boolean

Boolean to output the contours.

resultImage?: boolean

Boolean to enable output of the cut-out object.

resultMask?: boolean

Boolean to enable returning the masks (binary images) in the response.

rootParentId?: string
saveFlipbook?: boolean

Save a flipbook of the texture. Deactivated when the input texture is larger than 2048x2048px

scalingFactor?: number

Scaling factor (used when targetWidth is not specified)

maximum16
minimum1
scheduler?: string

The scheduler used to generate this asset

seed?: string

The seed used to generate this asset. <!> Can be a string or a number in some cases <!>.

sharpen?: boolean

Sharpen tiles.

shiny?: number

How shiny is the surface? 0 is like a matte surface, 1 is like a diamond

maximum1
minimum0
size?: number
sketch?: boolean

Activate sketch detection instead of canny.

sourceProjectId?: string
spliceThreshold?: number
strength?: number

The strength.

Only available for the flux-kontext LoRA model.

structureFidelity?: number

Strength for the input image structure preservation

maximum100
minimum0
structureImage?: string

The control image for structure. A canny detector will be applied to this image. Must reference an existing AssetId.

style?: "3d-cartoon" | "3d-rendered" | "anime" | 23 more
One of the following:
"3d-cartoon"
"3d-rendered"
"anime"
"cartoon"
"cinematic"
"claymation"
"cloud-skydome"
"comic"
"cyberpunk"
"enchanted"
"fantasy"
"ink"
"manga"
"manga-color"
"minimalist"
"neon-tron"
"oil-painting"
"pastel"
"photo"
"photography"
"psychedelic"
"retro-fantasy"
"scifi-concept-art"
"space"
"standard"
"whimsical"
styleFidelity?: number

The higher the value the more it will look like the style image(s)

maximum100
minimum0
styleImages?: Array<string>

List of style images. Most of the time, only one image is enough. It must be existing AssetIds.

styleImagesFidelity?: number

Condition the influence of the style image(s). The higher the value, the more the style images will influence the upscaled image.

maximum100
minimum0
targetHeight?: number

The target height of the output image.

maximum2048
minimum0
targetWidth?: number

Target width for the upscaled image; takes priority over scalingFactor

maximum16000
minimum1024
text?: string

A textual description / keywords describing the object of interest.

maxLength100
texture?: string

The asset to convert in texture maps. Must reference an existing AssetId.

thumbnail?: Thumbnail { assetId, url }

The thumbnail of the canvas

assetId: string

The AssetId of the image used as a thumbnail for the canvas (example: “asset_GTrL3mq4SXWyMxkOHRxlpw”)

url: string

The url of the image used as a thumbnail for the canvas

tileStyle?: boolean

If set to true, during the upscaling process, the model will match tiles of the source image with tiles of the style image(s). This will result in a more coherent restyle. Works best with style images that have a similar composition.

trainingImage?: boolean
verticalExpansionRatio?: number

(deprecated) Vertical expansion ratio.

maximum2
minimum1
width?: number

The width of the rendered image.

maximum2048
minimum1024
mimeType: string

The mime type of the asset (example: “image/png”)

ownerId: string

The owner (project) ID (example: “proj_23tlk332lkht3kl2” or “team_dlkhgs23tlk3hlkth32lkht3kl2” for old teams)

privacy: "private" | "public" | "unlisted"

The privacy of the asset

One of the following:
"private"
"public"
"unlisted"
properties: Properties { size, animationFrameCount, bitrate, 20 more }

The properties of the asset; the content may depend on the kind of asset returned

size: number
animationFrameCount?: number

Number of animation frames if animations exist

bitrate?: number

Bitrate of the media in bits per second

boneCount?: number

Number of bones if skeleton exists

channels?: number

Number of channels of the audio

classification?: "effect" | "interview" | "music" | 5 more

Classification of the audio

One of the following:
"effect"
"interview"
"music"
"other"
"sound"
"speech"
"text"
"unknown"
codecName?: string

Codec name of the media

description?: string

Description of the audio

dimensions?: Array<number>

Bounding box dimensions [width, height, depth]

duration?: number

Duration of the media in seconds

faceCount?: number

Number of faces/triangles in the mesh

format?: string

Format of the mesh file (e.g. ‘glb’, etc.)

frameRate?: number

Frame rate of the video in frames per second

hasAnimations?: boolean

Whether the mesh has animations

hasNormals?: boolean

Whether the mesh has normal vectors

hasSkeleton?: boolean

Whether the mesh has bones/skeleton

hasUVs?: boolean

Whether the mesh has UV coordinates

height?: number
nbFrames?: number

Number of frames in the video

sampleRate?: number

Sample rate of the media in Hz

transcription?: Transcription { text }

Transcription of the audio

text: string
vertexCount?: number

Number of vertices in the mesh

width?: number
source: "3d23d" | "3d23d:texture" | "3d:texture" | 72 more

Source of the asset

One of the following:
"3d23d"
"3d23d:texture"
"3d:texture"
"3d:texture:albedo"
"3d:texture:metallic"
"3d:texture:mtl"
"3d:texture:normal"
"3d:texture:roughness"
"audio2audio"
"audio2video"
"background-removal"
"canvas"
"canvas-drawing"
"canvas-export"
"detection"
"generative-fill"
"image-prompt-editing"
"img23d"
"img2img"
"img2video"
"inference-control-net"
"inference-control-net-img"
"inference-control-net-inpainting"
"inference-control-net-inpainting-ip-adapter"
"inference-control-net-ip-adapter"
"inference-control-net-reference"
"inference-control-net-texture"
"inference-img"
"inference-img-ip-adapter"
"inference-img-texture"
"inference-in-paint"
"inference-in-paint-ip-adapter"
"inference-reference"
"inference-reference-texture"
"inference-txt"
"inference-txt-ip-adapter"
"inference-txt-texture"
"patch"
"pixelization"
"reframe"
"restyle"
"segment"
"segmentation-image"
"segmentation-mask"
"skybox-3d"
"skybox-base-360"
"skybox-hdri"
"texture"
"texture:albedo"
"texture:ao"
"texture:edge"
"texture:height"
"texture:metallic"
"texture:normal"
"texture:smoothness"
"txt23d"
"txt2audio"
"txt2img"
"txt2video"
"unknown"
"uploaded"
"uploaded-3d"
"uploaded-audio"
"uploaded-avatar"
"uploaded-video"
"upscale"
"upscale-skybox"
"upscale-texture"
"upscale-video"
"vectorization"
"video23d"
"video2audio"
"video2img"
"video2video"
"voice-clone"
status: "error" | "pending" | "success"

The actual status

One of the following:
"error"
"pending"
"success"
tags: Array<string>

The associated tags (example: [“sci-fi”, “landscape”])

updatedAt: string

The asset last update date as an ISO string (example: “2023-02-03T11:19:41.579Z”)

url: string

Signed URL to get the asset content

automaticCaptioning?: string

Automatic captioning of the asset

description?: string

The description, it will contain in priority:

  • the manual description
  • the advanced captioning when the asset is used in training flow
  • the automatic captioning
embedding?: Array<number>

The embedding of the asset when requested.

Only available when an asset can be embedded (i.e. not detection maps)

firstFrame?: FirstFrame { assetId, url }

The video asset’s first frame.

Contains the assetId and the url of the first frame.

assetId: string
url: string
isHidden?: boolean

Whether the asset is hidden.

lastFrame?: LastFrame { assetId, url }

The video asset’s last frame.

Contains the assetId and the url of the last frame.

assetId: string
url: string
nsfw?: Array<string>

The NSFW labels

originalFileUrl?: string

The original file url.

Contains the URL of the original file, without any conversion. Only available for some specific video, audio, and 3D assets. It is only set if the given asset data was replaced with a new file during the creation of the asset.

outputIndex?: number

The output index of the asset within a job. This index is an integer starting at 0, used to differentiate between multiple outputs of the same job. If the job has only one output, this index is 0.

preview?: Preview { assetId, url }

The asset’s preview.

Contains the assetId and the url of the preview.

assetId: string
url: string
thumbnail?: Thumbnail { assetId, url }

The asset’s thumbnail.

Contains the assetId and the url of the thumbnail.

assetId: string
url: string
modelId: string

Model id of the model used to generate the asset

inferenceId?: string

Inference id of the inference used to generate the asset

inferenceParameters?: InferenceParameters { prompt, type, aspectRatio, 36 more }

The inference parameters used to generate the asset

prompt: string

Full text prompt including the model placeholder. (example: “an illustration of phoenix in a fantasy world, flying over a mountain, 8k, bokeh effect”)

type: "controlnet" | "controlnet_img2img" | "controlnet_inpaint" | 15 more

The type of inference to use. Example: txt2img, img2img, etc.

Selecting the right type will condition the expected parameters.

Note: if model.type is sd-xl* or sd-1_5*, when using the "inpaint" inference type, Scenario determines the best available baseModel for a given modelId: one of [“stable-diffusion-inpainting”, “stable-diffusion-xl-1.0-inpainting-0.1”] will be used.

One of the following:
"controlnet"
"controlnet_img2img"
"controlnet_inpaint"
"controlnet_inpaint_ip_adapter"
"controlnet_ip_adapter"
"controlnet_reference"
"controlnet_texture"
"img2img"
"img2img_ip_adapter"
"img2img_texture"
"inpaint"
"inpaint_ip_adapter"
"outpaint"
"reference"
"reference_texture"
"txt2img"
"txt2img_ip_adapter"
"txt2img_texture"
aspectRatio?: "16:9" | "1:1" | "21:9" | 8 more

The aspect ratio of the generated images. Only used for the model flux.1.1-pro-ultra. The aspect ratio is a string formatted as “width:height” (example: “16:9”).

One of the following:
"16:9"
"1:1"
"21:9"
"2:3"
"3:2"
"3:4"
"4:3"
"4:5"
"5:4"
"9:16"
"9:21"
baseModelId?: string

The base model to use for the inference. Only Flux LoRA models can use this parameter. Allowed values are available in the model’s compliantModelIds attribute.

concepts?: Array<Concept>
modelId: string

The model ID (example: “model_eyVcnFJcR92BxBkz7N6g5w”)

scale: number

The scale of the model (example: 1.0). For Flux Kontext Prompt Editing, the scale is between 0 and 2.

maximum2
minimum-2
modelEpoch?: string

The epoch of the model (example: “000001”). Only available for Flux Lora Trained models

controlEnd?: number

Specifies how long the ControlNet guidance should be applied during the inference process.

Only available for Flux.1-dev based models.

The value represents the percentage of total inference steps where the ControlNet guidance is active. For example:

  • 1.0: ControlNet guidance is applied during all inference steps
  • 0.5: ControlNet guidance is only applied during the first half of inference steps

Default values:

  • 0.5 for Canny modality
  • 0.6 for all other modalities
maximum1
minimum0.1
controlImage?: string

Signed URL to display the controlnet input image

controlImageId?: string

Asset id of the controlnet input image

controlStart?: number

Specifies the starting point of the ControlNet guidance during the inference process.

Only available for Flux.1-dev based models.

The value represents the percentage of total inference steps where the ControlNet guidance starts. For example:

  • 0.0: ControlNet guidance starts at the beginning of the inference steps
  • 0.5: ControlNet guidance starts at the middle of the inference steps
maximum0.9
minimum0
disableMerging?: boolean

If set to true, the entire input image will likely change during inpainting. This results in faster inferences, but the output image will be harder to integrate if the input is just a small part of a larger image.

disableModalityDetection?: boolean

If false, the process uses the given image to detect the modality. If true (default), the process will not try to detect the modality of the given image.

For example:

  • with the pose modality and a false value, the process detects the pose of people in the given image
  • with the depth modality and a false value, the process detects the depth of the given image
  • with the scribble modality and a true value, the process uses the given image as a scribble directly

⚠️ For models of the FLUX schnell or dev families, this parameter is ignored. The modality detection is always disabled. ⚠️

guidance?: number

Controls how closely the generated image follows the prompt. Higher values result in stronger adherence to the prompt. Default and allowed values depend on the model type:

  • For Flux dev models, the default is 3.5 and allowed values are within [0, 10]
  • For Flux pro models, the default is 3 and allowed values are within [2, 5]
  • For SDXL models, the default is 6 and allowed values are within [0, 20]
  • For SD1.5 models, the default is 7.5 and allowed values are within [0, 20]
maximum20
minimum0
height?: number

The height of the generated images; must be a multiple of 8 (within [64, 2048], default: 512). If model.type is sd-xl, sd-xl-lora, or sd-xl-composition, the height must be within [512, 2048]. If model.type is sd-1_5, the height must be within [64, 1024]. If model.type is flux.1.1-pro-ultra, you can use the aspectRatio parameter instead.

maximum2048
minimum64
multipleOf8
hideResults?: boolean

If set, generated assets will be hidden and not returned in the list of images of the inference or when listing assets (default: false)

image?: string

Signed URL to display the input image

imageId?: string

Asset id of the input image

intermediateImages?: boolean

Enable or disable the intermediate images generation (default: false)

ipAdapterImage?: string

Signed URL to display the IpAdapter image

ipAdapterImageId?: string

Asset id of the input IpAdapter image

ipAdapterImageIds?: Array<string>

Asset id of the input IpAdapter images

ipAdapterImages?: Array<string>

Signed URL to display the IpAdapter images

ipAdapterScale?: number

IpAdapter scale factor (within [0.0, 1.0], default: 0.9).

maximum1
minimum0
ipAdapterScales?: Array<number>

IpAdapter scale factors (within [0.0, 1.0], default: 0.9).

maximum1
minimum0
ipAdapterType?: "character" | "style"

The type of IP Adapter model to use. Must be one of [style, character]; defaults to `style`

One of the following:
"character"
"style"
mask?: string

Signed URL to display the mask image

maskId?: string

Asset id of the mask image

modality?: string

The modality associated with the control image used for the generation: it can be a combination of modalities, or a single preset, depending on the model family.

For models of the SD1.5 family:

  • up to 3 modalities from canny, pose, depth, lines, seg, scribble, lineart, normal-map, illusion
  • or one of the following presets: character, landscape, city, interior.

For models of the SDXL family:

  • up to 3 modalities from canny, pose, depth, seg, illusion, scribble
  • or one of the following presets: character, landscape.

For models of the FLUX schnell or dev families:

  • one modality from: canny, tile, depth, blur, pose, gray, low-quality

Optionally, you can associate a value with these modalities or presets. The value must be within (0.0, 1.0].

Examples:

  • canny
  • depth:0.5,pose:1.0
  • canny:0.5,depth:0.5,lines:0.3
  • landscape
  • character:0.5
  • illusion:1

Note: if you use a value that is not supported by the model family, this will result in an error.
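
Putting these rules together, the values below are plausible modality strings for each family (a sketch based on the lists above, not an exhaustive enumeration):

```ts
// Sketches of modality strings, following the per-family rules above.
const sd15Combo  = "canny:0.5,depth:0.5,lines:0.3"; // SD1.5: up to 3 modalities
const sd15Preset = "character";                     // SD1.5: a preset instead
const sdxlCombo  = "depth:0.5,pose:1.0";            // SDXL: up to 3 modalities
const fluxSingle = "canny";                         // FLUX: exactly one modality
```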

modelEpoch?: string

The epoch of the model to use for the inference. Only available for Flux Lora Trained models.

negativePrompt?: string

The prompt not to guide the image generation; ignored when guidance < 1 (example: “((ugly face))”). For Flux-based models (not Fast-Flux): requires negativePromptStrength > 0 and is active only for the txt2img / img2img / controlnet inference types.

negativePromptStrength?: number

Only applicable for flux-dev based models for txt2img, img2img, and controlnet inference types.

Controls the influence of the negative prompt. Default 0 means the negative prompt has no effect. Higher values increase negative prompt influence. Must be > 0 if negativePrompt is provided.

maximum10
minimum0
numInferenceSteps?: number

The number of denoising steps for each image generation (within [1, 150], default: 30)

maximum150
minimum1
numSamples?: number

The number of images to generate (within [1, 128], default: 4)

maximum128
minimum1
referenceAdain?: boolean

Whether to use reference AdaIN. Only for the “reference” inference type

referenceAttn?: boolean

Whether to use the reference query for self-attention’s context. Only for the “reference” inference type

scheduler?: "DDIMScheduler" | "DDPMScheduler" | "DEISMultistepScheduler" | 12 more

The scheduler to use to override the default configured for the model. See detailed documentation for more details.

One of the following:
"DDIMScheduler"
"DDPMScheduler"
"DEISMultistepScheduler"
"DPMSolverMultistepScheduler"
"DPMSolverSinglestepScheduler"
"EulerAncestralDiscreteScheduler"
"EulerDiscreteScheduler"
"HeunDiscreteScheduler"
"KDPM2AncestralDiscreteScheduler"
"KDPM2DiscreteScheduler"
"LCMScheduler"
"LMSDiscreteScheduler"
"PNDMScheduler"
"TCDScheduler"
"UniPCMultistepScheduler"
seed?: string

Used to reproduce previous results. Default: randomly generated number.

maximum2147483647
minimum0
strength?: number

Controls the noise intensity introduced to the input image, where a value of 1.0 completely erases the original image’s details. Available for img2img and inpainting. (within [0.01, 1.0], default: 0.75)

maximum1
minimum0.01
styleFidelity?: number

If styleFidelity is 1.0, the control is more important; if 0.0, the prompt is more important; values in between balance the two. Only for the “reference” inference type

maximum1
minimum0
width?: number

The width of the generated images; must be a multiple of 8 (within [64, 2048], default: 512). If model.type is sd-xl, sd-xl-lora, or sd-xl-composition, the width must be within [512, 2048]. If model.type is sd-1_5, the width must be within [64, 1024]. If model.type is flux.1.1-pro-ultra, you can use the aspectRatio parameter instead.

maximum2048
minimum64
multipleOf8
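
Taken together, the fields above describe one full set of inference parameters. A minimal txt2img sketch using only fields documented here (all values are illustrative):

```ts
// Illustrative txt2img inference parameters assembled from the fields above.
const inferenceParameters = {
  type: "txt2img",
  prompt:
    "an illustration of phoenix in a fantasy world, flying over a mountain, 8k, bokeh effect",
  width: 1024,               // must be a multiple of 8
  height: 1024,              // must be a multiple of 8
  numInferenceSteps: 30,     // default
  numSamples: 4,             // default
  guidance: 3.5,             // e.g. the Flux dev default
  negativePrompt: "((ugly face))",
  negativePromptStrength: 1, // must be > 0 when negativePrompt is set (Flux dev)
  seed: "42",                // fixed seed to reproduce results
};
```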
job?: Job { createdAt, jobId, jobType, 8 more }

The job associated with the asset

createdAt: string

The job creation date as an ISO string (example: “2023-02-03T11:19:41.579Z”)

jobId: string

The job ID (example: “job_ocZCnG1Df35XRL1QyCZSRxAG8”)

jobType: "assets-download" | "canvas-export" | "caption" | 36 more

The type of job

One of the following:
"assets-download"
"canvas-export"
"caption"
"caption-llava"
"custom"
"describe-style"
"detection"
"embed"
"flux"
"flux-model-training"
"generate-prompt"
"image-generation"
"image-prompt-editing"
"inference"
"mesh-preview-rendering"
"model-download"
"model-import"
"model-training"
"musubi-model-training"
"openai-image-generation"
"patch-image"
"pixelate"
"reframe"
"remove-background"
"repaint"
"restyle"
"segment"
"skybox-3d"
"skybox-base-360"
"skybox-hdri"
"skybox-upscale-360"
"texture"
"translate"
"upload"
"upscale"
"upscale-skybox"
"upscale-texture"
"vectorize"
"workflow"
metadata: Metadata { assetIds, error, flow, 6 more }

Metadata of the job with some additional information

assetIds?: Array<string>

List of produced assets for this job

error?: string | null

Error for the job, if any

flow?: Array<Flow>

The flow of the job. Only available for workflow jobs.

id: string

The id of the node.

status: "failure" | "pending" | "processing" | 2 more

The status of the node. Only available for WorkflowJob nodes.

One of the following:
"failure"
"pending"
"processing"
"skipped"
"success"
type: "custom-model" | "for-each" | "generate-prompt" | 7 more

The type of the job for the node.

One of the following:
"custom-model"
"for-each"
"generate-prompt"
"list"
"logic"
"model"
"remove-background"
"transform"
"user-approval"
"workflow"
assets?: Array<Asset>

List of produced assets for this node.

assetId: string
url: string
count?: number

Fixed number of iterations for a ForEach node. When set, the loop runs exactly count times regardless of array input. When not set, the loop iterates over the resolved array input. Only available for ForEach nodes.
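
For instance, a ForEach node with a fixed count might appear in a flow as sketched below; only fields from this schema are used, and the node IDs are hypothetical:

```ts
// Sketch of a ForEach flow node that runs its body exactly 3 times.
const forEachNode = {
  id: "node_loop",                // hypothetical node id
  type: "for-each",
  status: "pending",
  count: 3,                       // fixed iteration count; array input is ignored
  loopBodyNodeIds: ["node_body"], // body templates cloned once per iteration
};
```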

dependsOn?: Array<string>

The nodes that this node depends on. Only available for nodes that have dependencies. Mainly used for user approval nodes.

includeOutputsInWorkflowJob?: true

If true, the outputs of this node will be included in the workflow job’s final output. Only applicable to producing nodes (custom-model, inference, etc.). By default, only last nodes (nodes not referenced by other nodes) contribute to outputs. Set this to true to also include intermediate nodes in the final output. Note: This should only be set to true or left undefined.

inputs?: Array<Input>

The inputs of the node.

name: string

The name that must be used to call the model through the API

type: "boolean" | "file" | "file_array" | 7 more

The data type of the input

One of the following:
"boolean"
"file"
"file_array"
"inputs_array"
"model"
"model_array"
"number"
"number_array"
"string"
"string_array"
allowedValues?: Array<unknown>

The allowed values for the input. For `string` or `number` types, creates a single-select dropdown. For `string_array` type, creates a multi-select dropdown.

backgroundBehavior?: "opaque" | "transparent"

Specifies the background behavior for the input. Only available for `file` and `file_array` input types with kind `image`.

One of the following:
"opaque"
"transparent"
color?: boolean

Whether the input is a color or not. Only available for `string` input type.

costImpact?: boolean

Whether this input affects the model’s cost calculation

default?: unknown

The default value for the input

description?: string

Help text displayed in the UI to provide additional information about the input

group?: string

Used to visually group inputs together in the UI. Inputs with the same group value appear consecutively in the UI.

hint?: string

Hint text displayed in the UI as a tooltip to guide the user

inputs?: Array<Record<string, unknown>>

The list of inputs which form an object within a container array. All inputs are the same as the current object. This is only available for type inputs_array inputs.

items?: Array<Array<Item>>

The configured items for inputs_array type inputs. Each item is an array of SubNodeInput that need ref/value resolution. Only available for inputs_array type.

name: string

The name that must be used to call the model through the API

type: "boolean" | "file" | "file_array" | 7 more

The data type of the input

One of the following:
"boolean"
"file"
"file_array"
"inputs_array"
"model"
"model_array"
"number"
"number_array"
"string"
"string_array"
allowedValues?: Array<unknown>

The allowed values for the input. For `string` or `number` types, creates a single-select dropdown. For `string_array` type, creates a multi-select dropdown.

backgroundBehavior?: "opaque" | "transparent"

Specifies the background behavior for the input. Only available for `file` and `file_array` input types with kind `image`.

One of the following:
"opaque"
"transparent"
color?: boolean

Whether the input is a color or not. Only available for `string` input type.

costImpact?: boolean

Whether this input affects the model’s cost calculation

default?: unknown

The default value for the input

description?: string

Help text displayed in the UI to provide additional information about the input

group?: string

Used to visually group inputs together in the UI. Inputs with the same group value appear consecutively in the UI.

hint?: string

Hint text displayed in the UI as a tooltip to guide the user

inputs?: Array<Record<string, unknown>>

The list of inputs which form an object within a container array. All inputs are the same as the current object. This is only available for type inputs_array inputs.

kind?: "3d" | "audio" | "document" | 4 more

The asset kind of the input. Only taken into account for `file` and `file_array` input types. If the model accepts multiple kinds, the API will not be able to create the asset on the fly from a data URL unless the URL includes a `data:<kind>,` prefix

One of the following:
"3d"
"audio"
"document"
"image"
"image-hdr"
"json"
"video"
label?: string

The label displayed in the UI for this input

maskFrom?: string

The name of the file input field to use as the mask source

max?: number

The maximum allowed value. Only available for `number` and `array` input types.

maxLength?: number

The maximum allowed length for `string` inputs. Also applies to each item in `string_array`.

maxSize?: number

The maximum allowed file size in bytes. Only applies to `file` and `file_array` input types. Validated against `asset.properties.size` at job creation time.

min?: number

The minimum allowed value. Only available for `number` and array input types.

minLength?: number

The minimum allowed length for string inputs. Also applies to each item in `string_array`.

modelTypes?: Array<"custom" | "elevenlabs-voice" | "flux.1" | 34 more>

The allowed model types for this input. Example: `["flux.1-lora"]`. Only available for `model_array` input type.

One of the following:
"custom"
"elevenlabs-voice"
"flux.1"
"flux.1-composition"
"flux.1-kontext-dev"
"flux.1-kontext-lora"
"flux.1-krea-dev"
"flux.1-krea-lora"
"flux.1-lora"
"flux.1-pro"
"flux.1.1-pro-ultra"
"flux.2-dev-edit-lora"
"flux.2-dev-lora"
"flux.2-klein-4b-edit-lora"
"flux.2-klein-4b-lora"
"flux.2-klein-9b-edit-lora"
"flux.2-klein-9b-lora"
"flux.2-klein-base-4b-edit-lora"
"flux.2-klein-base-4b-lora"
"flux.2-klein-base-9b-edit-lora"
"flux.2-klein-base-9b-lora"
"flux1.1-pro"
"gpt-image-1"
"qwen-image-2512-lora"
"qwen-image-edit-2509-lora"
"qwen-image-edit-2511-lora"
"qwen-image-edit-lora"
"qwen-image-lora"
"sd-1_5"
"sd-1_5-composition"
"sd-1_5-lora"
"sd-xl"
"sd-xl-composition"
"sd-xl-lora"
"zimage-de-turbo-lora"
"zimage-lora"
"zimage-turbo-lora"
parent?: boolean

Whether this input represents a parent asset to assign to the produced assets. Only available for `file` and `file_array` input types.

For `file_array`, the parent asset is the first item in the array.

placeholder?: string

Placeholder text for the input. Only available for ‘string’ input type.

prompt?: boolean

Whether the input is a prompt. When true, displays as a text area with prompt spark feature. Only available for `string` input type.

promptSpark?: boolean

Whether the input is used with prompt spark. Only available for `string` input type.

ref?: Ref { conditional, equal, name, node }

The reference to another input or output of the same workflow. Must have at least one of node or conditional.

conditional?: Array<string>

The conditional nodes to reference. If the conditional nodes are successful, the node will be successful. If the conditional nodes are skipped, the node will be skipped. Contains an array of node ids used to check the status of the nodes.

equal?: string

This is the desired node output value if ref is an if/else node.

name?: string

The name of the input or output to reference. If the type is ‘workflow’, the name of the workflow input is required. If the type is ‘node’, the name is not mandatory, except if you want all outputs of the node; to get all outputs of a node, use the name ‘all’.

node?: string

The node id or ‘workflow’ if the source is a workflow input.

required?: Required { always, conditionalValues, ifDefined, ifNotDefined }

Set of rules that describes when this input is required:

  • `always`: Input is always required
  • `ifNotDefined`: Input is required when another specified input is not defined
  • `ifDefined`: Input is required when another specified input is defined
  • `conditionalValues`: Input is required when another input has a specific value

By default, the input is not required.

always?: boolean

Whether the input is always required

conditionalValues?: unknown

Makes this input required when another input has a specific value:

  • Key: name of the input to check
  • Value: operation and allowed values that trigger the requirement
ifDefined?: unknown

Makes this input required when another input is defined:

  • Key: name of the input that must be defined
  • Value: message to display when this input is required
ifNotDefined?: unknown

Makes this input required when another input is not defined:

  • Key: name of the input that must be undefined
  • Value: message to display when this input is required
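
Combining `ref` and `required`, a node input might be declared as sketched below; the node IDs and messages are hypothetical, and the field names come from this schema:

```ts
// Sketch of a node input that reads an upstream node's outputs and is
// required only when another input named "mask" is defined.
const input = {
  name: "image",
  type: "file",
  kind: "image",
  ref: { node: "node_generate", name: "all" }, // hypothetical upstream node id
  required: {
    ifDefined: { mask: "An image is required when a mask is provided." },
  },
};
```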
step?: number

The step increment for numeric inputs. Only available for `number` input type.

minimum1
value?: unknown

The value of the input. This is the value of the input that will be used to run the node. Only available for flows managed by a WorkflowJob.

kind?: "3d" | "audio" | "document" | 4 more

The asset kind of the input. Only taken into account for `file` and `file_array` input types. If the model accepts multiple kinds, the API will not be able to create the asset on the fly from a data URL unless the URL includes a `data:<kind>,` prefix

One of the following:
"3d"
"audio"
"document"
"image"
"image-hdr"
"json"
"video"
label?: string

The label displayed in the UI for this input

maskFrom?: string

The name of the file input field to use as the mask source

max?: number

The maximum allowed value. Only available for `number` and `array` input types.

maxLength?: number

The maximum allowed length for `string` inputs. Also applies to each item in `string_array`.

maxSize?: number

The maximum allowed file size in bytes. Only applies to `file` and `file_array` input types. Validated against `asset.properties.size` at job creation time.

min?: number

The minimum allowed value. Only available for `number` and array input types.

minLength?: number

The minimum allowed length for string inputs. Also applies to each item in `string_array`.

modelTypes?: Array<"custom" | "elevenlabs-voice" | "flux.1" | 34 more>

The allowed model types for this input. Example: `["flux.1-lora"]`. Only available for `model_array` input type.

One of the following:
"custom"
"elevenlabs-voice"
"flux.1"
"flux.1-composition"
"flux.1-kontext-dev"
"flux.1-kontext-lora"
"flux.1-krea-dev"
"flux.1-krea-lora"
"flux.1-lora"
"flux.1-pro"
"flux.1.1-pro-ultra"
"flux.2-dev-edit-lora"
"flux.2-dev-lora"
"flux.2-klein-4b-edit-lora"
"flux.2-klein-4b-lora"
"flux.2-klein-9b-edit-lora"
"flux.2-klein-9b-lora"
"flux.2-klein-base-4b-edit-lora"
"flux.2-klein-base-4b-lora"
"flux.2-klein-base-9b-edit-lora"
"flux.2-klein-base-9b-lora"
"flux1.1-pro"
"gpt-image-1"
"qwen-image-2512-lora"
"qwen-image-edit-2509-lora"
"qwen-image-edit-2511-lora"
"qwen-image-edit-lora"
"qwen-image-lora"
"sd-1_5"
"sd-1_5-composition"
"sd-1_5-lora"
"sd-xl"
"sd-xl-composition"
"sd-xl-lora"
"zimage-de-turbo-lora"
"zimage-lora"
"zimage-turbo-lora"
parent?: boolean

Whether this input represents a parent asset to assign to the produced assets. Only available for `file` and `file_array` input types.

For `file_array`, the parent asset is the first item in the array.

placeholder?: string

Placeholder text for the input. Only available for ‘string’ input type.

prompt?: boolean

Whether the input is a prompt. When true, displays as a text area with prompt spark feature. Only available for `string` input type.

promptSpark?: boolean

Whether the input is used with prompt spark. Only available for `string` input type.

ref?: Ref { conditional, equal, name, node }

The reference to another input or output of the same workflow. Must have at least one of node or conditional.

conditional?: Array<string>

The conditional nodes to reference. If the conditional nodes are successful, the node will be successful. If the conditional nodes are skipped, the node will be skipped. Contains an array of node ids used to check the status of the nodes.

equal?: string

This is the desired node output value if ref is an if/else node.

name?: string

The name of the input or output to reference. If the type is ‘workflow’, the name of the workflow input is required. If the type is ‘node’, the name is not mandatory, except if you want all outputs of the node; to get all outputs of a node, use the name ‘all’.

node?: string

The node id or ‘workflow’ if the source is a workflow input.

required?: Required { always, conditionalValues, ifDefined, ifNotDefined }

Set of rules that describes when this input is required:

  • `always`: Input is always required
  • `ifNotDefined`: Input is required when another specified input is not defined
  • `ifDefined`: Input is required when another specified input is defined
  • `conditionalValues`: Input is required when another input has a specific value

By default, the input is not required.

always?: boolean

Whether the input is always required

conditionalValues?: unknown

Makes this input required when another input has a specific value:

  • Key: name of the input to check
  • Value: operation and allowed values that trigger the requirement
ifDefined?: unknown

Makes this input required when another input is defined:

  • Key: name of the input that must be defined
  • Value: message to display when this input is required
ifNotDefined?: unknown

Makes this input required when another input is not defined:

  • Key: name of the input that must be undefined
  • Value: message to display when this input is required
step?: number

The step increment for numeric inputs. Only available for `number` input type.

minimum1
value?: unknown

The value of the input. This is the value of the input that will be used to run the node. Only available for flows managed by a WorkflowJob.

items?: Array<string>

Statically-configured items for a List node. The node outputs this array as-is when executed. Only available for List nodes. The values can be strings, numbers, or asset IDs.

iterationIndex?: number

Zero-based index of the iteration this node copy belongs to. Set on dynamically-created copies of loop body nodes.

jobId?: string

If the flow is part of a WorkflowJob, this is the jobId for the node. jobId is only available for nodes that have started; a node that is “pending” within a running workflow job has not started.

logic?: Logic { cases, default, transform }

The logic of the node. Only available for logic nodes.

cases?: Array<Case>

The cases of the logic. Only available for if/else nodes.

condition: string
value: string
default?: string

The default case of the logic. Contains the id/output of the node to execute if no case is matched. Only available for if/else nodes.

transform?: string

The transform of the logic. Only available for transform nodes.

logicType?: "if-else"

The type of the logic for the node. Only available for logic nodes.
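
As a sketch of the `logic` fields above, an if/else node could be shaped as follows; the condition expression syntax and node IDs are assumptions, not documented here:

```ts
// Sketch of an if/else logic node: routes to a node id per matched case.
const logicNode = {
  id: "node_branch", // hypothetical
  type: "logic",
  logicType: "if-else",
  logic: {
    cases: [
      // condition syntax is an assumption for illustration only
      { condition: "status == 'ok'", value: "node_success" },
    ],
    default: "node_fallback", // executed when no case matches
  },
};
```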

loopBodyNodeIds?: Array<string>

IDs of the body template nodes that belong to this ForEach loop. At runtime these templates are cloned once per iteration and marked Skipped. Only available for ForEach nodes.

loopNodeId?: string

ID of the ForEach node that spawned this iteration copy. Set on dynamically-created copies of loop body nodes.

modelId?: string

The model id for the node. Mainly used for custom model tasks.

output?: unknown

The output of the node. Only available for logic nodes.

workflowId?: string

The workflow id for the node. Mainly used for workflow tasks.

hint?: string

Actionable hint for the user explaining what went wrong and how to resolve it.

input?: Record<string, unknown>

The inputs for the job

output?: Record<string, unknown>

May contain the output of the job for specific custom-model jobs. Only available for custom models that generate non-asset outputs. Example: LLM text results.

outputModelId?: string

For voice-clone jobs: the ID of the model being trained.

workflowId?: string

The workflow ID of the job if job is part of a workflow.

workflowJobId?: string

The workflow job ID of the job if job is part of a workflow job.

progress: number

Progress of the job (between 0 and 1)

status: "canceled" | "failure" | "finalizing" | 5 more

The current status of the job

One of the following:
"canceled"
"failure"
"finalizing"
"in-progress"
"pending"
"queued"
"success"
"warming-up"
statusHistory: Array<StatusHistory>

The history of the different statuses the job went through, with the ISO string date of when the job reached each status.

date: string
status: "canceled" | "failure" | "finalizing" | 5 more
One of the following:
"canceled"
"failure"
"finalizing"
"in-progress"
"pending"
"queued"
"success"
"warming-up"
updatedAt: string

The job last update date as an ISO string (example: “2023-02-03T11:19:41.579Z”)

authorId?: string

The author user ID (example: “dcf121faaa1a0a0bbbd9ca1b73d62aea”)

billing?: Billing { cuCost, cuDiscount }

The billing of the job

cuCost: number
cuDiscount: number
ownerId?: string

The owner ID (example: “team_U3Qmc8PCdWXwAQJ4Dvw4tV6D”)

ExampleUpdateResponse { examples }
examples: Array<Example>
asset: Asset { id, authorId, collectionIds, 24 more }

Asset generated by the inference

id: string

The asset ID (example: “asset_GTrL3mq4SXWyMxkOHRxlpw”)

authorId: string

The author user ID (example: “dcf121faaa1a0a0bbbd9ca1b73d62aea”)

collectionIds: Array<string>

A list of CollectionId this asset belongs to

createdAt: string

The asset creation date as an ISO string (example: “2023-02-03T11:19:41.579Z”)

editCapabilities: Array<"DETECTION" | "GENERATIVE_FILL" | "PIXELATE" | 8 more>

List of edit capabilities

One of the following:
"DETECTION"
"GENERATIVE_FILL"
"PIXELATE"
"PROMPT_EDITING"
"REFINE"
"REFRAME"
"REMOVE_BACKGROUND"
"SEGMENTATION"
"UPSCALE"
"UPSCALE_360"
"VECTORIZATION"
kind: "3d" | "audio" | "document" | 4 more

The kind of asset

One of the following:
"3d"
"audio"
"document"
"image"
"image-hdr"
"json"
"video"
metadata: Metadata { kind, type, angular, 106 more }

Metadata of the asset with some additional information

kind: "3d" | "audio" | "document" | 4 more
One of the following:
"3d"
"audio"
"document"
"image"
"image-hdr"
"json"
"video"
type: "3d-texture" | "3d-texture-albedo" | "3d-texture-metallic" | 72 more

The type of the asset. Ex: ‘inference-txt2img’ represents an asset generated from a text-to-image model

One of the following:
"3d-texture"
"3d-texture-albedo"
"3d-texture-metallic"
"3d-texture-mtl"
"3d-texture-normal"
"3d-texture-roughness"
"3d23d"
"3d23d-texture"
"audio2audio"
"audio2video"
"background-removal"
"canvas"
"canvas-drawing"
"canvas-export"
"detection"
"generative-fill"
"image-prompt-editing"
"img23d"
"img2img"
"img2video"
"inference-controlnet"
"inference-controlnet-img2img"
"inference-controlnet-inpaint"
"inference-controlnet-inpaint-ip-adapter"
"inference-controlnet-ip-adapter"
"inference-controlnet-reference"
"inference-controlnet-texture"
"inference-img2img"
"inference-img2img-ip-adapter"
"inference-img2img-texture"
"inference-inpaint"
"inference-inpaint-ip-adapter"
"inference-reference"
"inference-reference-texture"
"inference-txt2img"
"inference-txt2img-ip-adapter"
"inference-txt2img-texture"
"patch"
"pixelization"
"reframe"
"restyle"
"segment"
"segmentation-image"
"segmentation-mask"
"skybox-3d"
"skybox-base-360"
"skybox-hdri"
"texture"
"texture-albedo"
"texture-ao"
"texture-edge"
"texture-height"
"texture-metallic"
"texture-normal"
"texture-smoothness"
"txt23d"
"txt2audio"
"txt2img"
"txt2video"
"unknown"
"uploaded"
"uploaded-3d"
"uploaded-audio"
"uploaded-avatar"
"uploaded-video"
"upscale"
"upscale-skybox"
"upscale-texture"
"upscale-video"
"vectorization"
"video23d"
"video2audio"
"video2img"
"video2video"
"voice-clone"
angular?: number

How angular is the surface? 0 is like a sphere, 1 is like a mechanical object

maximum1
minimum0
aspectRatio?: string

The optional aspect ratio given for the generation, only applicable for some models

backgroundOpacity?: number

Integer between 0 and 255 setting the opacity of the background in the result images.

maximum255
minimum0
baseModelId?: string

The baseModelId, which may be changed at inference time

bbox?: Array<number>

A bounding box around the object of interest, in the format [x1, y1, x2, y2].

betterQuality?: boolean

Remove small dark spots (i.e. “pepper”) and connect small bright cracks.

cannyStructureImage?: string

The control image already processed by canny detector. Must reference an existing AssetId.

clustering?: boolean

Activate clustering.

colorCorrection?: boolean

Ensure upscaled tiles have the same color histogram as the original tile.

colorMode?: string
colorPrecision?: number
concepts?: Array<Concept>

Flux Kontext LoRA to style the image. For Flux Kontext Prompt Editing.

modelId: string

The model ID (example: “model_eyVcnFJcR92BxBkz7N6g5w”)

scale: number

The scale of the model (example: 1.0). For Flux Kontext Prompt Editing, the scale is between 0 and 2.

maximum2
minimum-2
modelEpoch?: string

The epoch of the model (example: “000001”). Only available for Flux Lora Trained models.
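For illustration, a `concepts` array for Flux Kontext Prompt Editing might look like the following sketch (the model ID is the documentation's example ID, not a real model):

```ts
// Illustrative only: a `concepts` array for Flux Kontext Prompt Editing.
const concepts = [
  {
    modelId: "model_eyVcnFJcR92BxBkz7N6g5w", // a Flux Kontext LoRA
    scale: 1.0,            // between 0 and 2 for Flux Kontext Prompt Editing
    modelEpoch: "000001",  // optional; only for Flux LoRA trained models
  },
];
```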

contours?: Array<Array<Array<Array<number>>>>
controlEnd?: number

End step for control.

copiedAt?: string

The date when the asset was copied to a project

cornerThreshold?: number
creativity?: number

Allow the generation of “hallucinations” during the upscale process, which adds additional details and deviates from the original image. Default: optimized for your preset and style.

maximum100
minimum0
creativityDecay?: number

Amount of decay in creativity over the upscale process. The lower the value, the less the creativity will be preserved over the upscale process.

maximum100
minimum0
defaultParameters?: boolean

If true, use the default parameters

depthFidelity?: number

The depth fidelity if a depth image is provided

maximum100
minimum0
depthImage?: string

The control image processed by depth estimator. Must reference an existing AssetId.

detailsLevel?: number

Amount of details to remove or add

maximum50
minimum-50
dilate?: number

The number of pixels to dilate the result masks.

maximum30
minimum0
factor?: number

Contrast factor for Grayscale detector

filterSpeckle?: number
fractality?: number

Determine the scale at which the upscale process works.

  • With a small value, the upscale works at the largest scale, resulting in fewer added details and more coherent images. Ideal for portraits, for example.
  • With a large value, the upscale works at the smallest scale, resulting in more added details and more hallucinations. Ideal for landscapes, for example.

(info): A small value is slower and more expensive to run.

maximum100
minimum0
geometryEnforcement?: number

Apply extra control to the Skybox 360 geometry. The higher the value, the more the 360 geometry will influence the generated skybox image.

Use with caution. Default is adapted to the other parameters.

maximum100
minimum0
guidance?: number

The guidance used to generate this asset

halfMode?: boolean
hdr?: number
height?: number
highThreshold?: number

High threshold for Canny detector

horizontalExpansionRatio?: number

(deprecated) Horizontal expansion ratio.

maximum2
minimum1
image?: string

The input image to process. Must reference an existing AssetId or be a data URL.

imageFidelity?: number

Strengthen the similarity to the original image during the upscale. Default: optimized for your preset and style.

maximum100
minimum0
imageType?: "seamfull" | "skybox" | "texture"

Preserve the seamless properties of skybox or texture images. The input has to be of the same type (seamless).

One of the following:
"seamfull"
"skybox"
"texture"
inferenceId?: string

The id of the Inference describing how this image was generated

inputFidelity?: "high" | "low"

When set to high, allows the model to better preserve details from the input images in the output. This is especially useful when using images that contain elements like faces or logos that require accurate preservation in the generated image.

You can provide multiple input images that will all be preserved with high fidelity, but keep in mind that the first image will be preserved with richer textures and finer details, so if you include elements such as faces, consider placing them in the first image.

Only available for the gpt-image-1 model.

One of the following:
"high"
"low"
inputLocation?: "bottom" | "left" | "middle" | 2 more

Location of the input image in the output.

One of the following:
"bottom"
"left"
"middle"
"right"
"top"
invert?: boolean

To invert the relief

keypointThreshold?: number

How polished is the surface? 0 is like a rough surface, 1 is like a mirror

maximum1
minimum0
layerDifference?: number
lengthThreshold?: number
lockExpiresAt?: string

The ISO timestamp when the lock on the canvas will expire

lowThreshold?: number

Low threshold for Canny detector

mask?: string

The mask used for the asset generation or editing

maxIterations?: number
maxThreshold?: number

Maximum threshold for Grayscale conversion

minThreshold?: number

Minimum threshold for Grayscale conversion

modality?: "canny" | "depth" | "grayscale" | 7 more

Modality to detect

One of the following:
"canny"
"depth"
"grayscale"
"lineart_anime"
"mlsd"
"normal"
"pose"
"scribble"
"segmentation"
"sketch"
mode?: string
modelId?: string

The modelId used to generate this asset

modelType?: "custom" | "elevenlabs-voice" | "flux.1" | 34 more

The type of the generator used

One of the following:
"custom"
"elevenlabs-voice"
"flux.1"
"flux.1-composition"
"flux.1-kontext-dev"
"flux.1-kontext-lora"
"flux.1-krea-dev"
"flux.1-krea-lora"
"flux.1-lora"
"flux.1-pro"
"flux.1.1-pro-ultra"
"flux.2-dev-edit-lora"
"flux.2-dev-lora"
"flux.2-klein-4b-edit-lora"
"flux.2-klein-4b-lora"
"flux.2-klein-9b-edit-lora"
"flux.2-klein-9b-lora"
"flux.2-klein-base-4b-edit-lora"
"flux.2-klein-base-4b-lora"
"flux.2-klein-base-9b-edit-lora"
"flux.2-klein-base-9b-lora"
"flux1.1-pro"
"gpt-image-1"
"qwen-image-2512-lora"
"qwen-image-edit-2509-lora"
"qwen-image-edit-2511-lora"
"qwen-image-edit-lora"
"qwen-image-lora"
"sd-1_5"
"sd-1_5-composition"
"sd-1_5-lora"
"sd-xl"
"sd-xl-composition"
"sd-xl-lora"
"zimage-de-turbo-lora"
"zimage-lora"
"zimage-turbo-lora"
name?: string
nbMasks?: number
negativePrompt?: string

The negative prompt used to generate this asset

negativePromptStrength?: number

Controls the influence of the negative prompt. Default 0 means the negative prompt has no effect. Higher values increase negative prompt influence. Must be > 0 if negativePrompt is provided.

maximum10
minimum0
numInferenceSteps?: number

The number of denoising steps for each image generation.

maximum50
minimum5
numOutputs?: number

The number of outputs to generate.

maximum8
minimum1
originalAssetId?: string
outputIndex?: number
overlapPercentage?: number

Overlap percentage for the output image.

maximum0.5
minimum0
overrideEmbeddings?: boolean

Override the embeddings of the model. Only your prompt and negativePrompt will be used. Use with caution.

parentId?: string
parentJobId?: string
pathPrecision?: number
points?: Array<Array<number>>

List of points (label, x, y) in the image where label = 0 for background and 1 for object.
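For example, a points array marking one object point and one background point (coordinates are illustrative):

```ts
// Each point is [label, x, y]: label 1 marks the object, 0 marks background.
const points: number[][] = [
  [1, 320, 240], // a point on the object of interest
  [0, 10, 10],   // a point on the background
];
```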

polished?: number

How polished is the surface? 0 is like a rough surface, 1 is like a mirror

maximum1
minimum0
preset?: string
progressPercent?: number
prompt?: string

The prompt that guided the asset generation or editing

promptFidelity?: number

Increase the fidelity to the prompt during upscale. Default: optimized for your preset and style.

maximum100
minimum0
raised?: number

How raised is the surface? 0 is flat like water, 1 is like a very rough rock

maximum1
minimum0
referenceImages?: Array<string>

The reference images used for the asset generation or editing

refinementSteps?: number

Additional refinement steps before scaling.

If scalingFactor == 1, the refinement process will be applied (1 + refinementSteps) times. If scalingFactor > 1, the refinement process will be applied refinementSteps times.

maximum4
minimum0
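A minimal sketch of the pass count implied by the rule above:

```ts
// Number of refinement passes implied by the documented rule.
function refinementPasses(scalingFactor: number, refinementSteps: number): number {
  return scalingFactor === 1 ? 1 + refinementSteps : refinementSteps;
}

refinementPasses(1, 2); // => 3
refinementPasses(4, 2); // => 2
```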
removeBackground?: boolean

Remove background for Grayscale detector

resizeOption?: number

Size proportion of the input image in the output.

maximum1
minimum0.1
resultContours?: boolean

Boolean to output the contours.

resultImage?: boolean

Boolean to enable outputting the cut-out object.

resultMask?: boolean

Boolean to enable returning the masks (binary images) in the response.

rootParentId?: string
saveFlipbook?: boolean

Save a flipbook of the texture. Deactivated when the input texture is larger than 2048x2048px

scalingFactor?: number

Scaling factor (when targetWidth is not specified)

maximum16
minimum1
scheduler?: string

The scheduler used to generate this asset

seed?: string

The seed used to generate this asset. ⚠️ Can be a string or a number in some cases. ⚠️

sharpen?: boolean

Sharpen tiles.

shiny?: number

How shiny is the surface? 0 is like a matte surface, 1 is like a diamond

maximum1
minimum0
size?: number
sketch?: boolean

Activate sketch detection instead of canny.

sourceProjectId?: string
spliceThreshold?: number
strength?: number

The strength

Only available for the flux-kontext LoRA model.

structureFidelity?: number

Strength for the input image structure preservation

maximum100
minimum0
structureImage?: string

The control image for structure. A canny detector will be applied to this image. Must reference an existing AssetId.

style?: "3d-cartoon" | "3d-rendered" | "anime" | 23 more
One of the following:
"3d-cartoon"
"3d-rendered"
"anime"
"cartoon"
"cinematic"
"claymation"
"cloud-skydome"
"comic"
"cyberpunk"
"enchanted"
"fantasy"
"ink"
"manga"
"manga-color"
"minimalist"
"neon-tron"
"oil-painting"
"pastel"
"photo"
"photography"
"psychedelic"
"retro-fantasy"
"scifi-concept-art"
"space"
"standard"
"whimsical"
styleFidelity?: number

The higher the value, the more the result will look like the style image(s)

maximum100
minimum0
styleImages?: Array<string>

List of style images. Most of the time, only one image is enough. They must be existing AssetIds.

styleImagesFidelity?: number

Condition the influence of the style image(s). The higher the value, the more the style images will influence the upscaled image.

maximum100
minimum0
targetHeight?: number

The target height of the output image.

maximum2048
minimum0
targetWidth?: number

Target width for the upscaled image; takes priority over the scaling factor

maximum16000
minimum1024
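Since targetWidth takes priority over scalingFactor, the effective output width can be sketched as:

```ts
// Effective upscaled width, assuming the documented precedence:
// targetWidth wins when specified; otherwise inputWidth * scalingFactor.
function effectiveWidth(inputWidth: number, scalingFactor: number, targetWidth?: number): number {
  return targetWidth ?? inputWidth * scalingFactor;
}

effectiveWidth(1024, 4);        // => 4096
effectiveWidth(1024, 4, 16000); // => 16000 (targetWidth takes priority)
```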
text?: string

A textual description / keywords describing the object of interest.

maxLength100
texture?: string

The asset to convert in texture maps. Must reference an existing AssetId.

thumbnail?: Thumbnail { assetId, url }

The thumbnail of the canvas

assetId: string

The AssetId of the image used as a thumbnail for the canvas (example: “asset_GTrL3mq4SXWyMxkOHRxlpw”)

url: string

The url of the image used as a thumbnail for the canvas

tileStyle?: boolean

If set to true, during the upscaling process, the model will match tiles of the source image with tiles of the style image(s). This will result in a more coherent restyle. Works best with style images that have a similar composition.

trainingImage?: boolean
verticalExpansionRatio?: number

(deprecated) Vertical expansion ratio.

maximum2
minimum1
width?: number

The width of the rendered image.

maximum2048
minimum1024
mimeType: string

The mime type of the asset (example: “image/png”)

ownerId: string

The owner (project) ID (example: “proj_23tlk332lkht3kl2” or “team_dlkhgs23tlk3hlkth32lkht3kl2” for old teams)

privacy: "private" | "public" | "unlisted"

The privacy of the asset

One of the following:
"private"
"public"
"unlisted"
properties: Properties { size, animationFrameCount, bitrate, 20 more }

The properties of the asset, content may depend on the kind of asset returned

size: number
animationFrameCount?: number

Number of animation frames if animations exist

bitrate?: number

Bitrate of the media in bits per second

boneCount?: number

Number of bones if skeleton exists

channels?: number

Number of channels of the audio

classification?: "effect" | "interview" | "music" | 5 more

Classification of the audio

One of the following:
"effect"
"interview"
"music"
"other"
"sound"
"speech"
"text"
"unknown"
codecName?: string

Codec name of the media

description?: string

Description of the audio

dimensions?: Array<number>

Bounding box dimensions [width, height, depth]

duration?: number

Duration of the media in seconds

faceCount?: number

Number of faces/triangles in the mesh

format?: string

Format of the mesh file (e.g. ‘glb’)

frameRate?: number

Frame rate of the video in frames per second

hasAnimations?: boolean

Whether the mesh has animations

hasNormals?: boolean

Whether the mesh has normal vectors

hasSkeleton?: boolean

Whether the mesh has bones/skeleton

hasUVs?: boolean

Whether the mesh has UV coordinates

height?: number
nbFrames?: number

Number of frames in the video

sampleRate?: number

Sample rate of the media in Hz

transcription?: Transcription { text }

Transcription of the audio

text: string
vertexCount?: number

Number of vertices in the mesh

width?: number
source: "3d23d" | "3d23d:texture" | "3d:texture" | 72 more

The source of the asset

One of the following:
"3d23d"
"3d23d:texture"
"3d:texture"
"3d:texture:albedo"
"3d:texture:metallic"
"3d:texture:mtl"
"3d:texture:normal"
"3d:texture:roughness"
"audio2audio"
"audio2video"
"background-removal"
"canvas"
"canvas-drawing"
"canvas-export"
"detection"
"generative-fill"
"image-prompt-editing"
"img23d"
"img2img"
"img2video"
"inference-control-net"
"inference-control-net-img"
"inference-control-net-inpainting"
"inference-control-net-inpainting-ip-adapter"
"inference-control-net-ip-adapter"
"inference-control-net-reference"
"inference-control-net-texture"
"inference-img"
"inference-img-ip-adapter"
"inference-img-texture"
"inference-in-paint"
"inference-in-paint-ip-adapter"
"inference-reference"
"inference-reference-texture"
"inference-txt"
"inference-txt-ip-adapter"
"inference-txt-texture"
"patch"
"pixelization"
"reframe"
"restyle"
"segment"
"segmentation-image"
"segmentation-mask"
"skybox-3d"
"skybox-base-360"
"skybox-hdri"
"texture"
"texture:albedo"
"texture:ao"
"texture:edge"
"texture:height"
"texture:metallic"
"texture:normal"
"texture:smoothness"
"txt23d"
"txt2audio"
"txt2img"
"txt2video"
"unknown"
"uploaded"
"uploaded-3d"
"uploaded-audio"
"uploaded-avatar"
"uploaded-video"
"upscale"
"upscale-skybox"
"upscale-texture"
"upscale-video"
"vectorization"
"video23d"
"video2audio"
"video2img"
"video2video"
"voice-clone"
status: "error" | "pending" | "success"

The actual status

One of the following:
"error"
"pending"
"success"
tags: Array<string>

The associated tags (example: [“sci-fi”, “landscape”])

updatedAt: string

The asset last update date as an ISO string (example: “2023-02-03T11:19:41.579Z”)

url: string

Signed URL to get the asset content

automaticCaptioning?: string

Automatic captioning of the asset

description?: string

The description. It will contain, in order of priority:

  • the manual description
  • the advanced captioning when the asset is used in training flow
  • the automatic captioning
embedding?: Array<number>

The embedding of the asset when requested.

Only available when an asset can be embedded (i.e., not Detection maps)

firstFrame?: FirstFrame { assetId, url }

The video asset’s first frame.

Contains the assetId and the url of the first frame.

assetId: string
url: string
isHidden?: boolean

Whether the asset is hidden.

lastFrame?: LastFrame { assetId, url }

The video asset’s last frame.

Contains the assetId and the url of the last frame.

assetId: string
url: string
nsfw?: Array<string>

The NSFW labels

originalFileUrl?: string

The original file url.

Contains the URL of the original file, without any conversion. Only available for some specific video, audio, and 3D assets. It is only specified if the given asset data has been replaced with a new file during the creation of the asset.

outputIndex?: number

The output index of the asset within a job. This index is a non-negative integer that starts at 0 and is used to differentiate between multiple outputs of the same job. If the job has only one output, this index is 0.

preview?: Preview { assetId, url }

The asset’s preview.

Contains the assetId and the url of the preview.

assetId: string
url: string
thumbnail?: Thumbnail { assetId, url }

The asset’s thumbnail.

Contains the assetId and the url of the thumbnail.

assetId: string
url: string
modelId: string

Model id of the model used to generate the asset

inferenceId?: string

Inference id of the inference used to generate the asset

inferenceParameters?: InferenceParameters { prompt, type, aspectRatio, 36 more }

The inference parameters used to generate the asset

prompt: string

Full text prompt including the model placeholder. (example: “an illustration of phoenix in a fantasy world, flying over a mountain, 8k, bokeh effect”)

type: "controlnet" | "controlnet_img2img" | "controlnet_inpaint" | 15 more

The type of inference to use. Example: txt2img, img2img, etc.

Selecting the right type will condition the expected parameters.

Note: if model.type is sd-xl* or sd-1_5*, when using the "inpaint" inference type, Scenario determines the best available baseModel for a given modelId: one of `["stable-diffusion-inpainting", "stable-diffusion-xl-1.0-inpainting-0.1"]` will be used.

One of the following:
"controlnet"
"controlnet_img2img"
"controlnet_inpaint"
"controlnet_inpaint_ip_adapter"
"controlnet_ip_adapter"
"controlnet_reference"
"controlnet_texture"
"img2img"
"img2img_ip_adapter"
"img2img_texture"
"inpaint"
"inpaint_ip_adapter"
"outpaint"
"reference"
"reference_texture"
"txt2img"
"txt2img_ip_adapter"
"txt2img_texture"
aspectRatio?: "16:9" | "1:1" | "21:9" | 8 more

The aspect ratio of the generated images. Only used for the model flux.1.1-pro-ultra. The aspect ratio is a string formatted as “width:height” (example: “16:9”).

One of the following:
"16:9"
"1:1"
"21:9"
"2:3"
"3:2"
"3:4"
"4:3"
"4:5"
"5:4"
"9:16"
"9:21"
baseModelId?: string

The base model to use for the inference. Only Flux LoRA models can use this parameter. Allowed values are available in the model’s attribute: compliantModelIds

concepts?: Array<Concept>
modelId: string

The model ID (example: “model_eyVcnFJcR92BxBkz7N6g5w”)

scale: number

The scale of the model (example: 1.0). For Flux Kontext Prompt Editing, the scale is between 0 and 2.

maximum2
minimum-2
modelEpoch?: string

The epoch of the model (example: “000001”). Only available for Flux Lora Trained models.

controlEnd?: number

Specifies how long the ControlNet guidance should be applied during the inference process.

Only available for Flux.1-dev based models.

The value represents the percentage of total inference steps where the ControlNet guidance is active. For example:

  • 1.0: ControlNet guidance is applied during all inference steps
  • 0.5: ControlNet guidance is only applied during the first half of inference steps

Default values:

  • 0.5 for Canny modality
  • 0.6 for all other modalities
maximum1
minimum0.1
controlImage?: string

Signed URL to display the controlnet input image

controlImageId?: string

Asset id of the controlnet input image

controlStart?: number

Specifies the starting point of the ControlNet guidance during the inference process.

Only available for Flux.1-dev based models.

The value represents the percentage of total inference steps where the ControlNet guidance starts. For example:

  • 0.0: ControlNet guidance starts at the beginning of the inference steps
  • 0.5: ControlNet guidance starts at the middle of the inference steps
maximum0.9
minimum0
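Taken together, controlStart and controlEnd bound the fraction of inference steps during which ControlNet guidance is active. A hedged sketch of the resulting step window (defaults as documented above; how the service rounds fractions to concrete steps is an assumption):

```ts
// Sketch: the step window where ControlNet guidance is active on
// Flux.1-dev based models, using the documented defaults.
function controlWindow(
  numInferenceSteps: number,
  modality: string,
  controlStart = 0.0,
  controlEnd?: number,
): { firstStep: number; lastStep: number } {
  const end = controlEnd ?? (modality === "canny" ? 0.5 : 0.6);
  return {
    firstStep: Math.floor(controlStart * numInferenceSteps),
    lastStep: Math.ceil(end * numInferenceSteps) - 1,
  };
}

controlWindow(28, "canny"); // guidance active for the first half of the steps
```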
disableMerging?: boolean

If set to true, the entire input image will likely change during inpainting. This results in faster inferences, but the output image will be harder to integrate if the input is just a small part of a larger image.

disableModalityDetection?: boolean

If false, the process uses the given image to detect the modality. If true (default), the process will not try to detect the modality of the given image.

For example: with the pose modality and a false value, the process will detect the pose of people in the given image; with the depth modality and a false value, the process will detect the depth of the given image; with the scribble modality and a true value, the process will use the given image as a scribble.

⚠️ For models of the FLUX schnell or dev families, this parameter is ignored. The modality detection is always disabled. ⚠️

guidance?: number

Controls how closely the generated image follows the prompt. Higher values result in stronger adherence to the prompt. Default and allowed values depend on the model type:

  • For Flux dev models, the default is 3.5 and allowed values are within [0, 10]
  • For Flux pro models, the default is 3 and allowed values are within [2, 5]
  • For SDXL models, the default is 6 and allowed values are within [0, 20]
  • For SD1.5 models, the default is 7.5 and allowed values are within [0, 20]
maximum20
minimum0
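The defaults above can be summarized in a small lookup (a sketch; the keys are illustrative labels, and detecting a model's family is left out):

```ts
// Default guidance and allowed range per model family, per the list above.
const guidanceDefaults: Record<string, { default: number; min: number; max: number }> = {
  "flux-dev": { default: 3.5, min: 0, max: 10 },
  "flux-pro": { default: 3,   min: 2, max: 5 },
  "sd-xl":    { default: 6,   min: 0, max: 20 },
  "sd-1_5":   { default: 7.5, min: 0, max: 20 },
};
```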
height?: number

The height of the generated images; must be a multiple of 8 (within [64, 2048], default: 512). If model.type is sd-xl, sd-xl-lora, or sd-xl-composition, the height must be within [512, 2048]. If model.type is sd-1_5, the height must be within [64, 1024]. If model.type is flux.1.1-pro-ultra, you can use the aspectRatio parameter instead.

maximum2048
minimum64
multipleOf8
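A minimal helper to snap a requested height (or width) to a valid multiple of 8 within the model's range (the range bounds are passed in, since they vary per model type as described above):

```ts
// Snap a dimension to the nearest multiple of 8 within [min, max].
function snapDimension(value: number, min: number, max: number): number {
  const clamped = Math.min(max, Math.max(min, value));
  return Math.round(clamped / 8) * 8;
}

snapDimension(777, 64, 2048);  // => 776
snapDimension(300, 512, 2048); // => 512 (e.g. for sd-xl model types)
```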
hideResults?: boolean

If set, generated assets will be hidden and not returned in the list of images of the inference or when listing assets (default: false)

image?: string

Signed URL to display the input image

imageId?: string

Asset id of the input image

intermediateImages?: boolean

Enable or disable the intermediate images generation (default: false)

ipAdapterImage?: string

Signed URL to display the IpAdapter image

ipAdapterImageId?: string

Asset id of the input IpAdapter image

ipAdapterImageIds?: Array<string>

Asset id of the input IpAdapter images

ipAdapterImages?: Array<string>

Signed URL to display the IpAdapter images

ipAdapterScale?: number

IpAdapter scale factor (within [0.0, 1.0], default: 0.9).

maximum1
minimum0
ipAdapterScales?: Array<number>

IpAdapter scale factors (within [0.0, 1.0], default: 0.9).

maximum1
minimum0
ipAdapterType?: "character" | "style"

The type of IP Adapter model to use. Must be one of [style, character]; defaults to `style`.

One of the following:
"character"
"style"
mask?: string

Signed URL to display the mask image

maskId?: string

Asset id of the mask image

modality?: string

The modality associated with the control image used for the generation. It can be a single modality, a comma-separated combination of modalities, or a preset, each optionally weighted.

For models of SD1.5 family:

  • up to 3 modalities from canny, pose, depth, lines, seg, scribble, lineart, normal-map, illusion
  • or one of the following presets: character, landscape, city, interior.

For models of the SDXL family:

  • up to 3 modalities from canny, pose, depth, seg, illusion, scribble
  • or one of the following presets: character, landscape.

For models of the FLUX schnell or dev families:

  • one modality from: canny, tile, depth, blur, pose, gray, low-quality

Optionally, you can associate a value to these modalities or presets. The value must be within (0.0, 1.0].

Examples:

  • canny
  • depth:0.5,pose:1.0
  • canny:0.5,depth:0.5,lines:0.3
  • landscape
  • character:0.5
  • illusion:1

Note: if you use a value that is not supported by the model family, this will result in an error.
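A small sketch that assembles a modality string in the documented format from a map of weights (the helper name is ours):

```ts
// Build a modality string such as "depth:0.5,pose:1.0" from a weight map.
// Values must lie in (0.0, 1.0]; bare names without a weight, e.g. "canny",
// are also accepted per the examples above.
function buildModality(weights: Record<string, number>): string {
  return Object.entries(weights)
    .map(([name, value]) => `${name}:${value}`)
    .join(",");
}

buildModality({ depth: 0.5, pose: 1.0 }); // => "depth:0.5,pose:1.0"
```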

modelEpoch?: string

The epoch of the model to use for the inference. Only available for Flux Lora Trained models.

negativePrompt?: string

The prompt not to guide the image generation; ignored when guidance < 1 (example: “((ugly face))”). For Flux-based models (not Fast-Flux): requires negativePromptStrength > 0 and is active only for inference types txt2img / img2img / controlnet.

negativePromptStrength?: number

Only applicable for flux-dev based models for txt2img, img2img, and controlnet inference types.

Controls the influence of the negative prompt. Default 0 means the negative prompt has no effect. Higher values increase negative prompt influence. Must be > 0 if negativePrompt is provided.

maximum10
minimum0
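A sketch of the consistency rule above (negativePromptStrength must be > 0 whenever negativePrompt is provided):

```ts
// Validate the documented coupling between negativePrompt and its strength.
function validateNegativePrompt(negativePrompt?: string, negativePromptStrength = 0): void {
  if (negativePrompt && negativePromptStrength <= 0) {
    throw new Error("negativePromptStrength must be > 0 when negativePrompt is provided");
  }
}
```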
numInferenceSteps?: number

The number of denoising steps for each image generation (within [1, 150], default: 30)

maximum150
minimum1
numSamples?: number

The number of images to generate (within [1, 128], default: 4)

maximum128
minimum1
referenceAdain?: boolean

Whether to use reference AdaIN. Only for the “reference” inference type.

referenceAttn?: boolean

Whether to use the reference query for self-attention’s context. Only for the “reference” inference type.

scheduler?: "DDIMScheduler" | "DDPMScheduler" | "DEISMultistepScheduler" | 12 more

The scheduler to use to override the default configured for the model. See detailed documentation for more details.

One of the following:
"DDIMScheduler"
"DDPMScheduler"
"DEISMultistepScheduler"
"DPMSolverMultistepScheduler"
"DPMSolverSinglestepScheduler"
"EulerAncestralDiscreteScheduler"
"EulerDiscreteScheduler"
"HeunDiscreteScheduler"
"KDPM2AncestralDiscreteScheduler"
"KDPM2DiscreteScheduler"
"LCMScheduler"
"LMSDiscreteScheduler"
"PNDMScheduler"
"TCDScheduler"
"UniPCMultistepScheduler"
seed?: string

Used to reproduce previous results. Default: randomly generated number.

maximum2147483647
minimum0
strength?: number

Controls the noise intensity introduced to the input image, where a value of 1.0 completely erases the original image’s details. Available for img2img and inpainting. (within [0.01, 1.0], default: 0.75)

maximum1
minimum0.01
styleFidelity?: number

If styleFidelity is 1.0, the control is more important; if 0.0, the prompt is more important; intermediate values balance the two. Only for the “reference” inference type.

maximum1
minimum0
width?: number

The width of the generated images; must be a multiple of 8 (within [64, 2048], default: 512). If model.type is sd-xl, sd-xl-lora, or sd-xl-composition, the width must be within [512, 2048]. If model.type is sd-1_5, the width must be within [64, 1024]. If model.type is flux.1.1-pro-ultra, you can use the aspectRatio parameter instead.

maximum2048
minimum64
multipleOf8
job?: Job { createdAt, jobId, jobType, 8 more }

The job associated with the asset

createdAt: string

The job creation date as an ISO string (example: “2023-02-03T11:19:41.579Z”)

jobId: string

The job ID (example: “job_ocZCnG1Df35XRL1QyCZSRxAG8”)

jobType: "assets-download" | "canvas-export" | "caption" | 36 more

The type of job

One of the following:
"assets-download"
"canvas-export"
"caption"
"caption-llava"
"custom"
"describe-style"
"detection"
"embed"
"flux"
"flux-model-training"
"generate-prompt"
"image-generation"
"image-prompt-editing"
"inference"
"mesh-preview-rendering"
"model-download"
"model-import"
"model-training"
"musubi-model-training"
"openai-image-generation"
"patch-image"
"pixelate"
"reframe"
"remove-background"
"repaint"
"restyle"
"segment"
"skybox-3d"
"skybox-base-360"
"skybox-hdri"
"skybox-upscale-360"
"texture"
"translate"
"upload"
"upscale"
"upscale-skybox"
"upscale-texture"
"vectorize"
"workflow"
metadata: Metadata { assetIds, error, flow, 6 more }

Metadata of the job with some additional information

assetIds?: Array<string>

List of produced assets for this job

error?: string | null

Eventual error for the job

flow?: Array<Flow>

The flow of the job. Only available for workflow jobs.

id: string

The id of the node.

status: "failure" | "pending" | "processing" | 2 more

The status of the node. Only available for WorkflowJob nodes.

One of the following:
"failure"
"pending"
"processing"
"skipped"
"success"
type: "custom-model" | "for-each" | "generate-prompt" | 7 more

The type of the job for the node.

One of the following:
"custom-model"
"for-each"
"generate-prompt"
"list"
"logic"
"model"
"remove-background"
"transform"
"user-approval"
"workflow"
assets?: Array<Asset>

List of produced assets for this node.

assetId: string
url: string
count?: number

Fixed number of iterations for a ForEach node. When set, the loop runs exactly count times regardless of array input. When not set, the loop iterates over the resolved array input. Only available for ForEach nodes.

dependsOn?: Array<string>

The nodes that this node depends on. Only available for nodes that have dependencies. Mainly used for user approval nodes.

includeOutputsInWorkflowJob?: true

If true, the outputs of this node will be included in the workflow job’s final output. Only applicable to producing nodes (custom-model, inference, etc.). By default, only last nodes (nodes not referenced by other nodes) contribute to outputs. Set this to true to also include intermediate nodes in the final output. Note: This should only be set to true or left undefined.

inputs?: Array<Input>

The inputs of the node.

name: string

The name that must be used to call the model through the API

type: "boolean" | "file" | "file_array" | 7 more

The data type of the input

One of the following:
"boolean"
"file"
"file_array"
"inputs_array"
"model"
"model_array"
"number"
"number_array"
"string"
"string_array"
allowedValues?: Array<unknown>

The allowed values for the input. For `string` or `number` types, creates a single-select dropdown. For `string_array` type, creates a multi-select dropdown.

backgroundBehavior?: "opaque" | "transparent"

Specifies the background behavior for the input. Only available for `file` and `file_array` input types with kind `image`.

One of the following:
"opaque"
"transparent"
color?: boolean

Whether the input is a color or not. Only available for `string` input type.

costImpact?: boolean

Whether this input affects the model’s cost calculation

default?: unknown

The default value for the input

description?: string

Help text displayed in the UI to provide additional information about the input

group?: string

Used to visually group inputs together in the UI. Inputs with the same group value appear consecutively in the UI.

hint?: string

Hint text displayed in the UI as a tooltip to guide the user

inputs?: Array<Record<string, unknown>>

The list of inputs which form an object within a container array. All inputs are the same as the current object. This is only available for type inputs_array inputs.

items?: Array<Array<Item>>

The configured items for inputs_array type inputs. Each item is an array of SubNodeInput that need ref/value resolution. Only available for inputs_array type.

name: string

The name that must be used to call the model through the API

type: "boolean" | "file" | "file_array" | 7 more

The data type of the input

One of the following:
"boolean"
"file"
"file_array"
"inputs_array"
"model"
"model_array"
"number"
"number_array"
"string"
"string_array"
allowedValues?: Array<unknown>

The allowed values for the input. For `string` or `number` types, creates a single-select dropdown. For `string_array` type, creates a multi-select dropdown.

backgroundBehavior?: "opaque" | "transparent"

Specifies the background behavior for the input. Only available for `file` and `file_array` input types with kind `image`.

One of the following:
"opaque"
"transparent"
color?: boolean

Whether the input is a color or not. Only available for `string` input type.

costImpact?: boolean

Whether this input affects the model’s cost calculation

default?: unknown

The default value for the input

description?: string

Help text displayed in the UI to provide additional information about the input

group?: string

Used to visually group inputs together in the UI. Inputs with the same group value appear consecutively in the UI.

hint?: string

Hint text displayed in the UI as a tooltip to guide the user

inputs?: Array<Record<string, unknown>>

The list of inputs which form an object within a container array. All inputs are the same as the current object. This is only available for type inputs_array inputs.

kind?: "3d" | "audio" | "document" | 4 more

The asset kind of the input. Only taken into account for `file` and `file_array` input types. If the model accepts multiple kinds, the API will not be able to create the asset on the fly from a data URL that lacks the `data:<kind>,` prefix.

One of the following:
"3d"
"audio"
"document"
"image"
"image-hdr"
"json"
"video"
label?: string

The label displayed in the UI for this input

maskFrom?: string

The name of the file input field to use as the mask source

max?: number

The maximum allowed value. Only available for `number` and `array` input types.

maxLength?: number

The maximum allowed length for `string` inputs. Also applies to each item in `string_array`.

maxSize?: number

The maximum allowed file size in bytes. Only applies to `file` and `file_array` input types. Validated against `asset.properties.size` at job creation time.

min?: number

The minimum allowed value. Only available for `number` and array input types.

minLength?: number

The minimum allowed length for string inputs. Also applies to each item in `string_array`.

modelTypes?: Array<"custom" | "elevenlabs-voice" | "flux.1" | 34 more>

The allowed model types for this input. Example: `[“flux.1-lora”]`. Only available for `model_array` input type.

One of the following:
"custom"
"elevenlabs-voice"
"flux.1"
"flux.1-composition"
"flux.1-kontext-dev"
"flux.1-kontext-lora"
"flux.1-krea-dev"
"flux.1-krea-lora"
"flux.1-lora"
"flux.1-pro"
"flux.1.1-pro-ultra"
"flux.2-dev-edit-lora"
"flux.2-dev-lora"
"flux.2-klein-4b-edit-lora"
"flux.2-klein-4b-lora"
"flux.2-klein-9b-edit-lora"
"flux.2-klein-9b-lora"
"flux.2-klein-base-4b-edit-lora"
"flux.2-klein-base-4b-lora"
"flux.2-klein-base-9b-edit-lora"
"flux.2-klein-base-9b-lora"
"flux1.1-pro"
"gpt-image-1"
"qwen-image-2512-lora"
"qwen-image-edit-2509-lora"
"qwen-image-edit-2511-lora"
"qwen-image-edit-lora"
"qwen-image-lora"
"sd-1_5"
"sd-1_5-composition"
"sd-1_5-lora"
"sd-xl"
"sd-xl-composition"
"sd-xl-lora"
"zimage-de-turbo-lora"
"zimage-lora"
"zimage-turbo-lora"
parent?: boolean

Whether this input represents a parent asset to assign to the produced assets. Only available for `file` and `file_array` input types.

For `file_array`, the parent asset is the first item in the array.

placeholder?: string

Placeholder text for the input. Only available for ‘string’ input type.

prompt?: boolean

Whether the input is a prompt. When true, displays as a text area with prompt spark feature. Only available for `string` input type.

promptSpark?: boolean

Whether the input is used with prompt spark. Only available for `string` input type.

ref?: Ref { conditional, equal, name, node }

The reference to another input or output of the same workflow. Must have at least one of node or conditional.

conditional?: Array<string>

The conditional nodes to reference. If the conditional nodes are successful, the node will be successful. If the conditional nodes are skipped, the node will be skipped. Contains an array of node ids used to check the status of the nodes.

equal?: string

This is the desired node output value if ref is an if/else node.

name?: string

The name of the input or output to reference. If node is ‘workflow’, the name of the workflow input is required. If it references a node, the name is not mandatory, except if you want all outputs of the node; to get all outputs of a node, use the name ‘all’.

node?: string

The node id or ‘workflow’ if the source is a workflow input.
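For example, illustrative `ref` values (node ids and input names are placeholders):

```ts
// Illustrative `ref` objects; ids and names are placeholders.
const refToNodeOutputs = { node: "generate-1", name: "all" };    // all outputs of a node
const refToWorkflowInput = { node: "workflow", name: "prompt" }; // a workflow input
```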

required?: Required { always, conditionalValues, ifDefined, ifNotDefined }

Set of rules that describes when this input is required:

  • `always`: Input is always required
  • `ifNotDefined`: Input is required when another specified input is not defined
  • `ifDefined`: Input is required when another specified input is defined
  • `conditionalValues`: Input is required when another input has a specific value

By default, the input is not required.

always?: boolean

Whether the input is always required

conditionalValues?: unknown

Makes this input required when another input has a specific value:

  • Key: name of the input to check
  • Value: operation and allowed values that trigger the requirement
ifDefined?: unknown

Makes this input required when another input is defined:

  • Key: name of the input that must be defined
  • Value: message to display when this input is required
ifNotDefined?: unknown

Makes this input required when another input is not defined:

  • Key: name of the input that must be undefined
  • Value: message to display when this input is required
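A hedged sketch of a `required` rule set; `ifDefined` follows the documented key/value convention (key: input name, value: message), while the input names themselves are placeholders:

```ts
// Sketch of a `required` rule set using the ifDefined rule.
const required = {
  ifDefined: {
    mask: "A prompt is required when a mask is provided", // key: input, value: message
  },
};
```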
step?: number

The step increment for numeric inputs. Only available for `number` input type.

minimum1
value?: unknown

The value of the input. This is the value of the input that will be used to run the node. Only available for flows managed by a WorkflowJob.

kind?: "3d" | "audio" | "document" | 4 more

The asset kind of the input. Only taken into account for `file` and `file_array` input types. If the model accepts multiple kinds, the API will not be able to create the asset on the fly from a data URL that lacks the `data:<kind>,` prefix.

One of the following:
"3d"
"audio"
"document"
"image"
"image-hdr"
"json"
"video"
label?: string

The label displayed in the UI for this input

maskFrom?: string

The name of the file input field to use as the mask source

max?: number

The maximum allowed value. Only available for `number` and `array` input types.

maxLength?: number

The maximum allowed length for `string` inputs. Also applies to each item in `string_array`.

maxSize?: number

The maximum allowed file size in bytes. Only applies to `file` and `file_array` input types. Validated against `asset.properties.size` at job creation time.

min?: number

The minimum allowed value. Only available for `number` and array input types.

minLength?: number

The minimum allowed length for string inputs. Also applies to each item in `string_array`.

modelTypes?: Array<"custom" | "elevenlabs-voice" | "flux.1" | 34 more>

The allowed model types for this input. Example: `[“flux.1-lora”]`. Only available for `model_array` input type.

One of the following:
"custom"
"elevenlabs-voice"
"flux.1"
"flux.1-composition"
"flux.1-kontext-dev"
"flux.1-kontext-lora"
"flux.1-krea-dev"
"flux.1-krea-lora"
"flux.1-lora"
"flux.1-pro"
"flux.1.1-pro-ultra"
"flux.2-dev-edit-lora"
"flux.2-dev-lora"
"flux.2-klein-4b-edit-lora"
"flux.2-klein-4b-lora"
"flux.2-klein-9b-edit-lora"
"flux.2-klein-9b-lora"
"flux.2-klein-base-4b-edit-lora"
"flux.2-klein-base-4b-lora"
"flux.2-klein-base-9b-edit-lora"
"flux.2-klein-base-9b-lora"
"flux1.1-pro"
"gpt-image-1"
"qwen-image-2512-lora"
"qwen-image-edit-2509-lora"
"qwen-image-edit-2511-lora"
"qwen-image-edit-lora"
"qwen-image-lora"
"sd-1_5"
"sd-1_5-composition"
"sd-1_5-lora"
"sd-xl"
"sd-xl-composition"
"sd-xl-lora"
"zimage-de-turbo-lora"
"zimage-lora"
"zimage-turbo-lora"
parent?: boolean

Whether this input represents a parent asset to assign to the produced assets. Only available for `file` and `file_array` input types.

For `file_array`, the parent asset is the first item in the array.

placeholder?: string

Placeholder text for the input. Only available for ‘string’ input type.

prompt?: boolean

Whether the input is a prompt. When true, displays as a text area with prompt spark feature. Only available for `string` input type.

promptSpark?: boolean

Whether the input is used with prompt spark. Only available for `string` input type.

ref?: Ref { conditional, equal, name, node }

The reference to another input or output of the same workflow. Must have at least one of node or conditional.

conditional?: Array<string>

The conditional nodes to reference. If the conditional nodes are successful, the node will be successful. If the conditional nodes are skipped, the node will be skipped. Contains an array of node ids used to check the status of the nodes.

equal?: string

This is the desired node output value if ref is an if/else node.

name?: string

The name of the input or output to reference. If node is ‘workflow’, the name of the workflow input is required. If it references a node, the name is not mandatory, except if you want all outputs of the node; to get all outputs of a node, use the name ‘all’.

node?: string

The node id or ‘workflow’ if the source is a workflow input.

required?: Required { always, conditionalValues, ifDefined, ifNotDefined }

Set of rules that describes when this input is required:

  • `always`: Input is always required
  • `ifNotDefined`: Input is required when another specified input is not defined
  • `ifDefined`: Input is required when another specified input is defined
  • `conditionalValues`: Input is required when another input has a specific value

By default, the input is not required.

always?: boolean

Whether the input is always required

conditionalValues?: unknown

Makes this input required when another input has a specific value:

  • Key: name of the input to check
  • Value: operation and allowed values that trigger the requirement
ifDefined?: unknown

Makes this input required when another input is defined:

  • Key: name of the input that must be defined
  • Value: message to display when this input is required
ifNotDefined?: unknown

Makes this input required when another input is not defined:

  • Key: name of the input that must be undefined
  • Value: message to display when this input is required
step?: number

The step increment for numeric inputs. Only available for `number` input type.

minimum1
value?: unknown

The value of the input. This is the value of the input that will be used to run the node. Only available for flows managed by a WorkflowJob.

items?: Array<string>

Statically-configured items for a List node. The node outputs this array as-is when executed. Only available for List nodes. The values can be strings, numbers, or asset IDs.

iterationIndex?: number

Zero-based index of the iteration this node copy belongs to. Set on dynamically-created copies of loop body nodes.

jobId?: string

If the flow is part of a WorkflowJob, this is the jobId for the node. jobId is only available for nodes that have started; a node that is “Pending” in a running workflow job has not started.

logic?: Logic { cases, default, transform }

The logic of the node. Only available for logic nodes.

cases?: Array<Case>

The cases of the logic. Only available for if/else nodes.

condition: string
value: string
default?: string

The default case of the logic. Contains the id/output of the node to execute if no case is matched. Only available for if/else nodes.

transform?: string

The transform of the logic. Only available for transform nodes.

logicType?: "if-else"

The type of the logic for the node. Only available for logic nodes.
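An illustrative if/else logic node built from the fields above (node ids and the condition string are placeholders; the exact condition syntax is an assumption):

```ts
// Sketch of an if/else logic node; ids and values are placeholders.
const logicNode = {
  type: "logic" as const,
  logicType: "if-else" as const,
  logic: {
    cases: [{ condition: "quality === 'high'", value: "upscale-node" }],
    default: "passthrough-node", // executed when no case matches
  },
};
```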

loopBodyNodeIds?: Array<string>

IDs of the body template nodes that belong to this ForEach loop. At runtime these templates are cloned once per iteration and marked Skipped. Only available for ForEach nodes.

loopNodeId?: string

ID of the ForEach node that spawned this iteration copy. Set on dynamically-created copies of loop body nodes.

modelId?: string

The model id for the node. Mainly used for custom model tasks.

output?: unknown

The output of the node. Only available for logic nodes.

workflowId?: string

The workflow id for the node. Mainly used for workflow tasks.

hint?: string

Actionable hint for the user explaining what went wrong and how to resolve it.

input?: Record<string, unknown>

The inputs for the job

output?: Record<string, unknown>

May contain the output of the job for specific custom model jobs. Only available for custom models which generate non-asset outputs. Example: LLM text results.

outputModelId?: string

For voice-clone jobs: the ID of the model being trained.

workflowId?: string

The workflow ID of the job if job is part of a workflow.

workflowJobId?: string

The workflow job ID of the job if job is part of a workflow job.

progress: number

Progress of the job (between 0 and 1)

status: "canceled" | "failure" | "finalizing" | 5 more

The current status of the job

One of the following:
"canceled"
"failure"
"finalizing"
"in-progress"
"pending"
"queued"
"success"
"warming-up"
statusHistory: Array<StatusHistory>

The history of the different statuses the job went through, with the ISO string date of when the job reached each status.

date: string
status: "canceled" | "failure" | "finalizing" | 5 more
One of the following:
"canceled"
"failure"
"finalizing"
"in-progress"
"pending"
"queued"
"success"
"warming-up"
updatedAt: string

The job last update date as an ISO string (example: “2023-02-03T11:19:41.579Z”)

authorId?: string

The author user ID (example: “dcf121faaa1a0a0bbbd9ca1b73d62aea”)

billing?: Billing { cuCost, cuDiscount }

The billing of the job

cuCost: number
cuDiscount: number
ownerId?: string

The owner ID (example: “team_U3Qmc8PCdWXwAQJ4Dvw4tV6D”)

ModelsTrain

Trigger
client.models.train.trigger(stringmodelID, TrainTriggerParams { dryRun, originalAssets, trainingImagesCount, parameters } params, RequestOptionsoptions?): TrainTriggerResponse { job, model, creativeUnitsCost, creativeUnitsDiscount }
PUT/models/{modelId}/train
Action
client.models.train.action(stringmodelID, TrainActionParams { action, originalAssets } params, RequestOptionsoptions?): TrainActionResponse { model }
POST/models/{modelId}/train/action
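A usage sketch of the trigger endpoint, assuming an already-configured client and that dryRun returns the creative-unit cost fields without starting training, as the response shape suggests (parameter values are placeholders; the model ID is the documentation's example ID):

```ts
declare const client: any; // an already-configured SDK client instance

// Sketch: preview the training cost with dryRun, then trigger for real.
async function trainModel() {
  const preview = await client.models.train.trigger("model_eyVcnFJcR92BxBkz7N6g5w", {
    dryRun: true,
  });
  console.log(preview.creativeUnitsCost, preview.creativeUnitsDiscount);

  const { job } = await client.models.train.trigger("model_eyVcnFJcR92BxBkz7N6g5w", {
    dryRun: false,
  });
  console.log(job.jobId, job.status); // poll until the job reaches "success"
}
```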
ModelsExpand Collapse
TrainTriggerResponse { job, model, creativeUnitsCost, creativeUnitsDiscount }
job: Job { createdAt, jobId, jobType, 8 more }
createdAt: string

The job creation date as an ISO string (example: “2023-02-03T11:19:41.579Z”)

jobId: string

The job ID (example: “job_ocZCnG1Df35XRL1QyCZSRxAG8”)

jobType: "assets-download" | "canvas-export" | "caption" | 36 more

The type of job

One of the following:
"assets-download"
"canvas-export"
"caption"
"caption-llava"
"custom"
"describe-style"
"detection"
"embed"
"flux"
"flux-model-training"
"generate-prompt"
"image-generation"
"image-prompt-editing"
"inference"
"mesh-preview-rendering"
"model-download"
"model-import"
"model-training"
"musubi-model-training"
"openai-image-generation"
"patch-image"
"pixelate"
"reframe"
"remove-background"
"repaint"
"restyle"
"segment"
"skybox-3d"
"skybox-base-360"
"skybox-hdri"
"skybox-upscale-360"
"texture"
"translate"
"upload"
"upscale"
"upscale-skybox"
"upscale-texture"
"vectorize"
"workflow"
metadata: Metadata { assetIds, error, flow, 6 more }

Metadata of the job with some additional information

assetIds?: Array<string>

List of produced assets for this job

error?: string | null

Eventual error for the job

flow?: Array<Flow>

The flow of the job. Only available for workflow jobs.

id: string

The id of the node.

status: "failure" | "pending" | "processing" | 2 more

The status of the node. Only available for WorkflowJob nodes.

One of the following:
"failure"
"pending"
"processing"
"skipped"
"success"
type: "custom-model" | "for-each" | "generate-prompt" | 7 more

The type of the job for the node.

One of the following:
"custom-model"
"for-each"
"generate-prompt"
"list"
"logic"
"model"
"remove-background"
"transform"
"user-approval"
"workflow"
assets?: Array<Asset>

List of produced assets for this node.

assetId: string
url: string
count?: number

Fixed number of iterations for a ForEach node. When set, the loop runs exactly count times regardless of array input. When not set, the loop iterates over the resolved array input. Only available for ForEach nodes.

dependsOn?: Array<string>

The nodes that this node depends on. Only available for nodes that have dependencies. Mainly used for user approval nodes.

includeOutputsInWorkflowJob?: true

If true, the outputs of this node will be included in the workflow job’s final output. Only applicable to producing nodes (custom-model, inference, etc.). By default, only last nodes (nodes not referenced by other nodes) contribute to outputs. Set this to true to also include intermediate nodes in the final output. Note: This should only be set to true or left undefined.

inputs?: Array<Input>

The inputs of the node.

name: string

The name that must be used to call the model through the API

type: "boolean" | "file" | "file_array" | 7 more

The data type of the input

One of the following:
"boolean"
"file"
"file_array"
"inputs_array"
"model"
"model_array"
"number"
"number_array"
"string"
"string_array"
allowedValues?: Array<unknown>

The allowed values for the input. For `string` or `number` types, creates a single-select dropdown. For `string_array` type, creates a multi-select dropdown.

backgroundBehavior?: "opaque" | "transparent"

Specifies the background behavior for the input. Only available for `file` and `file_array` input types with kind `image`.

One of the following:
"opaque"
"transparent"
color?: boolean

Whether the input is a color or not. Only available for `string` input type.

costImpact?: boolean

Whether this input affects the model’s cost calculation

default?: unknown

The default value for the input

description?: string

Help text displayed in the UI to provide additional information about the input

group?: string

Used to visually group inputs together in the UI. Inputs with the same group value appear consecutively in the UI.

hint?: string

Hint text displayed in the UI as a tooltip to guide the user

inputs?: Array<Record<string, unknown>>

The list of inputs which form an object within a container array. All inputs are the same as the current object. This is only available for type inputs_array inputs.

items?: Array<Array<Item>>

The configured items for inputs_array type inputs. Each item is an array of SubNodeInput that need ref/value resolution. Only available for inputs_array type.

name: string

The name that must be used to call the model through the API

type: "boolean" | "file" | "file_array" | 7 more

The data type of the input

One of the following:
"boolean"
"file"
"file_array"
"inputs_array"
"model"
"model_array"
"number"
"number_array"
"string"
"string_array"
allowedValues?: Array<unknown>

The allowed values for the input. For `string` or `number` types, creates a single-select dropdown. For `string_array` type, creates a multi-select dropdown.

backgroundBehavior?: "opaque" | "transparent"

Specifies the background behavior for the input. Only available for `file` and `file_array` input types with kind `image`.

One of the following:
"opaque"
"transparent"
color?: boolean

Whether the input is a color or not. Only available for `string` input type.

costImpact?: boolean

Whether this input affects the model’s cost calculation

default?: unknown

The default value for the input

description?: string

Help text displayed in the UI to provide additional information about the input

group?: string

Used to visually group inputs together in the UI. Inputs with the same group value appear consecutively in the UI.

hint?: string

Hint text displayed in the UI as a tooltip to guide the user

inputs?: Array<Record<string, unknown>>

The list of inputs which form an object within a container array. All inputs are the same as the current object. This is only available for type inputs_array inputs.

kind?: "3d" | "audio" | "document" | 4 more

The asset kind of the input. Only taken into account for `file` and `file_array` input types. If the model accepts multiple kinds, the API will not be able to create the asset on the fly from a data URL that lacks the `data:<kind>,` prefix.

One of the following:
"3d"
"audio"
"document"
"image"
"image-hdr"
"json"
"video"
label?: string

The label displayed in the UI for this input

maskFrom?: string

The name of the file input field to use as the mask source

max?: number

The maximum allowed value. Only available for `number` and `array` input types.

maxLength?: number

The maximum allowed length for `string` inputs. Also applies to each item in `string_array`.

maxSize?: number

The maximum allowed file size in bytes. Only applies to `file` and `file_array` input types. Validated against `asset.properties.size` at job creation time.

min?: number

The minimum allowed value. Only available for `number` and array input types.

minLength?: number

The minimum allowed length for string inputs. Also applies to each item in `string_array`.

modelTypes?: Array<"custom" | "elevenlabs-voice" | "flux.1" | 34 more>

The allowed model types for this input. Example: `[“flux.1-lora”]`. Only available for `model_array` input type.

One of the following:
"custom"
"elevenlabs-voice"
"flux.1"
"flux.1-composition"
"flux.1-kontext-dev"
"flux.1-kontext-lora"
"flux.1-krea-dev"
"flux.1-krea-lora"
"flux.1-lora"
"flux.1-pro"
"flux.1.1-pro-ultra"
"flux.2-dev-edit-lora"
"flux.2-dev-lora"
"flux.2-klein-4b-edit-lora"
"flux.2-klein-4b-lora"
"flux.2-klein-9b-edit-lora"
"flux.2-klein-9b-lora"
"flux.2-klein-base-4b-edit-lora"
"flux.2-klein-base-4b-lora"
"flux.2-klein-base-9b-edit-lora"
"flux.2-klein-base-9b-lora"
"flux1.1-pro"
"gpt-image-1"
"qwen-image-2512-lora"
"qwen-image-edit-2509-lora"
"qwen-image-edit-2511-lora"
"qwen-image-edit-lora"
"qwen-image-lora"
"sd-1_5"
"sd-1_5-composition"
"sd-1_5-lora"
"sd-xl"
"sd-xl-composition"
"sd-xl-lora"
"zimage-de-turbo-lora"
"zimage-lora"
"zimage-turbo-lora"
parent?: boolean

Whether this input represents a parent asset to assign to the produced assets. Only available for `file` and `file_array` input types.

For `file_array`, the parent asset is the first item in the array.

placeholder?: string

Placeholder text for the input. Only available for ‘string’ input type.

prompt?: boolean

Whether the input is a prompt. When true, displays as a text area with prompt spark feature. Only available for `string` input type.

promptSpark?: boolean

Whether the input is used with prompt spark. Only available for `string` input type.

ref?: Ref { conditional, equal, name, node }

The reference to another input or output of the same workflow. Must have at least one of node or conditional.

conditional?: Array<string>

The conditional nodes to reference. If the conditional nodes are successful, the node will be successful. If the conditional nodes are skipped, the node will be skipped. Contains an array of node ids used to check the status of the nodes.

equal?: string

This is the desired node output value if ref is an if/else node.

name?: string

The name of the input or output to reference. If node is ‘workflow’, the name of the workflow input is required. If it references a node, the name is not mandatory, except if you want all outputs of the node; to get all outputs of a node, use the name ‘all’.

node?: string

The node id or ‘workflow’ if the source is a workflow input.

required?: Required { always, conditionalValues, ifDefined, ifNotDefined }

Set of rules that describes when this input is required:

  • `always`: Input is always required
  • `ifNotDefined`: Input is required when another specified input is not defined
  • `ifDefined`: Input is required when another specified input is defined
  • `conditionalValues`: Input is required when another input has a specific value

By default, the input is not required.

always?: boolean

Whether the input is always required

conditionalValues?: unknown

Makes this input required when another input has a specific value:

  • Key: name of the input to check
  • Value: operation and allowed values that trigger the requirement
ifDefined?: unknown

Makes this input required when another input is defined:

  • Key: name of the input that must be defined
  • Value: message to display when this input is required
ifNotDefined?: unknown

Makes this input required when another input is not defined:

  • Key: name of the input that must be undefined
  • Value: message to display when this input is required
step?: number

The step increment for numeric inputs. Only available for `number` input type.

minimum1
value?: unknown

The value of the input. This is the value of the input that will be used to run the node. Only available for flows managed by a WorkflowJob.

kind?: "3d" | "audio" | "document" | 4 more

The asset kind of the input. Only taken into account for `file` and `file_array` input types. If the model accepts multiple kinds, the API will not be able to create the asset on the fly from a data URL that lacks the `data:<kind>,` prefix.

One of the following:
"3d"
"audio"
"document"
"image"
"image-hdr"
"json"
"video"
label?: string

The label displayed in the UI for this input

maskFrom?: string

The name of the file input field to use as the mask source

max?: number

The maximum allowed value. Only available for `number` and `array` input types.

maxLength?: number

The maximum allowed length for `string` inputs. Also applies to each item in `string_array`.

maxSize?: number

The maximum allowed file size in bytes. Only applies to `file` and `file_array` input types. Validated against `asset.properties.size` at job creation time.

min?: number

The minimum allowed value. Only available for `number` and array input types.

minLength?: number

The minimum allowed length for string inputs. Also applies to each item in `string_array`.

modelTypes?: Array<"custom" | "elevenlabs-voice" | "flux.1" | 34 more>

The allowed model types for this input. Example: `[“flux.1-lora”]`. Only available for `model_array` input type.

One of the following:
"custom"
"elevenlabs-voice"
"flux.1"
"flux.1-composition"
"flux.1-kontext-dev"
"flux.1-kontext-lora"
"flux.1-krea-dev"
"flux.1-krea-lora"
"flux.1-lora"
"flux.1-pro"
"flux.1.1-pro-ultra"
"flux.2-dev-edit-lora"
"flux.2-dev-lora"
"flux.2-klein-4b-edit-lora"
"flux.2-klein-4b-lora"
"flux.2-klein-9b-edit-lora"
"flux.2-klein-9b-lora"
"flux.2-klein-base-4b-edit-lora"
"flux.2-klein-base-4b-lora"
"flux.2-klein-base-9b-edit-lora"
"flux.2-klein-base-9b-lora"
"flux1.1-pro"
"gpt-image-1"
"qwen-image-2512-lora"
"qwen-image-edit-2509-lora"
"qwen-image-edit-2511-lora"
"qwen-image-edit-lora"
"qwen-image-lora"
"sd-1_5"
"sd-1_5-composition"
"sd-1_5-lora"
"sd-xl"
"sd-xl-composition"
"sd-xl-lora"
"zimage-de-turbo-lora"
"zimage-lora"
"zimage-turbo-lora"
parent?: boolean

Whether this input represents a parent asset to assign to the produced assets. Only available for `file` and `file_array` input types.

For `file_array`, the parent asset is the first item in the array.

placeholder?: string

Placeholder text for the input. Only available for `string` input type.

prompt?: boolean

Whether the input is a prompt. When true, displays as a text area with prompt spark feature. Only available for `string` input type.

promptSpark?: boolean

Whether the input is used with prompt spark. Only available for `string` input type.

ref?: Ref { conditional, equal, name, node }

The reference to another input or output of the same workflow. Must have at least one of node or conditional.

conditional?: Array<string>

The conditional nodes to reference, as an array of node ids whose status is checked. If the conditional nodes are successful, the node will be successful; if the conditional nodes are skipped, the node will be skipped.

equal?: string

This is the desired node output value if ref is an if/else node.

name?: string

The name of the input or output to reference. If the type is ‘workflow’, the name of the workflow input is required. If the type is ‘node’, the name is not mandatory unless you want all outputs of the node; to get all outputs of a node, use the name ‘all’.

node?: string

The node id or ‘workflow’ if the source is a workflow input.
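
As an illustrative sketch (ids and names are hypothetical), a reference to a workflow-level input versus a reference to another node's outputs:

```ts
// Reference the workflow input named "prompt".
const fromWorkflow = { node: "workflow", name: "prompt" };

// Reference another node; name may be omitted for its default output,
// or set to "all" to get every output of that node.
const fromNode = { node: "node_abc123", name: "all" };
```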

required?: Required { always, conditionalValues, ifDefined, ifNotDefined }

Set of rules that describes when this input is required:

  • `always`: Input is always required
  • `ifNotDefined`: Input is required when another specified input is not defined
  • `ifDefined`: Input is required when another specified input is defined
  • `conditionalValues`: Input is required when another input has a specific value

By default, the input is not required.

always?: boolean

Whether the input is always required

conditionalValues?: unknown

Makes this input required when another input has a specific value:

  • Key: name of the input to check
  • Value: operation and allowed values that trigger the requirement
ifDefined?: unknown

Makes this input required when another input is defined:

  • Key: name of the input that must be defined
  • Value: message to display when this input is required
ifNotDefined?: unknown

Makes this input required when another input is not defined:

  • Key: name of the input that must be undefined
  • Value: message to display when this input is required
step?: number

The step increment for numeric inputs. Only available for `number` input type.

minimum: 1
value?: unknown

The value of the input that will be used to run the node. Only available for flows managed by a WorkflowJob.

items?: Array<string>

Statically-configured items for a List node. The node outputs this array as-is when executed. Only available for List nodes. The values can be strings, numbers, or asset IDs.

iterationIndex?: number

Zero-based index of the iteration this node copy belongs to. Set on dynamically-created copies of loop body nodes.

jobId?: string

If the flow is part of a WorkflowJob, this is the jobId for the node. The jobId is only available for nodes that have started; a node that is “Pending” in a running workflow job has not started.

logic?: Logic { cases, default, transform }

The logic of the node. Only available for logic nodes.

cases?: Array<Case>

The cases of the logic. Only available for if/else nodes.

condition: string
value: string
default?: string

The default case of the logic. Contains the id/output of the node to execute if no case is matched. Only available for if/else nodes.

transform?: string

The transform of the logic. Only available for transform nodes.

logicType?: "if-else"

The type of the logic for the node. Only available for logic nodes.

loopBodyNodeIds?: Array<string>

IDs of the body template nodes that belong to this ForEach loop. At runtime these templates are cloned once per iteration and marked Skipped. Only available for ForEach nodes.

loopNodeId?: string

ID of the ForEach node that spawned this iteration copy. Set on dynamically-created copies of loop body nodes.

modelId?: string

The model id for the node. Mainly used for custom model tasks.

output?: unknown

The output of the node. Only available for logic nodes.

workflowId?: string

The workflow id for the node. Mainly used for workflow tasks.

hint?: string

Actionable hint for the user explaining what went wrong and how to resolve it.

input?: Record<string, unknown>

The inputs for the job

output?: Record<string, unknown>

May contain the output of the job for specific custom model jobs. Only available for custom models which generate non-asset outputs. Example: LLM text results.

outputModelId?: string

For voice-clone jobs: the ID of the model being trained.

workflowId?: string

The workflow ID of the job if job is part of a workflow.

workflowJobId?: string

The workflow job ID of the job if job is part of a workflow job.

progress: number

Progress of the job (between 0 and 1)

status: "canceled" | "failure" | "finalizing" | 5 more

The current status of the job

One of the following:
"canceled"
"failure"
"finalizing"
"in-progress"
"pending"
"queued"
"success"
"warming-up"
statusHistory: Array<StatusHistory>

The history of the different statuses the job went through, with the ISO string date of when the job reached each status.

date: string
status: "canceled" | "failure" | "finalizing" | 5 more
One of the following:
"canceled"
"failure"
"finalizing"
"in-progress"
"pending"
"queued"
"success"
"warming-up"
updatedAt: string

The job last update date as an ISO string (example: “2023-02-03T11:19:41.579Z”)
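
Because each statusHistory entry pairs a status with an ISO date, the time a job spent in each state can be derived client-side. A minimal sketch, assuming the entries are sorted chronologically:

```ts
type StatusHistoryEntry = { date: string; status: string };

// Milliseconds spent in each status; the last entry is measured
// against the job's updatedAt timestamp.
function durationsByStatus(
  history: StatusHistoryEntry[],
  updatedAt: string,
): Record<string, number> {
  const out: Record<string, number> = {};
  for (let i = 0; i < history.length; i++) {
    const start = Date.parse(history[i].date);
    const end = Date.parse(history[i + 1]?.date ?? updatedAt);
    out[history[i].status] = (out[history[i].status] ?? 0) + (end - start);
  }
  return out;
}
```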

authorId?: string

The author user ID (example: “dcf121faaa1a0a0bbbd9ca1b73d62aea”)

billing?: Billing { cuCost, cuDiscount }

The billing of the job

cuCost: number
cuDiscount: number
ownerId?: string

The owner ID (example: “team_U3Qmc8PCdWXwAQJ4Dvw4tV6D”)

model: Model { id, capabilities, collectionIds, 35 more }
id: string

The model ID (example: “model_eyVcnFJcR92BxBkz7N6g5w”)

capabilities: Array<"3d23d" | "audio2audio" | "audio2video" | 29 more>

List of model capabilities (example: [“txt2img”, “img2img”, “txt2img_ip_adapter”, …])

One of the following:
"3d23d"
"audio2audio"
"audio2video"
"controlnet"
"controlnet_img2img"
"controlnet_inpaint"
"controlnet_inpaint_ip_adapter"
"controlnet_ip_adapter"
"controlnet_reference"
"controlnet_texture"
"img23d"
"img2img"
"img2img_ip_adapter"
"img2img_texture"
"img2txt"
"img2video"
"inpaint"
"inpaint_ip_adapter"
"outpaint"
"reference"
"reference_texture"
"txt23d"
"txt2audio"
"txt2img"
"txt2img_ip_adapter"
"txt2img_texture"
"txt2txt"
"txt2video"
"video23d"
"video2audio"
"video2img"
"video2video"
collectionIds: Array<string>

A list of CollectionId this model belongs to

createdAt: string

The model creation date as an ISO string (example: “2023-02-03T11:19:41.579Z”)

custom: boolean

Whether the model is a custom model and can be used only with the POST /generate/custom/{modelId} endpoint

exampleAssetIds: Array<string>

List of all example asset IDs set up by the model owner

privacy: "private" | "public" | "unlisted"

The privacy of the model (default: private)

One of the following:
"private"
"public"
"unlisted"
source: "civitai" | "huggingface" | "other" | "scenario"

The source of the model

One of the following:
"civitai"
"huggingface"
"other"
"scenario"
status: "copying" | "failed" | "new" | 3 more

The model status

One of the following:
"copying"
"failed"
"new"
"trained"
"training"
"training-canceled"
tags: Array<string>

The associated tags (example: [“sci-fi”, “landscape”])

trainingImagesNumber: number

The total number of training images

type: "custom" | "elevenlabs-voice" | "flux.1" | 34 more

The model type (example: “flux.1-lora”)

One of the following:
"custom"
"elevenlabs-voice"
"flux.1"
"flux.1-composition"
"flux.1-kontext-dev"
"flux.1-kontext-lora"
"flux.1-krea-dev"
"flux.1-krea-lora"
"flux.1-lora"
"flux.1-pro"
"flux.1.1-pro-ultra"
"flux.2-dev-edit-lora"
"flux.2-dev-lora"
"flux.2-klein-4b-edit-lora"
"flux.2-klein-4b-lora"
"flux.2-klein-9b-edit-lora"
"flux.2-klein-9b-lora"
"flux.2-klein-base-4b-edit-lora"
"flux.2-klein-base-4b-lora"
"flux.2-klein-base-9b-edit-lora"
"flux.2-klein-base-9b-lora"
"flux1.1-pro"
"gpt-image-1"
"qwen-image-2512-lora"
"qwen-image-edit-2509-lora"
"qwen-image-edit-2511-lora"
"qwen-image-edit-lora"
"qwen-image-lora"
"sd-1_5"
"sd-1_5-composition"
"sd-1_5-lora"
"sd-xl"
"sd-xl-composition"
"sd-xl-lora"
"zimage-de-turbo-lora"
"zimage-lora"
"zimage-turbo-lora"
updatedAt: string

The model last update date as an ISO string (example: “2023-02-03T11:19:41.579Z”)

accessRestrictions?: 0 | 100 | 25 | 2 more

The access restrictions of the model:

  • 0: Free plan
  • 25: Creator plan
  • 50: Pro plan
  • 75: Team plan
  • 100: Enterprise plan

One of the following:
0
100
25
50
75
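
Since each numeric level corresponds to a plan, a lookup table is enough to render it; a small sketch based on the mapping above:

```ts
// Plan name for each accessRestrictions level.
const planByAccessRestriction: Record<number, string> = {
  0: "Free",
  25: "Creator",
  50: "Pro",
  75: "Team",
  100: "Enterprise",
};
```
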
authorId?: string

The author user ID (example: “user_VFhihHKMRZyDDnZAJwLb2Q”)

class?: Class { category, conceptPrompt, modelId, 5 more }

The class of the model

category: string

The category slug of the class (example: “art-style”)

conceptPrompt: string

The concept prompt of the class (example: “a sks character design”)

modelId: string

The model ID of the class (example: “stable-diffusion-v1-5”)

name: string

The class name (example: “Character Design”)

prompt: string

The class prompt (example: “a character design”)

slug: string

The class slug (example: “art-style-character-design”)

status: "published" | "unpublished"

The class status (only published classes are listed, but unpublished classes can still appear in existing models)

One of the following:
"published"
"unpublished"
thumbnails: Array<string>

Some example images URLs to showcase the class

compliantModelIds?: Array<string>

List of base model IDs compliant with the model (example: [“flux.1-dev”, “flux.1-schnell”]). This attribute is mainly used for Flux LoRA models.

concepts?: Array<Concept>

Concepts are required for models of type `composition`.

modelId: string

The model ID (example: “model_eyVcnFJcR92BxBkz7N6g5w”)

scale: number

The scale of the model (example: 1.0). For Flux Kontext Prompt Editing, the scale is between 0 and 2.

maximum: 2
minimum: -2
modelEpoch?: string

The epoch of the model (example: “000001”). Only available for Flux LoRA trained models.

epoch?: string

The epoch of the model. Only available for Flux LoRA trained models. If not set, uses the final model epoch (latest).

epochs?: Array<Epoch>

The epochs of the model. Only available for Flux LoRA trained models.

epoch: string

The epoch hash to identify the epoch

assets?: Array<Asset>

The assets of the epoch if sample prompts have been supplied during training

assetId: string

The AssetId of the image during training (example: “asset_GTrL3mq4SXWyMxkOHRxlpw”)

url: string

The url of the asset

inputs?: Array<Input>

The inputs of the model. Only used for custom models. To retrieve this list, get it by modelId with GET /models/{modelId}

name: string

The name that must be used to call the model through the API

type: "boolean" | "file" | "file_array" | 7 more

The data type of the input

One of the following:
"boolean"
"file"
"file_array"
"inputs_array"
"model"
"model_array"
"number"
"number_array"
"string"
"string_array"
allowedValues?: Array<unknown>

The allowed values for the input. For `string` or `number` types, creates a single-select dropdown. For `string_array` type, creates a multi-select dropdown.
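
For instance, an input definition that uses allowedValues to render a single-select dropdown might look like the following sketch (all values are hypothetical):

```ts
// A string input rendered as a single-select dropdown in the UI.
const styleInput = {
  name: "style",
  type: "string",
  label: "Style",
  allowedValues: ["anime", "realistic", "watercolor"],
  default: "realistic",
};
```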

backgroundBehavior?: "opaque" | "transparent"

Specifies the background behavior for the input. Only available for `file` and `file_array` input types with kind `image`.

One of the following:
"opaque"
"transparent"
color?: boolean

Whether the input is a color or not. Only available for `string` input type.

costImpact?: boolean

Whether this input affects the model’s cost calculation

default?: unknown

The default value for the input

description?: string

Help text displayed in the UI to provide additional information about the input

group?: string

Used to visually group inputs together in the UI. Inputs with the same group value appear consecutively in the UI.

hint?: string

Hint text displayed in the UI as a tooltip to guide the user

inputs?: Array<Record<string, unknown>>

The list of inputs which form an object within a container array. All inputs are the same as the current object. Only available for the `inputs_array` input type.

kind?: "3d" | "audio" | "document" | 4 more

The asset kind of the input. Only taken into account for `file` and `file_array` input types. If the model accepts multiple kinds, the API cannot create the asset on the fly from a data URL that lacks the `data:<kind>,` prefix.

One of the following:
"3d"
"audio"
"document"
"image"
"image-hdr"
"json"
"video"
label?: string

The label displayed in the UI for this input

maskFrom?: string

The name of the file input field to use as the mask source

max?: number

The maximum allowed value. Only available for `number` and `array` input types.

maxLength?: number

The maximum allowed length for `string` inputs. Also applies to each item in `string_array`.

maxSize?: number

The maximum allowed file size in bytes. Only applies to `file` and `file_array` input types. Validated against `asset.properties.size` at job creation time.

min?: number

The minimum allowed value. Only available for `number` and `array` input types.

minLength?: number

The minimum allowed length for `string` inputs. Also applies to each item in `string_array`.

modelTypes?: Array<"custom" | "elevenlabs-voice" | "flux.1" | 34 more>

The allowed model types for this input. Example: `[“flux.1-lora”]`. Only available for `model_array` input type.

One of the following:
"custom"
"elevenlabs-voice"
"flux.1"
"flux.1-composition"
"flux.1-kontext-dev"
"flux.1-kontext-lora"
"flux.1-krea-dev"
"flux.1-krea-lora"
"flux.1-lora"
"flux.1-pro"
"flux.1.1-pro-ultra"
"flux.2-dev-edit-lora"
"flux.2-dev-lora"
"flux.2-klein-4b-edit-lora"
"flux.2-klein-4b-lora"
"flux.2-klein-9b-edit-lora"
"flux.2-klein-9b-lora"
"flux.2-klein-base-4b-edit-lora"
"flux.2-klein-base-4b-lora"
"flux.2-klein-base-9b-edit-lora"
"flux.2-klein-base-9b-lora"
"flux1.1-pro"
"gpt-image-1"
"qwen-image-2512-lora"
"qwen-image-edit-2509-lora"
"qwen-image-edit-2511-lora"
"qwen-image-edit-lora"
"qwen-image-lora"
"sd-1_5"
"sd-1_5-composition"
"sd-1_5-lora"
"sd-xl"
"sd-xl-composition"
"sd-xl-lora"
"zimage-de-turbo-lora"
"zimage-lora"
"zimage-turbo-lora"
parent?: boolean

Whether this input represents a parent asset to assign to the produced assets. Only available for `file` and `file_array` input types.

For `file_array`, the parent asset is the first item in the array.

placeholder?: string

Placeholder text for the input. Only available for `string` input type.

prompt?: boolean

Whether the input is a prompt. When true, displays as a text area with prompt spark feature. Only available for `string` input type.

promptSpark?: boolean

Whether the input is used with prompt spark. Only available for `string` input type.

required?: Required { always, conditionalValues, ifDefined, ifNotDefined }

Set of rules that describes when this input is required:

  • `always`: Input is always required
  • `ifNotDefined`: Input is required when another specified input is not defined
  • `ifDefined`: Input is required when another specified input is defined
  • `conditionalValues`: Input is required when another input has a specific value

By default, the input is not required.

always?: boolean

Whether the input is always required

conditionalValues?: unknown

Makes this input required when another input has a specific value:

  • Key: name of the input to check
  • Value: operation and allowed values that trigger the requirement
ifDefined?: unknown

Makes this input required when another input is defined:

  • Key: name of the input that must be defined
  • Value: message to display when this input is required
ifNotDefined?: unknown

Makes this input required when another input is not defined:

  • Key: name of the input that must be undefined
  • Value: message to display when this input is required
step?: number

The step increment for numeric inputs. Only available for `number` input type.

minimum: 1
modelKeyword?: string

The model keyword. This is a legacy parameter; please use conceptPrompt in parameters instead.

name?: string

The model name (example: “Cinematic Realism”)

negativePromptEmbedding?: string

Fine-tune the model’s inferences with negative prompt embedding

ownerId?: string

The owner ID (example: “team_VFhihHKMRZyDDnZAJwLb2Q”)

parameters?: Parameters { age, batchSize, classPrompt, 29 more }

The parameters of the model

age?: string

Age group of the voice (for professional cloning)

Only available for ElevenLabs voice training

batchSize?: number

The batch size. A larger batch size results in fewer steps and increases the learning rate.

Only available for Flux LoRA training

maximum: 4
minimum: 1
classPrompt?: string

The prompt to specify images in the same class as provided instance images

Only available for SD15 training

cloneType?: string

Type of voice cloning: “instant” (fast) or “professional” (higher quality, requires captcha)

Only available for ElevenLabs voice training

conceptPrompt?: string

The prompt with identifier specifying the instance (or subject) of the class (example: “a daiton dog”)

Default value varies depending on the model type:

  • For SD1.5: “daiton” if no class is associated with the model
  • For SDXL: “daiton”
  • For Flux: ""
gender?: string

Gender of the voice (for professional cloning)

Only available for ElevenLabs voice training

language?: string

Language of the audio samples (ISO 639-1 code)

Only available for ElevenLabs voice training

learningRate?: number

Initial learning rate (after the potential warmup period)

Default value varies depending on the model type:

  • For SD1.5 and SDXL: 0.000005
  • For Flux: 0.0001
exclusiveMinimum
minimum: 0
learningRateTextEncoder?: number

Initial learning rate (after the potential warmup period) for the text encoder

  • Maximum: Flux LoRA: 0.001
  • Default: SDXL: 0.00005 | Flux LoRA: 0.00001
  • Minimum: SDXL: 0 | Flux LoRA: 0.000001

exclusiveMinimum
maximum: 0.001
minimum: 0
learningRateUnet?: number

Initial learning rate (after the potential warmup period) for the UNet

Only available for SDXL LoRA training

exclusiveMinimum
minimum: 0
lrScheduler?: "constant" | "constant-with-warmup" | "cosine" | 3 more

The scheduler type to use (default: “constant”)

Only available for SD15 and SDXL LoRA training

One of the following:
"constant"
"constant-with-warmup"
"cosine"
"cosine-with-restarts"
"linear"
"polynomial"
maxTrainSteps?: number

Maximum number of training steps to execute (default: varies depending on the model type)

For SDXL LoRA training, please use numTextTrainSteps and numUNetTrainSteps instead

Default value varies depending on the model type:

  • For SD1.5: round((number of training images * 225) / 3)
  • For SDXL: number of training images * 175
  • For Flux: number of training images * 100

Maximum value varies depending on the model type:

  • For SD1.5 and SDXL: [0, 40000]
  • For Flux: [0, 10000]
maximum: 40000
minimum: 0
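
The per-type defaults above are simple functions of the training-image count; a sketch of that computation (model families reduced to the three named above):

```ts
// Default maxTrainSteps per model family, following the rules above.
function defaultMaxTrainSteps(
  family: "sd-1_5" | "sd-xl" | "flux",
  trainingImages: number,
): number {
  switch (family) {
    case "sd-1_5":
      return Math.round((trainingImages * 225) / 3);
    case "sd-xl":
      return trainingImages * 175;
    case "flux":
      return trainingImages * 100;
  }
}
```
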
nbEpochs?: number

The number of epochs to train for

Only available for Flux LoRA training

maximum: 30
minimum: 1
nbRepeats?: number

The number of times to repeat the training

Only available for Flux LoRA training

maximum: 30
minimum: 1
numTextTrainSteps?: number

The number of training steps for the text encoder

Only available for SDXL LoRA training

maximum: 40000
minimum: 0
numUNetTrainSteps?: number

The number of training steps for the UNet

Only available for SDXL LoRA training

maximum: 40000
minimum: 0
optimizeFor?: "likeness"

Optimize the model training task for a specific type of input images. The available values are:

  • “likeness”: optimize training for likeness or portrait (targets specific transformer blocks)
  • “all”: train all transformer blocks
  • “none”: train no specific transformer blocks

This parameter controls which double and single transformer blocks are trained during the LoRA training process.

Only available for Flux LoRA training

priorLossWeight?: number

The weight of prior preservation loss

Only available for SD15 and SDXL LoRA training

exclusiveMinimum
maximum: 1.7976931348623157
minimum: 0
randomCrop?: boolean

Whether to randomly crop or center-crop images before resizing to the working resolution

Only available for SD15 and SDXL LoRA training

randomCropRatio?: number

Ratio of random crops

Only available for SD15 and SDXL LoRA training

maximum: 1
minimum: 0
randomCropScale?: number

Scale of random crops

Only available for SD15 and SDXL LoRA training

maximum: 1
minimum: 0
rank?: number

The dimension of the LoRA update matrices

Only available for SDXL (deprecated), Flux LoRA and Musubi training

Default value varies depending on the model type:

  • For SDXL (deprecated): 64
  • For Flux: 16
  • For Musubi: 64

Each trainer enforces its own tighter limit (Flux LoRA: [2; 64], Musubi: [2; 128])

maximum: 128
minimum: 2
removeBackgroundNoise?: boolean

Whether to remove background noise from audio samples before cloning. When enabled, each sample must be at least 5 seconds long.

Only available for ElevenLabs voice training

samplePrompts?: Array<string>

The prompts to use for each epoch. Only available for Flux LoRA training.

sampleSourceImages?: Array<string>

The sample prompt images (AssetIds) paired with samplePrompts. Only available for Flux LoRA training. Must be the same length as samplePrompts.
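
The two arrays are index-paired: item i of sampleSourceImages belongs to prompt i. An illustrative sketch (asset IDs are placeholders):

```ts
const samplePrompts = ["a daiton dog on a beach", "a daiton dog in the snow"];
const sampleSourceImages = ["asset_sampleAAA", "asset_sampleBBB"];
// Both arrays must have the same length; index i of each is a pair.
```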

scaleLr?: boolean

Whether to scale the learning rate

Note: Legacy parameter, will be ignored

Only available for SD15 and SDXL LoRA training

seed?: number

Used to reproduce previous results. Default: randomly generated number.

Only available for SD15 and SDXL LoRA training

maximum: 9007199254740991
minimum: 0
textEncoderTrainingRatio?: number

The ratio of training steps used to train the text encoder

Example: for 100 steps and a value of 0.2, the text encoder will be trained for 20 steps and then the UNet for 80 steps

Note: Legacy parameter, please use numTextTrainSteps and numUNetTrainSteps

Only available for SD15 and SDXL LoRA training

maximum: 0.99
minimum: 0
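
Concretely, the split is plain ratio arithmetic over the total step count:

```ts
// For 100 total steps and a ratio of 0.2: 20 text-encoder steps, then 80 UNet steps.
const totalSteps = 100;
const ratio = 0.2;
const textSteps = Math.round(totalSteps * ratio); // 20
const unetSteps = totalSteps - textSteps; // 80
```
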
validationFrequency?: number

Validation frequency. Cannot be greater than maxTrainSteps value

Only available for SD15 and SDXL LoRA training

minimum: 0
validationPrompt?: string

Validation prompt

Only available for SD15 and SDXL LoRA training

voiceDescription?: string

Description of the voice characteristics

Only available for ElevenLabs voice training

wandbKey?: string

The Weights & Biases key to use for logging. The maximum length is 40 characters.

parentModelId?: string

The id of the parent model

performanceStats?: PerformanceStats { variants, default }

Aggregated performance stats

variants: Array<Variant>

Performance metrics per variant

capability: string

The generation capability (example: “txt2img”, “img2video”, “txt2audio”)

computedAt: string

When these stats were last computed (ISO date)

variantKey: string

Unique variant identifier (example: “txt2img:1K”, “img2video:2K”, “txt2audio”)

arenaScore?: ArenaScore { arenaCategory, arenaModelName, fetchedAt, 5 more }

External quality score from arena.ai leaderboard

arenaCategory: string

Arena category (example: “text_to_image”, “image_to_video”)

arenaModelName: string

Model name on arena.ai

fetchedAt: string

When this score was last fetched (ISO date)

rank: number

Rank in the arena category

rating: number

ELO rating

ratingLower: number

ELO rating confidence interval lower bound

ratingUpper: number

ELO rating confidence interval upper bound

votes: number

Number of human votes

costPerAssetMaxCU?: number

Maximum cost per output asset (CU)

costPerAssetMinCU?: number

Minimum cost per output asset (CU)

costPerAssetP50CU?: number

Median cost per output asset (CU)

inferenceLatencyP50Sec?: number

Inference latency P50 per output asset (seconds)

inferenceLatencyP75Sec?: number

Inference latency P75 per output asset (seconds)

resolution?: string

The resolution bucket (example: “0.5K”, “1K”, “2K”, “4K”)

totalLatencyP50Sec?: number

Total latency P50 per output asset, including queue time (seconds)

totalLatencyP75Sec?: number

Total latency P75 per output asset, including queue time (seconds)

default?: string

Default variant key for quick model comparison

promptEmbedding?: string

Fine-tune the model’s inferences with prompt embedding

shortDescription?: string

The model short description (example: “This model generates highly detailed cinematic scenes.”)

softDeletionOn?: string

The date when the model will be soft deleted (only for Free plan)

thumbnail?: Thumbnail { assetId, url }

A thumbnail for your model

assetId: string

The AssetId of the image used as a thumbnail for your model (example: “asset_GTrL3mq4SXWyMxkOHRxlpw”)

url: string

The url of the image used as a thumbnail for your model

trainingImagePairs?: Array<TrainingImagePair>

Array of training image pairs

instruction?: string

The instruction for the image pair, source to target

sourceId?: string

The source asset ID (must be a training asset)

targetId?: string

The target asset ID (must be a training asset)

trainingImages?: Array<TrainingImage>

The URLs of the first 3 training images of the model. To retrieve the full set of images, get it by modelId

id: string

The training image ID (example: “asset_GTrL3mq4SXWyMxkOHRxlpw”)

automaticCaptioning: string

Automatic captioning of the image

createdAt: string

The training image upload date as an ISO string (example: “2023-02-03T11:19:41.579Z”)

description: string

Description for the image

downloadUrl: string

The URL of the image

name: string

The original file name of the image (example: “my-training-image.jpg”)

trainingProgress?: TrainingProgress { stage, updatedAt, position, 3 more }

Additional information about the training progress of the model

stage: "pending" | "pending-captcha" | "queued-for-train" | 2 more

The stage of the request

One of the following:
"pending"
"pending-captcha"
"queued-for-train"
"running-train"
"starting-train"
updatedAt: number

Timestamp in milliseconds of the last time the training progress was updated

position?: number

Position of the job in the queue (i.e. the number of jobs in the queue before this one)

progress?: number

The progress of the job

maximum: 1
minimum: 0
remainingTimeMs?: number

The remaining time in milliseconds

startedAt?: number

The timestamp in milliseconds marking the start of the process

trainingStats?: TrainingStats { endedAt, queueDuration, startedAt, trainDuration }

Additional information about the model’s training

endedAt?: string

The training end time as an ISO date string

queueDuration?: number

The training queue duration in seconds

startedAt?: string

The training start time as an ISO date string

trainDuration?: number

The training duration in seconds

uiConfig?: UiConfig { inputProperties, lorasComponent, presets, 3 more }

The UI configuration for the model

inputProperties?: Record<string, InputProperties>

Configuration for the input properties

collapsed?: boolean
lorasComponent?: LorasComponent { label, modelInput, scaleInput, modelIdInput }

Configuration for the loras component

label: string

The label of the component

modelInput: string

The input name of the model (model_array)

scaleInput: string

The input name of the scale (number_array)

modelIdInput?: string

The input model id (example: a composition or a single LoRA modelId). If specified, the model id will be attached to the output asset as metadata. If the model-decomposer parser is specified on it, modelInput and scaleInput will be automatically populated.

presets?: Array<Preset>

Configuration for the presets

fields: Array<string>
presets: unknown
resolutionComponent?: ResolutionComponent { heightInput, label, presets, widthInput }

Configuration for the resolution component

heightInput: string

The input name of the height

label: string

The label of the component

presets: Array<Preset>

The resolution presets

height: number
label: string
width: number
widthInput: string

The input name of the width

selects?: Record<string, unknown>

Configuration for the selects

triggerGenerate?: TriggerGenerate { label, after, position }

Configuration for the trigger generate button

label: string
after?: string

The ‘name’ of the input where the trigger generate button will be displayed (after the input). Do not specify both position and after.

position?: "bottom" | "top"

The position of the trigger generate button. If a position is specified, the button will be displayed at that position. Do not specify both position and after.

One of the following:
"bottom"
"top"
userId?: string

(Deprecated) The user ID (example: “user_VFhihHKMRZyDDnZAJwLb2Q”)

creativeUnitsCost?: number

The Creative Units cost billed for the request

creativeUnitsDiscount?: number

The Creative Units discount applied to the request

TrainActionResponse { model }
model: Model { id, capabilities, collectionIds, 35 more }
id: string

The model ID (example: “model_eyVcnFJcR92BxBkz7N6g5w”)

capabilities: Array<"3d23d" | "audio2audio" | "audio2video" | 29 more>

List of model capabilities (example: [“txt2img”, “img2img”, “txt2img_ip_adapter”, …])

One of the following:
"3d23d"
"audio2audio"
"audio2video"
"controlnet"
"controlnet_img2img"
"controlnet_inpaint"
"controlnet_inpaint_ip_adapter"
"controlnet_ip_adapter"
"controlnet_reference"
"controlnet_texture"
"img23d"
"img2img"
"img2img_ip_adapter"
"img2img_texture"
"img2txt"
"img2video"
"inpaint"
"inpaint_ip_adapter"
"outpaint"
"reference"
"reference_texture"
"txt23d"
"txt2audio"
"txt2img"
"txt2img_ip_adapter"
"txt2img_texture"
"txt2txt"
"txt2video"
"video23d"
"video2audio"
"video2img"
"video2video"
collectionIds: Array<string>

A list of CollectionId this model belongs to

createdAt: string

The model creation date as an ISO string (example: “2023-02-03T11:19:41.579Z”)

custom: boolean

Whether the model is a custom model and can be used only with the POST /generate/custom/{modelId} endpoint

exampleAssetIds: Array<string>

List of all example asset IDs set up by the model owner

privacy: "private" | "public" | "unlisted"

The privacy of the model (default: private)

One of the following:
"private"
"public"
"unlisted"
source: "civitai" | "huggingface" | "other" | "scenario"

The source of the model

One of the following:
"civitai"
"huggingface"
"other"
"scenario"
status: "copying" | "failed" | "new" | 3 more

The model status

One of the following:
"copying"
"failed"
"new"
"trained"
"training"
"training-canceled"
tags: Array<string>

The associated tags (example: [“sci-fi”, “landscape”])

trainingImagesNumber: number

The total number of training images

type: "custom" | "elevenlabs-voice" | "flux.1" | 34 more

The model type (example: “flux.1-lora”)

One of the following:
"custom"
"elevenlabs-voice"
"flux.1"
"flux.1-composition"
"flux.1-kontext-dev"
"flux.1-kontext-lora"
"flux.1-krea-dev"
"flux.1-krea-lora"
"flux.1-lora"
"flux.1-pro"
"flux.1.1-pro-ultra"
"flux.2-dev-edit-lora"
"flux.2-dev-lora"
"flux.2-klein-4b-edit-lora"
"flux.2-klein-4b-lora"
"flux.2-klein-9b-edit-lora"
"flux.2-klein-9b-lora"
"flux.2-klein-base-4b-edit-lora"
"flux.2-klein-base-4b-lora"
"flux.2-klein-base-9b-edit-lora"
"flux.2-klein-base-9b-lora"
"flux1.1-pro"
"gpt-image-1"
"qwen-image-2512-lora"
"qwen-image-edit-2509-lora"
"qwen-image-edit-2511-lora"
"qwen-image-edit-lora"
"qwen-image-lora"
"sd-1_5"
"sd-1_5-composition"
"sd-1_5-lora"
"sd-xl"
"sd-xl-composition"
"sd-xl-lora"
"zimage-de-turbo-lora"
"zimage-lora"
"zimage-turbo-lora"
updatedAt: string

The model last update date as an ISO string (example: “2023-02-03T11:19:41.579Z”)

accessRestrictions?: 0 | 100 | 25 | 2 more

The access restrictions of the model:

  • 0: Free plan
  • 25: Creator plan
  • 50: Pro plan
  • 75: Team plan
  • 100: Enterprise plan

One of the following:
0
100
25
50
75
authorId?: string

The author user ID (example: “user_VFhihHKMRZyDDnZAJwLb2Q”)

class?: Class { category, conceptPrompt, modelId, 5 more }

The class of the model

category: string

The category slug of the class (example: “art-style”)

conceptPrompt: string

The concept prompt of the class (example: “a sks character design”)

modelId: string

The model ID of the class (example: “stable-diffusion-v1-5”)

name: string

The class name (example: “Character Design”)

prompt: string

The class prompt (example: “a character design”)

slug: string

The class slug (example: “art-style-character-design”)

status: "published" | "unpublished"

The class status (only published classes are listed, but unpublished classes can still appear in existing models)

One of the following:
"published"
"unpublished"
thumbnails: Array<string>

Some example images URLs to showcase the class

compliantModelIds?: Array<string>

List of base model IDs compliant with the model (example: [“flux.1-dev”, “flux.1-schnell”]). This attribute is mainly used for Flux LoRA models.

concepts?: Array<Concept>

Concepts are required for models of type `composition`.

modelId: string

The model ID (example: “model_eyVcnFJcR92BxBkz7N6g5w”)

scale: number

The scale of the model (example: 1.0). For Flux Kontext Prompt Editing, the scale is between 0 and 2.

maximum: 2
minimum: -2
modelEpoch?: string

The epoch of the model (example: “000001”). Only available for Flux LoRA trained models.

epoch?: string

The epoch of the model. Only available for Flux LoRA trained models. If not set, uses the final model epoch (latest).

epochs?: Array<Epoch>

The epochs of the model. Only available for Flux LoRA trained models.

epoch: string

The epoch hash to identify the epoch

assets?: Array<Asset>

The assets of the epoch if sample prompts have been supplied during training

assetId: string

The AssetId of the image during training (example: “asset_GTrL3mq4SXWyMxkOHRxlpw”)

url: string

The url of the asset

inputs?: Array<Input>

The inputs of the model. Only used for custom models. To retrieve this list, get it by modelId with GET /models/{modelId}

name: string

The name that must be used to call the model through the API

type: "boolean" | "file" | "file_array" | 7 more

The data type of the input

One of the following:
"boolean"
"file"
"file_array"
"inputs_array"
"model"
"model_array"
"number"
"number_array"
"string"
"string_array"
allowedValues?: Array<unknown>

The allowed values for the input. For `string` or `number` types, creates a single-select dropdown. For `string_array` type, creates a multi-select dropdown.

backgroundBehavior?: "opaque" | "transparent"

Specifies the background behavior for the input. Only available for `file` and `file_array` input types with kind `image`.

One of the following:
"opaque"
"transparent"
color?: boolean

Whether the input is a color or not. Only available for `string` input type.

costImpact?: boolean

Whether this input affects the model’s cost calculation

default?: unknown

The default value for the input

description?: string

Help text displayed in the UI to provide additional information about the input

group?: string

Used to visually group inputs together in the UI. Inputs with the same group value appear consecutively in the UI.

hint?: string

Hint text displayed in the UI as a tooltip to guide the user

inputs?: Array<Record<string, unknown>>

The list of inputs which form an object within a container array. All inputs are the same as the current object. Only available for the `inputs_array` input type.

kind?: "3d" | "audio" | "document" | 4 more

The asset kind of the input. Only taken into account for `file` and `file_array` input types. If the model accepts multiple kinds, the API cannot create the asset on the fly from a data URL that lacks the `data:<kind>,` prefix.

One of the following:
"3d"
"audio"
"document"
"image"
"image-hdr"
"json"
"video"
label?: string

The label displayed in the UI for this input

maskFrom?: string

The name of the file input field to use as the mask source

max?: number

The maximum allowed value. Only available for `number` and `array` input types.

maxLength?: number

The maximum allowed length for `string` inputs. Also applies to each item in `string_array`.

maxSize?: number

The maximum allowed file size in bytes. Only applies to `file` and `file_array` input types. Validated against `asset.properties.size` at job creation time.

min?: number

The minimum allowed value. Only available for `number` and `array` input types.

minLength?: number

The minimum allowed length for `string` inputs. Also applies to each item in `string_array`.

modelTypes?: Array<"custom" | "elevenlabs-voice" | "flux.1" | 34 more>

The allowed model types for this input. Example: `[“flux.1-lora”]`. Only available for `model_array` input type.

One of the following:
"custom"
"elevenlabs-voice"
"flux.1"
"flux.1-composition"
"flux.1-kontext-dev"
"flux.1-kontext-lora"
"flux.1-krea-dev"
"flux.1-krea-lora"
"flux.1-lora"
"flux.1-pro"
"flux.1.1-pro-ultra"
"flux.2-dev-edit-lora"
"flux.2-dev-lora"
"flux.2-klein-4b-edit-lora"
"flux.2-klein-4b-lora"
"flux.2-klein-9b-edit-lora"
"flux.2-klein-9b-lora"
"flux.2-klein-base-4b-edit-lora"
"flux.2-klein-base-4b-lora"
"flux.2-klein-base-9b-edit-lora"
"flux.2-klein-base-9b-lora"
"flux1.1-pro"
"gpt-image-1"
"qwen-image-2512-lora"
"qwen-image-edit-2509-lora"
"qwen-image-edit-2511-lora"
"qwen-image-edit-lora"
"qwen-image-lora"
"sd-1_5"
"sd-1_5-composition"
"sd-1_5-lora"
"sd-xl"
"sd-xl-composition"
"sd-xl-lora"
"zimage-de-turbo-lora"
"zimage-lora"
"zimage-turbo-lora"
parent?: boolean

Whether this input represents a parent asset to assign to the produced assets. Only available for `file` and `file_array` input types.

For `file_array`, the parent asset is the first item in the array.

placeholder?: string

Placeholder text for the input. Only available for `string` input type.

prompt?: boolean

Whether the input is a prompt. When true, displays as a text area with prompt spark feature. Only available for `string` input type.

promptSpark?: boolean

Whether the input is used with prompt spark. Only available for `string` input type.

required?: Required { always, conditionalValues, ifDefined, ifNotDefined }

Set of rules that describes when this input is required:

  • `always`: Input is always required
  • `ifNotDefined`: Input is required when another specified input is not defined
  • `ifDefined`: Input is required when another specified input is defined
  • `conditionalValues`: Input is required when another input has a specific value

By default, the input is not required.

always?: boolean

Whether the input is always required

conditionalValues?: unknown

Makes this input required when another input has a specific value:

  • Key: name of the input to check
  • Value: operation and allowed values that trigger the requirement
ifDefined?: unknown

Makes this input required when another input is defined:

  • Key: name of the input that must be defined
  • Value: message to display when this input is required
ifNotDefined?: unknown

Makes this input required when another input is not defined:

  • Key: name of the input that must be undefined
  • Value: message to display when this input is required
step?: number

The step increment for numeric inputs. Only available for `number` input type.

minimum: 1
modelKeyword?: string

The model keyword. This is a legacy parameter; please use conceptPrompt in parameters instead.

name?: string

The model name (example: “Cinematic Realism”)

negativePromptEmbedding?: string

Fine-tune the model’s inferences with negative prompt embedding

ownerId?: string

The owner ID (example: “team_VFhihHKMRZyDDnZAJwLb2Q”)

parameters?: Parameters { age, batchSize, classPrompt, 29 more }

The parameters of the model

age?: string

Age group of the voice (for professional cloning)

Only available for ElevenLabs voice training

batchSize?: number

The batch size. A larger batch size results in fewer steps and increases the learning rate.

Only available for Flux LoRA training

maximum: 4
minimum: 1
classPrompt?: string

The prompt to specify images in the same class as provided instance images

Only available for SD15 training

cloneType?: string

Type of voice cloning: “instant” (fast) or “professional” (higher quality, requires captcha)

Only available for ElevenLabs voice training

conceptPrompt?: string

The prompt with identifier specifying the instance (or subject) of the class (example: “a daiton dog”)

Default value varies depending on the model type:

  • For SD1.5: “daiton” if no class is associated with the model
  • For SDXL: “daiton”
  • For Flux: ""
gender?: string

Gender of the voice (for professional cloning)

Only available for ElevenLabs voice training

language?: string

Language of the audio samples (ISO 639-1 code)

Only available for ElevenLabs voice training

learningRate?: number

Initial learning rate (after the potential warmup period)

Default value varies depending on the model type:

  • For SD1.5 and SDXL: 0.000005
  • For Flux: 0.0001
exclusiveMinimum
minimum: 0
learningRateTextEncoder?: number

Initial learning rate (after the potential warmup period) for the text encoder

  • Maximum: Flux LoRA: 0.001
  • Default: SDXL: 0.00005 | Flux LoRA: 0.00001
  • Minimum: SDXL: 0 | Flux LoRA: 0.000001

exclusiveMinimum
maximum: 0.001
minimum: 0
learningRateUnet?: number

Initial learning rate (after the potential warmup period) for the UNet

Only available for SDXL LoRA training

exclusiveMinimum
minimum: 0
lrScheduler?: "constant" | "constant-with-warmup" | "cosine" | 3 more

The scheduler type to use (default: “constant”)

Only available for SD15 and SDXL LoRA training

One of the following:
"constant"
"constant-with-warmup"
"cosine"
"cosine-with-restarts"
"linear"
"polynomial"
maxTrainSteps?: number

Maximum number of training steps to execute (default: varies depending on the model type)

For SDXL LoRA training, please use numTextTrainSteps and numUNetTrainSteps instead

Default value varies depending on the model type:

  • For SD1.5: round((number of training images * 225) / 3)
  • For SDXL: number of training images * 175
  • For Flux: number of training images * 100

Maximum value varies depending on the model type:

  • For SD1.5 and SDXL: [0, 40000]
  • For Flux: [0, 10000]
maximum: 40000
minimum: 0
nbEpochs?: number

The number of epochs to train for

Only available for Flux LoRA training

maximum: 30
minimum: 1
nbRepeats?: number

The number of times to repeat the training

Only available for Flux LoRA training

maximum: 30
minimum: 1
numTextTrainSteps?: number

The number of training steps for the text encoder

Only available for SDXL LoRA training

maximum: 40000
minimum: 0
numUNetTrainSteps?: number

The number of training steps for the UNet

Only available for SDXL LoRA training

maximum: 40000
minimum: 0
optimizeFor?: "likeness"

Optimize the model training task for a specific type of input images. The available values are:

  • “likeness”: optimize training for likeness or portrait (targets specific transformer blocks)
  • “all”: train all transformer blocks
  • “none”: train no specific transformer blocks

This parameter controls which double and single transformer blocks are trained during the LoRA training process.

Only available for Flux LoRA training

priorLossWeight?: number

The weight of prior preservation loss

Only available for SD15 and SDXL LoRA training

exclusiveMinimum
maximum: 1.7976931348623157
minimum: 0
randomCrop?: boolean

Whether to randomly crop or center-crop images before resizing to the working resolution

Only available for SD15 and SDXL LoRA training

randomCropRatio?: number

Ratio of random crops

Only available for SD15 and SDXL LoRA training

maximum: 1
minimum: 0
randomCropScale?: number

Scale of random crops

Only available for SD15 and SDXL LoRA training

maximum: 1
minimum: 0
rank?: number

The dimension of the LoRA update matrices

Only available for SDXL (deprecated), Flux LoRA and Musubi training

Default value varies depending on the model type:

  • For SDXL (deprecated): 64
  • For Flux: 16
  • For Musubi: 64

Each trainer enforces its own tighter limit (Flux LoRA: [2; 64], Musubi: [2; 128])

maximum: 128
minimum: 2
removeBackgroundNoise?: boolean

Whether to remove background noise from audio samples before cloning. When enabled, each sample must be at least 5 seconds long.

Only available for ElevenLabs voice training

samplePrompts?: Array<string>

The prompts to use for each epoch. Only available for Flux LoRA training.

sampleSourceImages?: Array<string>

The sample prompt images (AssetIds) paired with samplePrompts. Only available for Flux LoRA training. Must be the same length as samplePrompts.

scaleLr?: boolean

Whether to scale the learning rate

Note: Legacy parameter, will be ignored

Only available for SD15 and SDXL LoRA training

seed?: number

Used to reproduce previous results. Default: randomly generated number.

Only available for SD15 and SDXL LoRA training

maximum: 9007199254740991
minimum: 0
textEncoderTrainingRatio?: number

The ratio of training steps used to train the text encoder

Example: for 100 steps and a value of 0.2, the text encoder will be trained for 20 steps and then the UNet for 80 steps

Note: Legacy parameter, please use numTextTrainSteps and numUNetTrainSteps

Only available for SD15 and SDXL LoRA training

maximum: 0.99
minimum: 0
validationFrequency?: number

Validation frequency. Cannot be greater than maxTrainSteps value

Only available for SD15 and SDXL LoRA training

minimum: 0
validationPrompt?: string

Validation prompt

Only available for SD15 and SDXL LoRA training

voiceDescription?: string

Description of the voice characteristics

Only available for ElevenLabs voice training

wandbKey?: string

The Weights & Biases key to use for logging. The maximum length is 40 characters.

parentModelId?: string

The id of the parent model

performanceStats?: PerformanceStats { variants, default }

Aggregated performance stats

variants: Array<Variant>

Performance metrics per variant

capability: string

The generation capability (example: “txt2img”, “img2video”, “txt2audio”)

computedAt: string

When these stats were last computed (ISO date)

variantKey: string

Unique variant identifier (example: “txt2img:1K”, “img2video:2K”, “txt2audio”)

arenaScore?: ArenaScore { arenaCategory, arenaModelName, fetchedAt, 5 more }

External quality score from arena.ai leaderboard

arenaCategory: string

Arena category (example: “text_to_image”, “image_to_video”)

arenaModelName: string

Model name on arena.ai

fetchedAt: string

When this score was last fetched (ISO date)

rank: number

Rank in the arena category

rating: number

ELO rating

ratingLower: number

ELO rating confidence interval lower bound

ratingUpper: number

ELO rating confidence interval upper bound

votes: number

Number of human votes

costPerAssetMaxCU?: number

Maximum cost per output asset (CU)

costPerAssetMinCU?: number

Minimum cost per output asset (CU)

costPerAssetP50CU?: number

Median cost per output asset (CU)

inferenceLatencyP50Sec?: number

Inference latency P50 per output asset (seconds)

inferenceLatencyP75Sec?: number

Inference latency P75 per output asset (seconds)

resolution?: string

The resolution bucket (example: “0.5K”, “1K”, “2K”, “4K”)

totalLatencyP50Sec?: number

Total latency P50 per output asset, including queue time (seconds)

totalLatencyP75Sec?: number

Total latency P75 per output asset, including queue time (seconds)

default?: string

Default variant key for quick model comparison

promptEmbedding?: string

Fine-tune the model’s inferences with prompt embedding

shortDescription?: string

The model short description (example: “This model generates highly detailed cinematic scenes.”)

softDeletionOn?: string

The date when the model will be soft deleted (only for Free plan)

thumbnail?: Thumbnail { assetId, url }

A thumbnail for your model

assetId: string

The AssetId of the image used as a thumbnail for your model (example: “asset_GTrL3mq4SXWyMxkOHRxlpw”)

url: string

The url of the image used as a thumbnail for your model

trainingImagePairs?: Array<TrainingImagePair>

Array of training image pairs

instruction?: string

The instruction for the image pair, source to target

sourceId?: string

The source asset ID (must be a training asset)

targetId?: string

The target asset ID (must be a training asset)

trainingImages?: Array<TrainingImage>

The URLs of the first 3 training images of the model. To retrieve the full set of images, get it by modelId

id: string

The training image ID (example: “asset_GTrL3mq4SXWyMxkOHRxlpw”)

automaticCaptioning: string

Automatic captioning of the image

createdAt: string

The training image upload date as an ISO string (example: “2023-02-03T11:19:41.579Z”)

description: string

Description for the image

downloadUrl: string

The URL of the image

name: string

The original file name of the image (example: “my-training-image.jpg”)

trainingProgress?: TrainingProgress { stage, updatedAt, position, 3 more }

Additional information about the training progress of the model

stage: "pending" | "pending-captcha" | "queued-for-train" | 2 more

The stage of the request

One of the following:
"pending"
"pending-captcha"
"queued-for-train"
"running-train"
"starting-train"
updatedAt: number

Timestamp in milliseconds of the last time the training progress was updated

position?: number

Position of the job in the queue (i.e. the number of jobs in the queue before this one)

progress?: number

The progress of the job

maximum: 1
minimum: 0
remainingTimeMs?: number

The remaining time in milliseconds

startedAt?: number

The timestamp in milliseconds marking the start of the process

trainingStats?: TrainingStats { endedAt, queueDuration, startedAt, trainDuration }

Additional information about the model’s training

endedAt?: string

The training end time as an ISO date string

queueDuration?: number

The training queue duration in seconds

startedAt?: string

The training start time as an ISO date string

trainDuration?: number

The training duration in seconds

uiConfig?: UiConfig { inputProperties, lorasComponent, presets, 3 more }

The UI configuration for the model

inputProperties?: Record<string, InputProperties>

Configuration for the input properties

collapsed?: boolean
lorasComponent?: LorasComponent { label, modelInput, scaleInput, modelIdInput }

Configuration for the loras component

label: string

The label of the component

modelInput: string

The input name of the model (model_array)

scaleInput: string

The input name of the scale (number_array)

modelIdInput?: string

The input model id (example: a composition or a single LoRA modelId). If specified, the model id will be attached to the output asset as metadata. If the model-decomposer parser is specified on it, modelInput and scaleInput will be automatically populated.

presets?: Array<Preset>

Configuration for the presets

fields: Array<string>
presets: unknown
resolutionComponent?: ResolutionComponent { heightInput, label, presets, widthInput }

Configuration for the resolution component

heightInput: string

The input name of the height

label: string

The label of the component

presets: Array<Preset>

The resolution presets

height: number
label: string
width: number
widthInput: string

The input name of the width

selects?: Record<string, unknown>

Configuration for the selects

triggerGenerate?: TriggerGenerate { label, after, position }

Configuration for the trigger generate button

label: string
after?: string

The ‘name’ of the input where the trigger generate button will be displayed (after the input). Do not specify both position and after.

position?: "bottom" | "top"

The position of the trigger generate button. If a position is specified, the button will be displayed at that position. Do not specify both position and after.

One of the following:
"bottom"
"top"
userId?: string

(Deprecated) The user ID (example: “user_VFhihHKMRZyDDnZAJwLb2Q”)

Models: Training Images

Add
client.models.trainingImages.add(stringmodelID, TrainingImageAddParams { originalAssets, assetId, assetIds, 3 more } params, RequestOptionsoptions?): TrainingImageAddResponse { trainingImage }
POST/models/{modelId}/training-images
Replace Pairs
client.models.trainingImages.replacePairs(stringmodelID, TrainingImageReplacePairsParams { body } params, RequestOptionsoptions?): TrainingImageReplacePairsResponse { count, pairs }
PUT/models/{modelId}/training-images/pairs
Replace
client.models.trainingImages.replace(stringtrainingImageID, TrainingImageReplaceParams { modelId, originalAssets, assetId, 4 more } params, RequestOptionsoptions?): TrainingImageReplaceResponse { trainingImage }
PUT/models/{modelId}/training-images/{trainingImageId}
Delete
client.models.trainingImages.delete(stringtrainingImageID, TrainingImageDeleteParams { modelId } params, RequestOptionsoptions?): TrainingImageDeleteResponse
DELETE/models/{modelId}/training-images/{trainingImageId}
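
For example, attaching an existing asset to a model as a training image might look like this sketch (the model and asset IDs reuse the example values from this page, and only the assetId parameter is shown):

```ts
// Minimal sketch: add one existing asset as a training image.
const { trainingImage } = await client.models.trainingImages.add(
  "model_eyVcnFJcR92BxBkz7N6g5w",
  { assetId: "asset_GTrL3mq4SXWyMxkOHRxlpw" },
);
console.log(trainingImage.id, trainingImage.downloadUrl);
```
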
Models
TrainingImageAddResponse { trainingImage }
trainingImage: TrainingImage { id, automaticCaptioning, createdAt, 3 more }
id: string

The training image ID (example: “asset_GTrL3mq4SXWyMxkOHRxlpw”)

automaticCaptioning: string

Automatic captioning of the image

createdAt: string

The training image upload date as an ISO string (example: “2023-02-03T11:19:41.579Z”)

description: string

Description for the image

downloadUrl: string

The URL of the image

name: string

The original file name of the image (example: “my-training-image.jpg”)

TrainingImageReplacePairsResponse { count, pairs }
count: number

Number of training image pairs

pairs: Array<Pair>

Array of training image pairs

instruction?: string

The instruction for the image pair, source to target

sourceId?: string

The source asset ID (must be a training asset)

targetId?: string

The target asset ID (must be a training asset)
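
A sketch of a replacePairs call, assuming the body parameter carries the pairs array (both IDs must already be training assets of the model; the values here are placeholders):

```ts
const { count, pairs } = await client.models.trainingImages.replacePairs(
  "model_eyVcnFJcR92BxBkz7N6g5w",
  {
    body: [
      {
        sourceId: "asset_sourceAAA",
        targetId: "asset_targetBBB",
        instruction: "turn the daytime scene into night",
      },
    ],
  },
);
console.log(`Replaced with ${count} pair(s)`, pairs);
```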

TrainingImageReplaceResponse { trainingImage }
trainingImage: TrainingImage { id, automaticCaptioning, createdAt, 3 more }
id: string

The training image ID (example: “asset_GTrL3mq4SXWyMxkOHRxlpw”)

automaticCaptioning: string

Automatic captioning of the image

createdAt: string

The training image upload date as an ISO string (example: “2023-02-03T11:19:41.579Z”)

description: string

Description for the image

downloadUrl: string

The URL of the image

name: string

The original file name of the image (example: “my-training-image.jpg”)

TrainingImageDeleteResponse = unknown