From a04408fc02865b680aff918eb7b5852865c89006 Mon Sep 17 00:00:00 2001 From: github-actions Date: Sat, 25 Oct 2025 00:26:29 +0000 Subject: [PATCH] Update API specifications with fern api update --- fern/apis/api/openapi.json | 2870 ++++++++++++++++++++++++------------ 1 file changed, 1887 insertions(+), 983 deletions(-) diff --git a/fern/apis/api/openapi.json b/fern/apis/api/openapi.json index 55b794f63..18f274fe4 100644 --- a/fern/apis/api/openapi.json +++ b/fern/apis/api/openapi.json @@ -911,7 +911,47 @@ "eq": "2", "gt": "1" } - } + }, + "type": "object" + } + }, + { + "required": false, + "description": "Filter calls by the first scorecard's normalized score.", + "name": "score", + "in": "query", + "schema": { + "properties": { + "eq": { + "type": "string", + "description": "Equal to" + }, + "neq": { + "type": "string", + "description": "Not equal to" + }, + "gt": { + "type": "string", + "description": "Greater than" + }, + "gte": { + "type": "string", + "description": "Greater than or equal to" + }, + "lt": { + "type": "string", + "description": "Less than" + }, + "lte": { + "type": "string", + "description": "Less than or equal to" + } + }, + "example": { + "gte": 80, + "lt": 100 + }, + "type": "object" } }, { @@ -1189,15 +1229,6 @@ "type": "string" } }, - { - "name": "workflowId", - "required": false, - "in": "query", - "description": "This is the unique identifier for the workflow that will be used for the chat.", - "schema": { - "type": "string" - } - }, { "name": "sessionId", "required": false, @@ -4346,6 +4377,53 @@ ] } }, + "/structured-output/run": { + "post": { + "operationId": "StructuredOutputController_run", + "summary": "Run Structured Output", + "parameters": [], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/StructuredOutputRunDTO" + } + } + } + }, + "responses": { + "200": { + "description": "", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/StructuredOutput" + } + } + } + }, + "201": { + "description": "", + "content": { + "application/json": { + "schema": { + "type": "object" + } + } + } + } + }, + "tags": [ + "Structured Outputs" + ], + "security": [ + { + "bearer": [] + } + ] + } + }, "/eval": { "post": { "operationId": "EvalController_create", @@ -4903,58 +4981,78 @@ ] } }, - "/provider/{provider}/{resourceName}": { - "post": { - "operationId": "ProviderResourceController_createProviderResource", - "summary": "Create Provider Resource", + "/observability/scorecard/{id}": { + "get": { + "operationId": "ScorecardController_get", + "summary": "Get Scorecard", "parameters": [ { - "name": "content-type", - "required": true, - "in": "header", - "schema": { - "type": "string" - } - }, - { - "name": "provider", + "name": "id", "required": true, "in": "path", - "description": "The provider (e.g., 11labs)", "schema": { - "enum": [ - "11labs" - ], "type": "string" } - }, + } + ], + "responses": { + "200": { + "description": "", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Scorecard" + } + } + } + } + }, + "tags": [ + "Observability/Scorecard" + ], + "security": [ { - "name": "resourceName", + "bearer": [] + } + ] + }, + "patch": { + "operationId": "ScorecardController_update", + "summary": "Update Scorecard", + "parameters": [ + { + "name": "id", "required": true, "in": "path", - "description": "The resource name (e.g., pronunciation-dictionary)", "schema": { - "enum": [ - "pronunciation-dictionary" - ], "type": "string" } 
} ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/UpdateScorecardDTO" + } + } + } + }, "responses": { - "201": { - "description": "Successfully created provider resource", + "200": { + "description": "", "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/ProviderResource" + "$ref": "#/components/schemas/Scorecard" } } } } }, "tags": [ - "Provider Resources" + "Observability/Scorecard" ], "security": [ { @@ -4962,44 +5060,48 @@ } ] }, - "get": { - "operationId": "ProviderResourceController_getProviderResourcesPaginated", - "summary": "List Provider Resources", + "delete": { + "operationId": "ScorecardController_remove", + "summary": "Delete Scorecard", "parameters": [ { - "name": "provider", + "name": "id", "required": true, "in": "path", - "description": "The provider (e.g., 11labs)", "schema": { - "enum": [ - "11labs" - ], "type": "string" } - }, - { - "name": "resourceName", - "required": true, - "in": "path", - "description": "The resource name (e.g., pronunciation-dictionary)", - "schema": { - "enum": [ - "pronunciation-dictionary" - ], - "type": "string" + } + ], + "responses": { + "200": { + "description": "", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Scorecard" + } + } } - }, + } + }, + "tags": [ + "Observability/Scorecard" + ], + "security": [ { - "name": "id", - "required": false, - "in": "query", - "schema": { - "type": "string" - } - }, + "bearer": [] + } + ] + } + }, + "/observability/scorecard": { + "get": { + "operationId": "ScorecardController_getPaginated", + "summary": "List Scorecards", + "parameters": [ { - "name": "resourceId", + "name": "id", "required": false, "in": "query", "schema": { @@ -5123,18 +5225,53 @@ ], "responses": { "200": { - "description": "List of provider resources", + "description": "", "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/ProviderResourcePaginatedResponse" + "$ref": "#/components/schemas/ScorecardPaginatedResponse" } } } } }, "tags": [ - "Provider Resources" + "Observability/Scorecard" + ], + "security": [ + { + "bearer": [] + } + ] + }, + "post": { + "operationId": "ScorecardController_create", + "summary": "Create Scorecard", + "parameters": [], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/CreateScorecardDTO" + } + } + } + }, + "responses": { + "201": { + "description": "", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Scorecard" + } + } + } + } + }, + "tags": [ + "Observability/Scorecard" ], "security": [ { @@ -5143,11 +5280,19 @@ ] } }, - "/provider/{provider}/{resourceName}/{id}": { - "get": { - "operationId": "ProviderResourceController_getProviderResource", - "summary": "Get Provider Resource", + "/provider/{provider}/{resourceName}": { + "post": { + "operationId": "ProviderResourceController_createProviderResource", + "summary": "Create Provider Resource", "parameters": [ + { + "name": "content-type", + "required": true, + "in": "header", + "schema": { + "type": "string" + } + }, { "name": "provider", "required": true, @@ -5171,20 +5316,11 @@ ], "type": "string" } - }, - { - "name": "id", - "required": true, - "in": "path", - "schema": { - "format": "uuid", - "type": "string" - } } ], "responses": { - "200": { - "description": "Successfully retrieved provider resource", + "201": { + "description": "Successfully created provider resource", 
"content": { "application/json": { "schema": { @@ -5192,9 +5328,6 @@ } } } - }, - "404": { - "description": "Provider resource not found" } }, "tags": [ @@ -5206,9 +5339,253 @@ } ] }, - "delete": { - "operationId": "ProviderResourceController_deleteProviderResource", - "summary": "Delete Provider Resource", + "get": { + "operationId": "ProviderResourceController_getProviderResourcesPaginated", + "summary": "List Provider Resources", + "parameters": [ + { + "name": "provider", + "required": true, + "in": "path", + "description": "The provider (e.g., 11labs)", + "schema": { + "enum": [ + "11labs" + ], + "type": "string" + } + }, + { + "name": "resourceName", + "required": true, + "in": "path", + "description": "The resource name (e.g., pronunciation-dictionary)", + "schema": { + "enum": [ + "pronunciation-dictionary" + ], + "type": "string" + } + }, + { + "name": "id", + "required": false, + "in": "query", + "schema": { + "type": "string" + } + }, + { + "name": "resourceId", + "required": false, + "in": "query", + "schema": { + "type": "string" + } + }, + { + "name": "page", + "required": false, + "in": "query", + "description": "This is the page number to return. Defaults to 1.", + "schema": { + "minimum": 1, + "type": "number" + } + }, + { + "name": "sortOrder", + "required": false, + "in": "query", + "description": "This is the sort order for pagination. Defaults to 'DESC'.", + "schema": { + "enum": [ + "ASC", + "DESC" + ], + "type": "string" + } + }, + { + "name": "limit", + "required": false, + "in": "query", + "description": "This is the maximum number of items to return. Defaults to 100.", + "schema": { + "minimum": 0, + "maximum": 1000, + "type": "number" + } + }, + { + "name": "createdAtGt", + "required": false, + "in": "query", + "description": "This will return items where the createdAt is greater than the specified value.", + "schema": { + "format": "date-time", + "type": "string" + } + }, + { + "name": "createdAtLt", + "required": false, + "in": "query", + "description": "This will return items where the createdAt is less than the specified value.", + "schema": { + "format": "date-time", + "type": "string" + } + }, + { + "name": "createdAtGe", + "required": false, + "in": "query", + "description": "This will return items where the createdAt is greater than or equal to the specified value.", + "schema": { + "format": "date-time", + "type": "string" + } + }, + { + "name": "createdAtLe", + "required": false, + "in": "query", + "description": "This will return items where the createdAt is less than or equal to the specified value.", + "schema": { + "format": "date-time", + "type": "string" + } + }, + { + "name": "updatedAtGt", + "required": false, + "in": "query", + "description": "This will return items where the updatedAt is greater than the specified value.", + "schema": { + "format": "date-time", + "type": "string" + } + }, + { + "name": "updatedAtLt", + "required": false, + "in": "query", + "description": "This will return items where the updatedAt is less than the specified value.", + "schema": { + "format": "date-time", + "type": "string" + } + }, + { + "name": "updatedAtGe", + "required": false, + "in": "query", + "description": "This will return items where the updatedAt is greater than or equal to the specified value.", + "schema": { + "format": "date-time", + "type": "string" + } + }, + { + "name": "updatedAtLe", + "required": false, + "in": "query", + "description": "This will return items where the updatedAt is less than or equal to the specified value.", + "schema": { 
+ "format": "date-time", + "type": "string" + } + } + ], + "responses": { + "200": { + "description": "List of provider resources", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ProviderResourcePaginatedResponse" + } + } + } + } + }, + "tags": [ + "Provider Resources" + ], + "security": [ + { + "bearer": [] + } + ] + } + }, + "/provider/{provider}/{resourceName}/{id}": { + "get": { + "operationId": "ProviderResourceController_getProviderResource", + "summary": "Get Provider Resource", + "parameters": [ + { + "name": "provider", + "required": true, + "in": "path", + "description": "The provider (e.g., 11labs)", + "schema": { + "enum": [ + "11labs" + ], + "type": "string" + } + }, + { + "name": "resourceName", + "required": true, + "in": "path", + "description": "The resource name (e.g., pronunciation-dictionary)", + "schema": { + "enum": [ + "pronunciation-dictionary" + ], + "type": "string" + } + }, + { + "name": "id", + "required": true, + "in": "path", + "schema": { + "format": "uuid", + "type": "string" + } + } + ], + "responses": { + "200": { + "description": "Successfully retrieved provider resource", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ProviderResource" + } + } + } + }, + "404": { + "description": "Provider resource not found" + } + }, + "tags": [ + "Provider Resources" + ], + "security": [ + { + "bearer": [] + } + ] + }, + "delete": { + "operationId": "ProviderResourceController_deleteProviderResource", + "summary": "Delete Provider Resource", "parameters": [ { "name": "provider", @@ -14865,233 +15242,731 @@ "toolId": { "type": "string", "description": "This is the tool to call. To use a transient tool, send `tool` instead." - }, - "name": { - "type": "string", - "maxLength": 80 - }, - "isStart": { - "type": "boolean", - "description": "This is whether or not the node is the start of the workflow." - }, - "metadata": { - "type": "object", - "description": "This is for metadata you want to store on the task." + }, + "name": { + "type": "string", + "maxLength": 80 + }, + "isStart": { + "type": "boolean", + "description": "This is whether or not the node is the start of the workflow." + }, + "metadata": { + "type": "object", + "description": "This is for metadata you want to store on the task." + } + }, + "required": [ + "type", + "name" + ] + }, + "VoicemailDetectionBackoffPlan": { + "type": "object", + "properties": { + "startAtSeconds": { + "type": "number", + "description": "This is the number of seconds to wait before starting the first retry attempt.", + "minimum": 0, + "default": 5 + }, + "frequencySeconds": { + "type": "number", + "description": "This is the interval in seconds between retry attempts.", + "minimum": 2.5, + "default": 5 + }, + "maxRetries": { + "type": "number", + "description": "This is the maximum number of retry attempts before giving up.", + "minimum": 1, + "maximum": 10, + "default": 6 + } + } + }, + "GoogleVoicemailDetectionPlan": { + "type": "object", + "properties": { + "beepMaxAwaitSeconds": { + "type": "number", + "description": "This is the maximum duration from the start of the call that we will wait for a voicemail beep, before speaking our message\n\n- If we detect a voicemail beep before this, we will speak the message at that point.\n\n- Setting too low a value means that the bot will start speaking its voicemail message too early. If it does so before the actual beep, it will get cut off. 
You should definitely tune this to your use case.\n\n@default 30\n@min 0\n@max 60", + "minimum": 0, + "maximum": 30, + "default": 30 + }, + "provider": { + "type": "string", + "description": "This is the provider to use for voicemail detection.", + "enum": [ + "google" + ] + }, + "backoffPlan": { + "description": "This is the backoff plan for the voicemail detection.", + "allOf": [ + { + "$ref": "#/components/schemas/VoicemailDetectionBackoffPlan" + } + ] + }, + "type": { + "type": "string", + "description": "This is the detection type to use for voicemail detection.\n- 'audio': Uses native audio models (default)\n- 'transcript': Uses ASR/transcript-based detection\n@default 'audio' (audio detection)", + "enum": [ + "audio", + "transcript" + ] + } + }, + "required": [ + "provider" + ] + }, + "OpenAIVoicemailDetectionPlan": { + "type": "object", + "properties": { + "beepMaxAwaitSeconds": { + "type": "number", + "description": "This is the maximum duration from the start of the call that we will wait for a voicemail beep, before speaking our message\n\n- If we detect a voicemail beep before this, we will speak the message at that point.\n\n- Setting too low a value means that the bot will start speaking its voicemail message too early. If it does so before the actual beep, it will get cut off. You should definitely tune this to your use case.\n\n@default 30\n@min 0\n@max 60", + "minimum": 0, + "maximum": 30, + "default": 30 + }, + "provider": { + "type": "string", + "description": "This is the provider to use for voicemail detection.", + "enum": [ + "openai" + ] + }, + "backoffPlan": { + "description": "This is the backoff plan for the voicemail detection.", + "allOf": [ + { + "$ref": "#/components/schemas/VoicemailDetectionBackoffPlan" + } + ] + }, + "type": { + "type": "string", + "description": "This is the detection type to use for voicemail detection.\n- 'audio': Uses native audio models (default)\n- 'transcript': Uses ASR/transcript-based detection\n@default 'audio' (audio detection)", + "enum": [ + "audio", + "transcript" + ] + } + }, + "required": [ + "provider" + ] + }, + "TwilioVoicemailDetectionPlan": { + "type": "object", + "properties": { + "provider": { + "type": "string", + "description": "This is the provider to use for voicemail detection.", + "enum": [ + "twilio" + ] + }, + "voicemailDetectionTypes": { + "type": "array", + "description": "These are the AMD messages from Twilio that are considered as voicemail. Default is ['machine_end_beep', 'machine_end_silence'].\n\n@default {Array} ['machine_end_beep', 'machine_end_silence']", + "enum": [ + "machine_start", + "human", + "fax", + "unknown", + "machine_end_beep", + "machine_end_silence", + "machine_end_other" + ], + "example": [ + "machine_end_beep", + "machine_end_silence" + ], + "items": { + "type": "string", + "enum": [ + "machine_start", + "human", + "fax", + "unknown", + "machine_end_beep", + "machine_end_silence", + "machine_end_other" + ] + } + }, + "enabled": { + "type": "boolean", + "description": "This sets whether the assistant should detect voicemail. Defaults to true.\n\n@default true" + }, + "machineDetectionTimeout": { + "type": "number", + "description": "The number of seconds that Twilio should attempt to perform answering machine detection before timing out and returning AnsweredBy as unknown. Default is 30 seconds.\n\nIncreasing this value will provide the engine more time to make a determination. 
This can be useful when DetectMessageEnd is provided in the MachineDetection parameter and there is an expectation of long answering machine greetings that can exceed 30 seconds.\n\nDecreasing this value will reduce the amount of time the engine has to make a determination. This can be particularly useful when the Enable option is provided in the MachineDetection parameter and you want to limit the time for initial detection.\n\nCheck the [Twilio docs](https://www.twilio.com/docs/voice/answering-machine-detection#optional-api-tuning-parameters) for more info.\n\n@default 30", + "minimum": 3, + "maximum": 59 + }, + "machineDetectionSpeechThreshold": { + "type": "number", + "description": "The number of milliseconds that is used as the measuring stick for the length of the speech activity. Durations lower than this value will be interpreted as a human, longer as a machine. Default is 2400 milliseconds.\n\nIncreasing this value will reduce the chance of a False Machine (detected machine, actually human) for a long human greeting (e.g., a business greeting) but increase the time it takes to detect a machine.\n\nDecreasing this value will reduce the chances of a False Human (detected human, actually machine) for short voicemail greetings. The value of this parameter may need to be reduced by more than 1000ms to detect very short voicemail greetings. A reduction of that significance can result in increased False Machine detections. Adjusting the MachineDetectionSpeechEndThreshold is likely the better approach for short voicemails. Decreasing MachineDetectionSpeechThreshold will also reduce the time it takes to detect a machine.\n\nCheck the [Twilio docs](https://www.twilio.com/docs/voice/answering-machine-detection#optional-api-tuning-parameters) for more info.\n\n@default 2400", + "minimum": 1000, + "maximum": 6000 + }, + "machineDetectionSpeechEndThreshold": { + "type": "number", + "description": "The number of milliseconds of silence after speech activity at which point the speech activity is considered complete. Default is 1200 milliseconds.\n\nIncreasing this value will typically be used to better address the short voicemail greeting scenarios. For short voicemails, there is typically 1000-2000ms of audio followed by 1200-2400ms of silence and then additional audio before the beep. Increasing the MachineDetectionSpeechEndThreshold to ~2500ms will treat the 1200-2400ms of silence as a gap in the greeting but not the end of the greeting and will result in a machine detection. The downsides of such a change include:\n- Increasing the delay for human detection by the amount you increase this parameter, e.g., a change of 1200ms to 2500ms increases human detection delay by 1300ms.\n- Cases where a human has two utterances separated by a period of silence (e.g. a \"Hello\", then 2000ms of silence, and another \"Hello\") may be interpreted as a machine.\n\nDecreasing this value will result in faster human detection. 
The consequence is that it can lead to increased False Human (detected human, actually machine) detections because a silence gap in a voicemail greeting (not necessarily just in short voicemail scenarios) can be incorrectly interpreted as the end of speech.\n\nCheck the [Twilio docs](https://www.twilio.com/docs/voice/answering-machine-detection#optional-api-tuning-parameters) for more info.\n\n@default 1200", + "minimum": 500, + "maximum": 5000 + }, + "machineDetectionSilenceTimeout": { + "type": "number", + "description": "The number of milliseconds of initial silence after which an unknown AnsweredBy result will be returned. Default is 5000 milliseconds.\n\nIncreasing this value will result in waiting for a longer period of initial silence before returning an 'unknown' AMD result.\n\nDecreasing this value will result in waiting for a shorter period of initial silence before returning an 'unknown' AMD result.\n\nCheck the [Twilio docs](https://www.twilio.com/docs/voice/answering-machine-detection#optional-api-tuning-parameters) for more info.\n\n@default 5000", + "minimum": 2000, + "maximum": 10000 + } + }, + "required": [ + "provider" + ] + }, + "VapiVoicemailDetectionPlan": { + "type": "object", + "properties": { + "beepMaxAwaitSeconds": { + "type": "number", + "description": "This is the maximum duration from the start of the call that we will wait for a voicemail beep, before speaking our message\n\n- If we detect a voicemail beep before this, we will speak the message at that point.\n\n- Setting too low a value means that the bot will start speaking its voicemail message too early. If it does so before the actual beep, it will get cut off. You should definitely tune this to your use case.\n\n@default 30\n@min 0\n@max 60", + "minimum": 0, + "maximum": 30, + "default": 30 + }, + "provider": { + "type": "string", + "description": "This is the provider to use for voicemail detection.", + "enum": [ + "vapi" + ] + }, + "backoffPlan": { + "description": "This is the backoff plan for the voicemail detection.", + "allOf": [ + { + "$ref": "#/components/schemas/VoicemailDetectionBackoffPlan" + } + ] + }, + "type": { + "type": "string", + "description": "This is the detection type to use for voicemail detection.\n- 'audio': Uses native audio models (default)\n- 'transcript': Uses ASR/transcript-based detection\n@default 'audio' (audio detection)", + "enum": [ + "audio", + "transcript" + ] + } + }, + "required": [ + "provider" + ] + }, + "TransferHookAction": { + "type": "object", + "properties": { + "type": { + "type": "string", + "description": "This is the type of action - must be \"transfer\"", + "enum": [ + "transfer" + ] + }, + "destination": { + "description": "This is the destination details for the transfer - can be a phone number or SIP URI", + "oneOf": [ + { + "$ref": "#/components/schemas/TransferDestinationNumber", + "title": "NumberTransferDestination" + }, + { + "$ref": "#/components/schemas/TransferDestinationSip", + "title": "SipTransferDestination" + } + ] + } + }, + "required": [ + "type" + ] + }, + "FunctionCallHookAction": { + "type": "object", + "properties": { + "messages": { + "type": "array", + "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. 
For others like the function tool, these can be custom configured.", + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/ToolMessageStart", + "title": "ToolMessageStart" + }, + { + "$ref": "#/components/schemas/ToolMessageComplete", + "title": "ToolMessageComplete" + }, + { + "$ref": "#/components/schemas/ToolMessageFailed", + "title": "ToolMessageFailed" + }, + { + "$ref": "#/components/schemas/ToolMessageDelayed", + "title": "ToolMessageDelayed" + } + ] + } + }, + "type": { + "type": "string", + "enum": [ + "function" + ], + "description": "The type of tool. \"function\" for Function tool." + }, + "async": { + "type": "boolean", + "example": false, + "description": "This determines if the tool is async.\n\n If async, the assistant will move forward without waiting for your server to respond. This is useful if you just want to trigger something on your server.\n\n If sync, the assistant will wait for your server to respond. This is useful if you want the assistant to respond with the result from your server.\n\n Defaults to synchronous (`false`)." + }, + "server": { + "description": "\n This is the server where a `tool-calls` webhook will be sent.\n\n Notes:\n - Webhook is sent to this server when a tool call is made.\n - Webhook contains the call, assistant, and phone number objects.\n - Webhook contains the variables set on the assistant.\n - Webhook is sent to the first available URL in this order: {{tool.server.url}}, {{assistant.server.url}}, {{phoneNumber.server.url}}, {{org.server.url}}.\n - Webhook expects a response with tool call result.", + "allOf": [ + { + "$ref": "#/components/schemas/Server" + } + ] + }, + "rejectionPlan": { + "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 
contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```", + "allOf": [ + { + "$ref": "#/components/schemas/ToolRejectionPlan" + } + ] + }, + "function": { + "description": "This is the function definition of the tool.", + "allOf": [ + { + "$ref": "#/components/schemas/OpenAIFunction" + } + ] + } + }, + "required": [ + "type" + ] + }, + "SayHookAction": { + "type": "object", + "properties": { + "type": { + "type": "string", + "description": "This is the type of action - must be \"say\"", + "enum": [ + "say" + ] + }, + "prompt": { + "description": "This is the prompt for the assistant to generate a response based on existing conversation.\nCan be a string or an array of chat messages.", + "oneOf": [ + { + "type": "string", + "title": "String" + }, + { + "type": "array", + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/SystemMessage", + "title": "SystemMessage" + }, + { + "$ref": "#/components/schemas/UserMessage", + "title": "UserMessage" + }, + { + "$ref": "#/components/schemas/AssistantMessage", + "title": "AssistantMessage" + }, + { + "$ref": "#/components/schemas/ToolMessage", + "title": "ToolMessage" + }, + { + "$ref": "#/components/schemas/DeveloperMessage", + "title": "DeveloperMessage" + } + ] + }, + "title": "MessageArray" + } + ], + "examples": [ + "Ask the user if they're still in the call", + [ + { + "role": "system", + "content": "You are a helpful assistant, and would like to know if the user is still in the call based on the conversation history in {{transcript}}" + } + ] + ] + }, + "exact": { + "type": "object", + "description": "This is the message to say" + } + }, + "required": [ + "type" + ] + }, + "CallHookFilter": { + "type": "object", + "properties": { + "type": { + "type": "string", + "description": "This is the type of filter - currently only \"oneOf\" is supported", + "enum": [ + "oneOf" + ], + "maxLength": 1000 + }, + "key": { + "type": "string", + "description": "This is the key to filter on (e.g. 
\"call.endedReason\")", + "maxLength": 1000 + }, + "oneOf": { + "description": "This is the array of possible values to match against", + "type": "array", + "items": { + "type": "string", + "maxLength": 1000 + } + } + }, + "required": [ + "type", + "key", + "oneOf" + ] + }, + "CallHookCallEnding": { + "type": "object", + "properties": { + "on": { + "type": "string", + "description": "This is the event that triggers this hook", + "enum": [ + "call.ending" + ], + "maxLength": 1000 + }, + "do": { + "type": "array", + "description": "This is the set of actions to perform when the hook triggers", + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/ToolCallHookAction", + "title": "ToolCallHookAction" + } + ] + } + }, + "filters": { + "description": "This is the set of filters that must match for the hook to trigger", + "type": "array", + "items": { + "$ref": "#/components/schemas/CallHookFilter" + } + } + }, + "required": [ + "on", + "do" + ] + }, + "CallHookAssistantSpeechInterrupted": { + "type": "object", + "properties": { + "on": { + "type": "string", + "description": "This is the event that triggers this hook", + "enum": [ + "assistant.speech.interrupted" + ], + "maxLength": 1000 + }, + "do": { + "type": "array", + "description": "This is the set of actions to perform when the hook triggers", + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/SayHookAction", + "title": "SayHookAction" + }, + { + "$ref": "#/components/schemas/ToolCallHookAction", + "title": "ToolCallHookAction" + } + ] + } + } + }, + "required": [ + "on", + "do" + ] + }, + "CallHookCustomerSpeechInterrupted": { + "type": "object", + "properties": { + "on": { + "type": "string", + "description": "This is the event that triggers this hook", + "enum": [ + "customer.speech.interrupted" + ], + "maxLength": 1000 + }, + "do": { + "type": "array", + "description": "This is the set of actions to perform when the hook triggers", + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/SayHookAction", + "title": "SayHookAction" + }, + { + "$ref": "#/components/schemas/ToolCallHookAction", + "title": "ToolCallHookAction" + } + ] + } + } + }, + "required": [ + "on", + "do" + ] + }, + "ToolCallHookAction": { + "type": "object", + "properties": { + "type": { + "type": "string", + "description": "This is the type of action - must be \"tool\"", + "enum": [ + "tool" + ] + }, + "tool": { + "description": "This is the tool to call. 
To use an existing tool, send `toolId` instead.", + "oneOf": [ + { + "$ref": "#/components/schemas/CreateApiRequestToolDTO", + "title": "ApiRequestTool" + }, + { + "$ref": "#/components/schemas/CreateBashToolDTO", + "title": "BashTool" + }, + { + "$ref": "#/components/schemas/CreateComputerToolDTO", + "title": "ComputerTool" + }, + { + "$ref": "#/components/schemas/CreateDtmfToolDTO", + "title": "DtmfTool" + }, + { + "$ref": "#/components/schemas/CreateEndCallToolDTO", + "title": "EndCallTool" + }, + { + "$ref": "#/components/schemas/CreateFunctionToolDTO", + "title": "FunctionTool" + }, + { + "$ref": "#/components/schemas/CreateGoHighLevelCalendarAvailabilityToolDTO", + "title": "GoHighLevelCalendarAvailabilityTool" + }, + { + "$ref": "#/components/schemas/CreateGoHighLevelCalendarEventCreateToolDTO", + "title": "GoHighLevelCalendarEventCreateTool" + }, + { + "$ref": "#/components/schemas/CreateGoHighLevelContactCreateToolDTO", + "title": "GoHighLevelContactCreateTool" + }, + { + "$ref": "#/components/schemas/CreateGoHighLevelContactGetToolDTO", + "title": "GoHighLevelContactGetTool" + }, + { + "$ref": "#/components/schemas/CreateGoogleCalendarCheckAvailabilityToolDTO", + "title": "GoogleCalendarCheckAvailabilityTool" + }, + { + "$ref": "#/components/schemas/CreateGoogleCalendarCreateEventToolDTO", + "title": "GoogleCalendarCreateEventTool" + }, + { + "$ref": "#/components/schemas/CreateGoogleSheetsRowAppendToolDTO", + "title": "GoogleSheetsRowAppendTool" + }, + { + "$ref": "#/components/schemas/CreateHandoffToolDTO", + "title": "HandoffTool" + }, + { + "$ref": "#/components/schemas/CreateMcpToolDTO", + "title": "McpTool" + }, + { + "$ref": "#/components/schemas/CreateQueryToolDTO", + "title": "QueryTool" + }, + { + "$ref": "#/components/schemas/CreateSlackSendMessageToolDTO", + "title": "SlackSendMessageTool" + }, + { + "$ref": "#/components/schemas/CreateSmsToolDTO", + "title": "SmsTool" + }, + { + "$ref": "#/components/schemas/CreateTextEditorToolDTO", + "title": "TextEditorTool" + }, + { + "$ref": "#/components/schemas/CreateTransferCallToolDTO", + "title": "TransferCallTool" + } + ] + }, + "toolId": { + "type": "string", + "description": "This is the tool to call. To use a transient tool, send `tool` instead." 
} }, "required": [ - "type", - "name" + "type" ] }, - "VoicemailDetectionBackoffPlan": { + "CustomerSpeechTimeoutOptions": { "type": "object", "properties": { - "startAtSeconds": { - "type": "number", - "description": "This is the number of seconds to wait before starting the first retry attempt.", - "minimum": 0, - "default": 5 - }, - "frequencySeconds": { + "timeoutSeconds": { "type": "number", - "description": "This is the interval in seconds between retry attempts.", - "minimum": 2.5, - "default": 5 + "description": "This is the timeout in seconds before action is triggered.\nThe clock starts when the assistant finishes speaking and remains active until the user speaks.\n\n@default 7.5", + "minimum": 1, + "maximum": 1000 }, - "maxRetries": { + "triggerMaxCount": { "type": "number", - "description": "This is the maximum number of retry attempts before giving up.", + "description": "This is the maximum number of times the hook will trigger in a call.\n\n@default 3", "minimum": 1, - "maximum": 10, - "default": 6 - } - } - }, - "GoogleVoicemailDetectionPlan": { - "type": "object", - "properties": { - "beepMaxAwaitSeconds": { - "type": "number", - "description": "This is the maximum duration from the start of the call that we will wait for a voicemail beep, before speaking our message\n\n- If we detect a voicemail beep before this, we will speak the message at that point.\n\n- Setting too low a value means that the bot will start speaking its voicemail message too early. If it does so before the actual beep, it will get cut off. You should definitely tune this to your use case.\n\n@default 30\n@min 0\n@max 60", - "minimum": 0, - "maximum": 30, - "default": 30 - }, - "provider": { - "type": "string", - "description": "This is the provider to use for voicemail detection.", - "enum": [ - "google" - ] - }, - "backoffPlan": { - "description": "This is the backoff plan for the voicemail detection.", - "allOf": [ - { - "$ref": "#/components/schemas/VoicemailDetectionBackoffPlan" - } - ] + "maximum": 10 }, - "type": { - "type": "string", - "description": "This is the detection type to use for voicemail detection.\n- 'audio': Uses native audio models (default)\n- 'transcript': Uses ASR/transcript-based detection\n@default 'audio' (audio detection)", - "enum": [ - "audio", - "transcript" - ] + "triggerResetMode": { + "type": "object", + "description": "This is whether the counter for hook trigger resets the user speaks.\n\n@default never" } }, "required": [ - "provider" + "timeoutSeconds" ] }, - "OpenAIVoicemailDetectionPlan": { + "CallHookCustomerSpeechTimeout": { "type": "object", "properties": { - "beepMaxAwaitSeconds": { - "type": "number", - "description": "This is the maximum duration from the start of the call that we will wait for a voicemail beep, before speaking our message\n\n- If we detect a voicemail beep before this, we will speak the message at that point.\n\n- Setting too low a value means that the bot will start speaking its voicemail message too early. If it does so before the actual beep, it will get cut off. 
You should definitely tune this to your use case.\n\n@default 30\n@min 0\n@max 60", - "minimum": 0, - "maximum": 30, - "default": 30 - }, - "provider": { + "on": { "type": "string", - "description": "This is the provider to use for voicemail detection.", - "enum": [ - "openai" - ] + "description": "Must be either \"customer.speech.timeout\" or match the pattern \"customer.speech.timeout[property=value]\"", + "maxLength": 1000 }, - "backoffPlan": { - "description": "This is the backoff plan for the voicemail detection.", + "do": { + "type": "array", + "description": "This is the set of actions to perform when the hook triggers", + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/SayHookAction", + "title": "SayHookAction" + }, + { + "$ref": "#/components/schemas/ToolCallHookAction", + "title": "ToolCallHookAction" + } + ] + } + }, + "options": { + "description": "This is the set of options for the customer speech timeout hook.", "allOf": [ { - "$ref": "#/components/schemas/VoicemailDetectionBackoffPlan" + "$ref": "#/components/schemas/CustomerSpeechTimeoutOptions" } ] }, - "type": { + "name": { "type": "string", - "description": "This is the detection type to use for voicemail detection.\n- 'audio': Uses native audio models (default)\n- 'transcript': Uses ASR/transcript-based detection\n@default 'audio' (audio detection)", - "enum": [ - "audio", - "transcript" - ] + "description": "This is the name of the hook; it can be set by the user to identify the hook.\nIf no name is provided, the hook name will be auto-generated as a UUID.\n\n@default UUID", + "maxLength": 1000 } }, "required": [ - "provider" + "on", + "do" ] }, - "TwilioVoicemailDetectionPlan": { + "CallHookModelResponseTimeout": { "type": "object", "properties": { - "provider": { + "on": { "type": "string", - "description": "This is the provider to use for voicemail detection.", + "description": "This is the event that triggers this hook", "enum": [ - "twilio" - ] + "model.response.timeout" + ], + "maxLength": 1000 }, - "voicemailDetectionTypes": { + "do": { "type": "array", - "description": "These are the AMD messages from Twilio that are considered as voicemail. Default is ['machine_end_beep', 'machine_end_silence'].\n\n@default {Array} ['machine_end_beep', 'machine_end_silence']", - "enum": [ - "machine_start", - "human", - "fax", - "unknown", - "machine_end_beep", - "machine_end_silence", - "machine_end_other" - ], - "example": [ - "machine_end_beep", - "machine_end_silence" - ], + "description": "This is the set of actions to perform when the hook triggers", "items": { - "type": "string", - "enum": [ - "machine_start", - "human", - "fax", - "unknown", - "machine_end_beep", - "machine_end_silence", - "machine_end_other" + "oneOf": [ + { + "$ref": "#/components/schemas/SayHookAction", + "title": "SayHookAction" + }, + { + "$ref": "#/components/schemas/ToolCallHookAction", + "title": "ToolCallHookAction" + } ] } - }, - "enabled": { - "type": "boolean", - "description": "This sets whether the assistant should detect voicemail. Defaults to true.\n\n@default true" - }, - "machineDetectionTimeout": { - "type": "number", - "description": "The number of seconds that Twilio should attempt to perform answering machine detection before timing out and returning AnsweredBy as unknown. Default is 30 seconds.\n\nIncreasing this value will provide the engine more time to make a determination. 
This can be useful when DetectMessageEnd is provided in the MachineDetection parameter and there is an expectation of long answering machine greetings that can exceed 30 seconds.\n\nDecreasing this value will reduce the amount of time the engine has to make a determination. This can be particularly useful when the Enable option is provided in the MachineDetection parameter and you want to limit the time for initial detection.\n\nCheck the [Twilio docs](https://www.twilio.com/docs/voice/answering-machine-detection#optional-api-tuning-parameters) for more info.\n\n@default 30", - "minimum": 3, - "maximum": 59 - }, - "machineDetectionSpeechThreshold": { - "type": "number", - "description": "The number of milliseconds that is used as the measuring stick for the length of the speech activity. Durations lower than this value will be interpreted as a human, longer as a machine. Default is 2400 milliseconds.\n\nIncreasing this value will reduce the chance of a False Machine (detected machine, actually human) for a long human greeting (e.g., a business greeting) but increase the time it takes to detect a machine.\n\nDecreasing this value will reduce the chances of a False Human (detected human, actually machine) for short voicemail greetings. The value of this parameter may need to be reduced by more than 1000ms to detect very short voicemail greetings. A reduction of that significance can result in increased False Machine detections. Adjusting the MachineDetectionSpeechEndThreshold is likely the better approach for short voicemails. Decreasing MachineDetectionSpeechThreshold will also reduce the time it takes to detect a machine.\n\nCheck the [Twilio docs](https://www.twilio.com/docs/voice/answering-machine-detection#optional-api-tuning-parameters) for more info.\n\n@default 2400", - "minimum": 1000, - "maximum": 6000 - }, - "machineDetectionSpeechEndThreshold": { - "type": "number", - "description": "The number of milliseconds of silence after speech activity at which point the speech activity is considered complete. Default is 1200 milliseconds.\n\nIncreasing this value will typically be used to better address the short voicemail greeting scenarios. For short voicemails, there is typically 1000-2000ms of audio followed by 1200-2400ms of silence and then additional audio before the beep. Increasing the MachineDetectionSpeechEndThreshold to ~2500ms will treat the 1200-2400ms of silence as a gap in the greeting but not the end of the greeting and will result in a machine detection. The downsides of such a change include:\n- Increasing the delay for human detection by the amount you increase this parameter, e.g., a change of 1200ms to 2500ms increases human detection delay by 1300ms.\n- Cases where a human has two utterances separated by a period of silence (e.g. a \"Hello\", then 2000ms of silence, and another \"Hello\") may be interpreted as a machine.\n\nDecreasing this value will result in faster human detection. 
The consequence is that it can lead to increased False Human (detected human, actually machine) detections because a silence gap in a voicemail greeting (not necessarily just in short voicemail scenarios) can be incorrectly interpreted as the end of speech.\n\nCheck the [Twilio docs](https://www.twilio.com/docs/voice/answering-machine-detection#optional-api-tuning-parameters) for more info.\n\n@default 1200", - "minimum": 500, - "maximum": 5000 - }, - "machineDetectionSilenceTimeout": { - "type": "number", - "description": "The number of milliseconds of initial silence after which an unknown AnsweredBy result will be returned. Default is 5000 milliseconds.\n\nIncreasing this value will result in waiting for a longer period of initial silence before returning an 'unknown' AMD result.\n\nDecreasing this value will result in waiting for a shorter period of initial silence before returning an 'unknown' AMD result.\n\nCheck the [Twilio docs](https://www.twilio.com/docs/voice/answering-machine-detection#optional-api-tuning-parameters) for more info.\n\n@default 5000", - "minimum": 2000, - "maximum": 10000 - } - }, - "required": [ - "provider" - ] - }, - "VapiVoicemailDetectionPlan": { - "type": "object", - "properties": { - "beepMaxAwaitSeconds": { - "type": "number", - "description": "This is the maximum duration from the start of the call that we will wait for a voicemail beep, before speaking our message\n\n- If we detect a voicemail beep before this, we will speak the message at that point.\n\n- Setting too low a value means that the bot will start speaking its voicemail message too early. If it does so before the actual beep, it will get cut off. You should definitely tune this to your use case.\n\n@default 30\n@min 0\n@max 60", - "minimum": 0, - "maximum": 30, - "default": 30 - }, - "provider": { - "type": "string", - "description": "This is the provider to use for voicemail detection.", - "enum": [ - "vapi" - ] - }, - "backoffPlan": { - "description": "This is the backoff plan for the voicemail detection.", - "allOf": [ - { - "$ref": "#/components/schemas/VoicemailDetectionBackoffPlan" - } - ] - }, - "type": { - "type": "string", - "description": "This is the detection type to use for voicemail detection.\n- 'audio': Uses native audio models (default)\n- 'transcript': Uses ASR/transcript-based detection\n@default 'audio' (audio detection)", - "enum": [ - "audio", - "transcript" - ] } }, "required": [ - "provider" + "on", + "do" ] }, "AIEdgeCondition": { @@ -15593,6 +16468,58 @@ } } }, + "ScorecardMetric": { + "type": "object", + "properties": { + "structuredOutputId": { + "type": "string", + "description": "This is the unique identifier for the structured output that will be used to evaluate the scorecard.\nThe structured output must be of type number or boolean only for now." + }, + "conditions": { + "description": "These are the conditions that will be used to evaluate the scorecard.\nEach condition will have a comparator, value, and points that will be used to calculate the final score.\nThe points will be added to the overall score if the condition is met.\nThe overall score will be normalized to a 100 point scale to ensure uniformity across different scorecards.", + "type": "array", + "items": { + "type": "object" + } + } + }, + "required": [ + "structuredOutputId", + "conditions" + ] + }, + "CreateScorecardDTO": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "This is the name of the scorecard. 
It is only for user reference and will not be used for any evaluation.", + "maxLength": 80 + }, + "description": { + "type": "string", + "description": "This is the description of the scorecard. It is only for user reference and will not be used for any evaluation.", + "maxLength": 500 + }, + "metrics": { + "description": "These are the metrics that will be used to evaluate the scorecard.\nEach metric will have a set of conditions and points that will be used to generate the score.", + "type": "array", + "items": { + "$ref": "#/components/schemas/ScorecardMetric" + } + }, + "assistantIds": { + "description": "These are the assistant IDs that this scorecard is linked to.\nWhen linked to assistants, this scorecard will be available for evaluation during those assistants' calls.", + "type": "array", + "items": { + "type": "string" + } + } + }, + "required": [ + "metrics" + ] + }, "ArtifactPlan": { "type": "object", "properties": { @@ -15668,6 +16595,20 @@ "type": "string" } }, + "scorecardIds": { + "description": "This is an array of scorecard IDs that will be evaluated based on the structured outputs extracted during the call.\nThe scorecards will be evaluated and the results will be stored in `call.artifact.scorecards` after the call has ended.", + "type": "array", + "items": { + "type": "string" + } + }, + "scorecards": { + "description": "This is the array of scorecards that will be evaluated based on the structured outputs extracted during the call.\nThe scorecards will be evaluated and the results will be stored in `call.artifact.scorecards` after the call has ended.", + "type": "array", + "items": { + "$ref": "#/components/schemas/CreateScorecardDTO" + } + }, "loggingPath": { "type": "string", "description": "This is the path where the call logs will be uploaded. This is only used if you have provided S3 or GCP credentials on the Provider Credentials page in the Dashboard.\n\nIf credential.s3PathPrefix or credential.bucketPlan.path is set, this will append to it.\n\nUsage:\n- If you want to upload the call logs to a specific path, set this to the path. Example: `/my-assistant-logs`.\n- If you want to upload the call logs to the root of the bucket, set this to `/`.\n\n@default '/'" @@ -16453,6 +17394,10 @@ { "$ref": "#/components/schemas/CallHookCustomerSpeechTimeout", "title": "CallHookCustomerSpeechTimeout" + }, + { + "$ref": "#/components/schemas/CallHookModelResponseTimeout", + "title": "CallHookModelResponseTimeout" } ] } @@ -16724,6 +17669,12 @@ "voicemailDetection": { "description": "This is the voicemail detection plan for the workflow.", "oneOf": [ + { + "type": "string", + "enum": [ + "off" + ] + }, { "$ref": "#/components/schemas/GoogleVoicemailDetectionPlan", "title": "Google" @@ -17594,6 +18545,48 @@ } } }, + "CartesiaGenerationConfigExperimental": { + "type": "object", + "properties": { + "accentLocalization": { + "type": "integer", + "description": "Toggle accent localization for sonic-3: 0 (disabled, default) or 1 (enabled). When enabled, the voice adapts to match the transcript language accent while preserving vocal characteristics.", + "example": 0, + "minimum": 0, + "maximum": 1, + "default": 0 + } + } + }, + "CartesiaGenerationConfig": { + "type": "object", + "properties": { + "speed": { + "type": "number", + "description": "Fine-grained speed control for sonic-3. Only available for sonic-3 model.", + "example": 1, + "minimum": 0.6, + "maximum": 1.5, + "default": 1 + }, + "volume": { + "type": "number", + "description": "Fine-grained volume control for sonic-3. 
Only available for sonic-3 model.", + "example": 1, + "minimum": 0.5, + "maximum": 2, + "default": 1 + }, + "experimental": { + "description": "Experimental model controls for sonic-3. These are subject to breaking changes.", + "allOf": [ + { + "$ref": "#/components/schemas/CartesiaGenerationConfigExperimental" + } + ] + } + } + }, "CartesiaVoice": { "type": "object", "properties": { @@ -17618,6 +18611,7 @@ "type": "string", "description": "This is the model that will be used. This is optional and will default to the correct model for the voiceId.", "enum": [ + "sonic-3", "sonic-2", "sonic-english", "sonic-multilingual", @@ -17630,21 +18624,48 @@ "type": "string", "description": "This is the language that will be used. This is optional and will default to the correct language for the voiceId.", "enum": [ - "en", + "ar", + "bg", + "bn", + "cs", + "da", "de", + "el", + "en", "es", + "fi", "fr", - "ja", - "pt", - "zh", + "gu", + "he", "hi", + "hr", + "hu", + "id", "it", + "ja", + "ka", + "kn", "ko", + "ml", + "mr", + "ms", "nl", + "no", + "pa", "pl", + "pt", + "ro", "ru", + "sk", "sv", - "tr" + "ta", + "te", + "th", + "tl", + "tr", + "uk", + "vi", + "zh" ], "example": "en" }, @@ -17656,6 +18677,19 @@ } ] }, + "generationConfig": { + "description": "Generation config for fine-grained control of sonic-3 voice output (speed, volume, and experimental controls). Only available for sonic-3 model.", + "allOf": [ + { + "$ref": "#/components/schemas/CartesiaGenerationConfig" + } + ] + }, + "pronunciationDictId": { + "type": "string", + "description": "Pronunciation dictionary ID for sonic-3. Allows custom pronunciations for specific words. Only available for sonic-3 model.", + "example": "dict_abc123" + }, "chunkPlan": { "description": "This is the plan for chunking the model output before it is sent to the voice provider.", "allOf": [ @@ -19743,6 +20777,7 @@ "type": "string", "description": "This is the model that will be used. This is optional and will default to the correct model for the voiceId.", "enum": [ + "sonic-3", "sonic-2", "sonic-english", "sonic-multilingual", @@ -19755,21 +20790,48 @@ "type": "string", "description": "This is the language that will be used. This is optional and will default to the correct language for the voiceId.", "enum": [ - "en", + "ar", + "bg", + "bn", + "cs", + "da", "de", + "el", + "en", "es", + "fi", "fr", - "ja", - "pt", - "zh", + "gu", + "he", "hi", + "hr", + "hu", + "id", "it", + "ja", + "ka", + "kn", "ko", + "ml", + "mr", + "ms", "nl", + "no", + "pa", "pl", + "pt", + "ro", "ru", + "sk", "sv", - "tr" + "ta", + "te", + "th", + "tl", + "tr", + "uk", + "vi", + "zh" ], "example": "en" }, @@ -19781,6 +20843,19 @@ } ] }, + "generationConfig": { + "description": "Generation config for fine-grained control of sonic-3 voice output (speed, volume, and experimental controls). Only available for sonic-3 model.", + "allOf": [ + { + "$ref": "#/components/schemas/CartesiaGenerationConfig" + } + ] + }, + "pronunciationDictId": { + "type": "string", + "description": "Pronunciation dictionary ID for sonic-3. Allows custom pronunciations for specific words. 
Only available for sonic-3 model.", + "example": "dict_abc123" + }, "chunkPlan": { "description": "This is the plan for chunking the model output before it is sent to the voice provider.", "allOf": [ @@ -23093,624 +24168,159 @@ }, "required": [ "provider", - "authenticationPlan" - ] - }, - "CreateXAiCredentialDTO": { - "type": "object", - "properties": { - "provider": { - "type": "string", - "description": "This is the api key for Grok in XAi's console. Get it from here: https://console.x.ai", - "enum": [ - "xai" - ] - }, - "apiKey": { - "type": "string", - "maxLength": 10000, - "description": "This is not returned in the API." - }, - "name": { - "type": "string", - "description": "This is the name of credential. This is just for your reference.", - "minLength": 1, - "maxLength": 40 - } - }, - "required": [ - "provider", - "apiKey" - ] - }, - "CreateGoogleCalendarOAuth2ClientCredentialDTO": { - "type": "object", - "properties": { - "provider": { - "type": "string", - "enum": [ - "google.calendar.oauth2-client" - ] - }, - "name": { - "type": "string", - "description": "This is the name of credential. This is just for your reference.", - "minLength": 1, - "maxLength": 40 - } - }, - "required": [ - "provider" - ] - }, - "CreateGoogleCalendarOAuth2AuthorizationCredentialDTO": { - "type": "object", - "properties": { - "provider": { - "type": "string", - "enum": [ - "google.calendar.oauth2-authorization" - ] - }, - "authorizationId": { - "type": "string", - "description": "The authorization ID for the OAuth2 authorization" - }, - "name": { - "type": "string", - "description": "This is the name of credential. This is just for your reference.", - "minLength": 1, - "maxLength": 40 - } - }, - "required": [ - "provider", - "authorizationId" - ] - }, - "CreateGoogleSheetsOAuth2AuthorizationCredentialDTO": { - "type": "object", - "properties": { - "provider": { - "type": "string", - "enum": [ - "google.sheets.oauth2-authorization" - ] - }, - "authorizationId": { - "type": "string", - "description": "The authorization ID for the OAuth2 authorization" - }, - "name": { - "type": "string", - "description": "This is the name of credential. This is just for your reference.", - "minLength": 1, - "maxLength": 40 - } - }, - "required": [ - "provider", - "authorizationId" - ] - }, - "CreateSlackOAuth2AuthorizationCredentialDTO": { - "type": "object", - "properties": { - "provider": { - "type": "string", - "enum": [ - "slack.oauth2-authorization" - ] - }, - "authorizationId": { - "type": "string", - "description": "The authorization ID for the OAuth2 authorization" - }, - "name": { - "type": "string", - "description": "This is the name of credential. This is just for your reference.", - "minLength": 1, - "maxLength": 40 - } - }, - "required": [ - "provider", - "authorizationId" - ] - }, - "CreateMinimaxCredentialDTO": { - "type": "object", - "properties": { - "provider": { - "type": "string", - "enum": [ - "minimax" - ] - }, - "apiKey": { - "type": "string", - "description": "This is not returned in the API." - }, - "groupId": { - "type": "string", - "description": "This is the Minimax Group ID." - }, - "name": { - "type": "string", - "description": "This is the name of credential. 
This is just for your reference.", - "minLength": 1, - "maxLength": 40 - } - }, - "required": [ - "provider", - "apiKey", - "groupId" - ] - }, - "TransferHookAction": { - "type": "object", - "properties": { - "type": { - "type": "string", - "description": "This is the type of action - must be \"transfer\"", - "enum": [ - "transfer" - ] - }, - "destination": { - "description": "This is the destination details for the transfer - can be a phone number or SIP URI", - "oneOf": [ - { - "$ref": "#/components/schemas/TransferDestinationNumber", - "title": "NumberTransferDestination" - }, - { - "$ref": "#/components/schemas/TransferDestinationSip", - "title": "SipTransferDestination" - } - ] - } - }, - "required": [ - "type" - ] - }, - "FunctionCallHookAction": { - "type": "object", - "properties": { - "messages": { - "type": "array", - "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.", - "items": { - "oneOf": [ - { - "$ref": "#/components/schemas/ToolMessageStart", - "title": "ToolMessageStart" - }, - { - "$ref": "#/components/schemas/ToolMessageComplete", - "title": "ToolMessageComplete" - }, - { - "$ref": "#/components/schemas/ToolMessageFailed", - "title": "ToolMessageFailed" - }, - { - "$ref": "#/components/schemas/ToolMessageDelayed", - "title": "ToolMessageDelayed" - } - ] - } - }, - "type": { - "type": "string", - "enum": [ - "function" - ], - "description": "The type of tool. \"function\" for Function tool." - }, - "async": { - "type": "boolean", - "example": false, - "description": "This determines if the tool is async.\n\n If async, the assistant will move forward without waiting for your server to respond. This is useful if you just want to trigger something on your server.\n\n If sync, the assistant will wait for your server to respond. This is useful if want assistant to respond with the result from your server.\n\n Defaults to synchronous (`false`)." 
- }, - "server": { - "description": "\n This is the server where a `tool-calls` webhook will be sent.\n\n Notes:\n - Webhook is sent to this server when a tool call is made.\n - Webhook contains the call, assistant, and phone number objects.\n - Webhook contains the variables set on the assistant.\n - Webhook is sent to the first available URL in this order: {{tool.server.url}}, {{assistant.server.url}}, {{phoneNumber.server.url}}, {{org.server.url}}.\n - Webhook expects a response with tool call result.", - "allOf": [ - { - "$ref": "#/components/schemas/Server" - } - ] - }, - "rejectionPlan": { - "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```", - "allOf": [ - { - "$ref": "#/components/schemas/ToolRejectionPlan" - } - ] - }, - "function": { - "description": "This is the function definition of the tool.", - "allOf": [ - { - "$ref": "#/components/schemas/OpenAIFunction" - } - ] - } - }, - "required": [ - "type" - ] - }, - "SayHookAction": { - "type": "object", - "properties": { - "type": { - "type": "string", - "description": "This is the type of action - must be \"say\"", - "enum": [ - "say" - ] - }, - "prompt": { - "description": "This is the prompt for the assistant to generate a response based on existing conversation.\nCan be a string or an array of chat messages.", - "oneOf": [ - { - "type": "string", - "title": "String" - }, - { - "type": "array", - "items": { - "oneOf": [ - { - "$ref": 
"#/components/schemas/SystemMessage", - "title": "SystemMessage" - }, - { - "$ref": "#/components/schemas/UserMessage", - "title": "UserMessage" - }, - { - "$ref": "#/components/schemas/AssistantMessage", - "title": "AssistantMessage" - }, - { - "$ref": "#/components/schemas/ToolMessage", - "title": "ToolMessage" - }, - { - "$ref": "#/components/schemas/DeveloperMessage", - "title": "DeveloperMessage" - } - ] - }, - "title": "MessageArray" - } - ], - "examples": [ - "Ask the user if they're still in the call", - [ - { - "role": "system", - "content": "You are a helpful assistant, and would like to know if the user is still in the call based on the conversation history in {{transcript}}" - } - ] - ] - }, - "exact": { - "type": "object", - "description": "This is the message to say" - } - }, - "required": [ - "type" - ] - }, - "CallHookFilter": { - "type": "object", - "properties": { - "type": { - "type": "string", - "description": "This is the type of filter - currently only \"oneOf\" is supported", - "enum": [ - "oneOf" - ], - "maxLength": 1000 - }, - "key": { - "type": "string", - "description": "This is the key to filter on (e.g. \"call.endedReason\")", - "maxLength": 1000 - }, - "oneOf": { - "description": "This is the array of possible values to match against", - "type": "array", - "items": { - "type": "string", - "maxLength": 1000 - } - } - }, - "required": [ - "type", - "key", - "oneOf" + "authenticationPlan" ] }, - "CallHookCallEnding": { + "CreateXAiCredentialDTO": { "type": "object", "properties": { - "on": { + "provider": { "type": "string", - "description": "This is the event that triggers this hook", + "description": "This is the api key for Grok in XAi's console. Get it from here: https://console.x.ai", "enum": [ - "call.ending" - ], - "maxLength": 1000 + "xai" + ] }, - "do": { - "type": "array", - "description": "This is the set of actions to perform when the hook triggers", - "items": { - "oneOf": [ - { - "$ref": "#/components/schemas/ToolCallHookAction", - "title": "ToolCallHookAction" - } - ] - } + "apiKey": { + "type": "string", + "maxLength": 10000, + "description": "This is not returned in the API." }, - "filters": { - "description": "This is the set of filters that must match for the hook to trigger", - "type": "array", - "items": { - "$ref": "#/components/schemas/CallHookFilter" - } + "name": { + "type": "string", + "description": "This is the name of credential. This is just for your reference.", + "minLength": 1, + "maxLength": 40 } }, "required": [ - "on", - "do" + "provider", + "apiKey" ] }, - "CallHookAssistantSpeechInterrupted": { + "CreateGoogleCalendarOAuth2ClientCredentialDTO": { "type": "object", "properties": { - "on": { + "provider": { "type": "string", - "description": "This is the event that triggers this hook", "enum": [ - "assistant.speech.interrupted" - ], - "maxLength": 1000 + "google.calendar.oauth2-client" + ] }, - "do": { - "type": "array", - "description": "This is the set of actions to perform when the hook triggers", - "items": { - "oneOf": [ - { - "$ref": "#/components/schemas/SayHookAction", - "title": "SayHookAction" - }, - { - "$ref": "#/components/schemas/ToolCallHookAction", - "title": "ToolCallHookAction" - } - ] - } + "name": { + "type": "string", + "description": "This is the name of credential. 
This is just for your reference.", + "minLength": 1, + "maxLength": 40 } }, "required": [ - "on", - "do" + "provider" ] }, - "CallHookCustomerSpeechInterrupted": { + "CreateGoogleCalendarOAuth2AuthorizationCredentialDTO": { "type": "object", "properties": { - "on": { + "provider": { "type": "string", - "description": "This is the event that triggers this hook", "enum": [ - "customer.speech.interrupted" - ], - "maxLength": 1000 + "google.calendar.oauth2-authorization" + ] }, - "do": { - "type": "array", - "description": "This is the set of actions to perform when the hook triggers", - "items": { - "oneOf": [ - { - "$ref": "#/components/schemas/SayHookAction", - "title": "SayHookAction" - }, - { - "$ref": "#/components/schemas/ToolCallHookAction", - "title": "ToolCallHookAction" - } - ] - } + "authorizationId": { + "type": "string", + "description": "The authorization ID for the OAuth2 authorization" + }, + "name": { + "type": "string", + "description": "This is the name of credential. This is just for your reference.", + "minLength": 1, + "maxLength": 40 } }, "required": [ - "on", - "do" + "provider", + "authorizationId" ] }, - "ToolCallHookAction": { + "CreateGoogleSheetsOAuth2AuthorizationCredentialDTO": { "type": "object", "properties": { - "type": { + "provider": { "type": "string", - "description": "This is the type of action - must be \"tool\"", "enum": [ - "tool" + "google.sheets.oauth2-authorization" ] }, - "tool": { - "description": "This is the tool to call. To use an existing tool, send `toolId` instead.", - "oneOf": [ - { - "$ref": "#/components/schemas/CreateApiRequestToolDTO", - "title": "ApiRequestTool" - }, - { - "$ref": "#/components/schemas/CreateBashToolDTO", - "title": "BashTool" - }, - { - "$ref": "#/components/schemas/CreateComputerToolDTO", - "title": "ComputerTool" - }, - { - "$ref": "#/components/schemas/CreateDtmfToolDTO", - "title": "DtmfTool" - }, - { - "$ref": "#/components/schemas/CreateEndCallToolDTO", - "title": "EndCallTool" - }, - { - "$ref": "#/components/schemas/CreateFunctionToolDTO", - "title": "FunctionTool" - }, - { - "$ref": "#/components/schemas/CreateGoHighLevelCalendarAvailabilityToolDTO", - "title": "GoHighLevelCalendarAvailabilityTool" - }, - { - "$ref": "#/components/schemas/CreateGoHighLevelCalendarEventCreateToolDTO", - "title": "GoHighLevelCalendarEventCreateTool" - }, - { - "$ref": "#/components/schemas/CreateGoHighLevelContactCreateToolDTO", - "title": "GoHighLevelContactCreateTool" - }, - { - "$ref": "#/components/schemas/CreateGoHighLevelContactGetToolDTO", - "title": "GoHighLevelContactGetTool" - }, - { - "$ref": "#/components/schemas/CreateGoogleCalendarCheckAvailabilityToolDTO", - "title": "GoogleCalendarCheckAvailabilityTool" - }, - { - "$ref": "#/components/schemas/CreateGoogleCalendarCreateEventToolDTO", - "title": "GoogleCalendarCreateEventTool" - }, - { - "$ref": "#/components/schemas/CreateGoogleSheetsRowAppendToolDTO", - "title": "GoogleSheetsRowAppendTool" - }, - { - "$ref": "#/components/schemas/CreateHandoffToolDTO", - "title": "HandoffTool" - }, - { - "$ref": "#/components/schemas/CreateMcpToolDTO", - "title": "McpTool" - }, - { - "$ref": "#/components/schemas/CreateQueryToolDTO", - "title": "QueryTool" - }, - { - "$ref": "#/components/schemas/CreateSlackSendMessageToolDTO", - "title": "SlackSendMessageTool" - }, - { - "$ref": "#/components/schemas/CreateSmsToolDTO", - "title": "SmsTool" - }, - { - "$ref": "#/components/schemas/CreateTextEditorToolDTO", - "title": "TextEditorTool" - }, - { - "$ref": 
"#/components/schemas/CreateTransferCallToolDTO", - "title": "TransferCallTool" - } - ] + "authorizationId": { + "type": "string", + "description": "The authorization ID for the OAuth2 authorization" }, - "toolId": { + "name": { "type": "string", - "description": "This is the tool to call. To use a transient tool, send `tool` instead." + "description": "This is the name of credential. This is just for your reference.", + "minLength": 1, + "maxLength": 40 } }, "required": [ - "type" + "provider", + "authorizationId" ] }, - "CustomerSpeechTimeoutOptions": { + "CreateSlackOAuth2AuthorizationCredentialDTO": { "type": "object", "properties": { - "timeoutSeconds": { - "type": "number", - "description": "This is the timeout in seconds before action is triggered.\nThe clock starts when the assistant finishes speaking and remains active until the user speaks.\n\n@default 7.5", - "minimum": 1, - "maximum": 1000 + "provider": { + "type": "string", + "enum": [ + "slack.oauth2-authorization" + ] }, - "triggerMaxCount": { - "type": "number", - "description": "This is the maximum number of times the hook will trigger in a call.\n\n@default 3", - "minimum": 1, - "maximum": 10 + "authorizationId": { + "type": "string", + "description": "The authorization ID for the OAuth2 authorization" }, - "triggerResetMode": { - "type": "object", - "description": "This is whether the counter for hook trigger resets the user speaks.\n\n@default never" + "name": { + "type": "string", + "description": "This is the name of credential. This is just for your reference.", + "minLength": 1, + "maxLength": 40 } }, "required": [ - "timeoutSeconds" + "provider", + "authorizationId" ] }, - "CallHookCustomerSpeechTimeout": { + "CreateMinimaxCredentialDTO": { "type": "object", "properties": { - "on": { + "provider": { "type": "string", - "description": "Must be either \"customer.speech.timeout\" or match the pattern \"customer.speech.timeout[property=value]\"", - "maxLength": 1000 + "enum": [ + "minimax" + ] }, - "do": { - "type": "array", - "description": "This is the set of actions to perform when the hook triggers", - "items": { - "oneOf": [ - { - "$ref": "#/components/schemas/SayHookAction", - "title": "SayHookAction" - }, - { - "$ref": "#/components/schemas/ToolCallHookAction", - "title": "ToolCallHookAction" - } - ] - } + "apiKey": { + "type": "string", + "description": "This is not returned in the API." }, - "options": { - "description": "This is the set of filters that must match for the hook to trigger", - "allOf": [ - { - "$ref": "#/components/schemas/CustomerSpeechTimeoutOptions" - } - ] + "groupId": { + "type": "string", + "description": "This is the Minimax Group ID." }, "name": { "type": "string", - "description": "This is the name of the hook, it can be set by the user to identify the hook.\nIf no name is provided, the hook will be auto generated as UUID.\n\n@default UUID", - "maxLength": 1000 + "description": "This is the name of credential. This is just for your reference.", + "minLength": 1, + "maxLength": 40 } }, "required": [ - "on", - "do" + "provider", + "apiKey", + "groupId" ] }, "SQLInjectionSecurityFilter": { @@ -24015,8 +24625,14 @@ "example": "assistant-speaks-first" }, "voicemailDetection": { - "description": "These are the settings to configure or disable voicemail detection. 
Alternatively, voicemail detection can be configured using the model.tools=[VoicemailTool].\nThis uses Twilio's built-in detection while the VoicemailTool relies on the model to detect if a voicemail was reached.\nYou can use neither of them, one of them, or both of them. By default, Twilio built-in detection is enabled while VoicemailTool is not.", + "description": "These are the settings to configure or disable voicemail detection. Alternatively, voicemail detection can be configured using the model.tools=[VoicemailTool].\nBy default, voicemail detection is disabled.", "oneOf": [ + { + "type": "string", + "enum": [ + "off" + ] + }, { "$ref": "#/components/schemas/GoogleVoicemailDetectionPlan", "title": "Google" @@ -24816,8 +25432,14 @@ "example": "assistant-speaks-first" }, "voicemailDetection": { - "description": "These are the settings to configure or disable voicemail detection. Alternatively, voicemail detection can be configured using the model.tools=[VoicemailTool].\nThis uses Twilio's built-in detection while the VoicemailTool relies on the model to detect if a voicemail was reached.\nYou can use neither of them, one of them, or both of them. By default, Twilio built-in detection is enabled while VoicemailTool is not.", + "description": "These are the settings to configure or disable voicemail detection. Alternatively, voicemail detection can be configured using the model.tools=[VoicemailTool].\nBy default, voicemail detection is disabled.", "oneOf": [ + { + "type": "string", + "enum": [ + "off" + ] + }, { "$ref": "#/components/schemas/GoogleVoicemailDetectionPlan", "title": "Google" @@ -25707,8 +26329,14 @@ "example": "assistant-speaks-first" }, "voicemailDetection": { - "description": "These are the settings to configure or disable voicemail detection. Alternatively, voicemail detection can be configured using the model.tools=[VoicemailTool].\nThis uses Twilio's built-in detection while the VoicemailTool relies on the model to detect if a voicemail was reached.\nYou can use neither of them, one of them, or both of them. By default, Twilio built-in detection is enabled while VoicemailTool is not.", + "description": "These are the settings to configure or disable voicemail detection. Alternatively, voicemail detection can be configured using the model.tools=[VoicemailTool].\nBy default, voicemail detection is disabled.", "oneOf": [ + { + "type": "string", + "enum": [ + "off" + ] + }, { "$ref": "#/components/schemas/GoogleVoicemailDetectionPlan", "title": "Google" @@ -26508,8 +27136,14 @@ "example": "assistant-speaks-first" }, "voicemailDetection": { - "description": "These are the settings to configure or disable voicemail detection. Alternatively, voicemail detection can be configured using the model.tools=[VoicemailTool].\nThis uses Twilio's built-in detection while the VoicemailTool relies on the model to detect if a voicemail was reached.\nYou can use neither of them, one of them, or both of them. By default, Twilio built-in detection is enabled while VoicemailTool is not.", + "description": "These are the settings to configure or disable voicemail detection. 
Alternatively, voicemail detection can be configured using the model.tools=[VoicemailTool].\nBy default, voicemail detection is disabled.", "oneOf": [ + { + "type": "string", + "enum": [ + "off" + ] + }, { "$ref": "#/components/schemas/GoogleVoicemailDetectionPlan", "title": "Google" @@ -27551,6 +28185,10 @@ { "$ref": "#/components/schemas/CallHookCustomerSpeechTimeout", "title": "CallHookCustomerSpeechTimeout" + }, + { + "$ref": "#/components/schemas/CallHookModelResponseTimeout", + "title": "CallHookModelResponseTimeout" } ] } @@ -27822,6 +28460,12 @@ "voicemailDetection": { "description": "This is the voicemail detection plan for the workflow.", "oneOf": [ + { + "type": "string", + "enum": [ + "off" + ] + }, { "$ref": "#/components/schemas/GoogleVoicemailDetectionPlan", "title": "Google" @@ -28183,6 +28827,10 @@ { "$ref": "#/components/schemas/CallHookCustomerSpeechTimeout", "title": "CallHookCustomerSpeechTimeout" + }, + { + "$ref": "#/components/schemas/CallHookModelResponseTimeout", + "title": "CallHookModelResponseTimeout" } ] } @@ -28454,6 +29102,12 @@ "voicemailDetection": { "description": "This is the voicemail detection plan for the workflow.", "oneOf": [ + { + "type": "string", + "enum": [ + "off" + ] + }, { "$ref": "#/components/schemas/GoogleVoicemailDetectionPlan", "title": "Google" @@ -28797,6 +29451,10 @@ { "$ref": "#/components/schemas/CallHookCustomerSpeechTimeout", "title": "CallHookCustomerSpeechTimeout" + }, + { + "$ref": "#/components/schemas/CallHookModelResponseTimeout", + "title": "CallHookModelResponseTimeout" } ] } @@ -29068,6 +29726,12 @@ "voicemailDetection": { "description": "This is the voicemail detection plan for the workflow.", "oneOf": [ + { + "type": "string", + "enum": [ + "off" + ] + }, { "$ref": "#/components/schemas/GoogleVoicemailDetectionPlan", "title": "Google" @@ -29577,12 +30241,21 @@ "type": "object", "description": "These are the structured outputs that will be extracted from the call.\nTo enable, set `assistant.artifactPlan.structuredOutputIds` with the IDs of the structured outputs you want to extract." }, + "scorecards": { + "type": "object", + "description": "These are the scorecards that have been evaluated based on the structured outputs extracted during the call.\nTo enable, set `assistant.artifactPlan.scorecardIds` or `assistant.artifactPlan.scorecards` with the IDs or objects of the scorecards you want to evaluate." + }, "transfers": { "description": "These are the transfer records from warm transfers, including destinations, transcripts, and status.", "type": "array", "items": { "type": "string" } + }, + "structuredOutputsLastUpdatedAt": { + "format": "date-time", + "type": "string", + "description": "This is when the structured outputs were last updated" } } }, @@ -30554,8 +31227,6 @@ "call.in-progress.error-warm-transfer-assistant-cancelled", "call.in-progress.error-warm-transfer-silence-timeout", "call.in-progress.error-warm-transfer-microphone-timeout", - "call.in-progress.error-warm-transfer-hang-timeout", - "call.in-progress.error-warm-transfer-idle-timeout", "assistant-ended-call", "assistant-said-end-call-phrase", "assistant-ended-call-with-hangup-task", @@ -31066,6 +31737,10 @@ "CreateWebCallDTO": { "type": "object", "properties": { + "roomDeleteOnUserLeaveEnabled": { + "type": "boolean", + "default": true + }, "assistantId": { "type": "string", "description": "This is the assistant ID that will be used for the call. 
To use a transient assistant, use `assistant` instead.\n\nTo start a call with:\n- Assistant, use `assistantId` or `assistant`\n- Squad, use `squadId` or `squad`\n- Workflow, use `workflowId` or `workflow`" @@ -31751,10 +32426,6 @@ "type": "string", "description": "This is the unique identifier for the squad that will be used for the chat." }, - "workflowId": { - "type": "string", - "description": "This is the unique identifier for the workflow that will be used for the chat." - }, "sessionId": { "type": "string", "description": "This is the unique identifier for the session that will be used for the chat." @@ -32548,7 +33219,7 @@ "description": "This is the phone number ID that will be used for the campaign calls." }, "schedulePlan": { - "description": "This is the schedule plan for the campaign.", + "description": "This is the schedule plan for the campaign. Calls will start at startedAt and continue until your organization’s concurrency limit is reached. Any remaining calls will be retried for up to one hour as capacity becomes available. After that hour or after latestAt, whichever comes first, any calls that couldn’t be placed won’t be retried.", "allOf": [ { "$ref": "#/components/schemas/SchedulePlan" @@ -32608,7 +33279,7 @@ "description": "This is the phone number ID that will be used for the campaign calls." }, "schedulePlan": { - "description": "This is the schedule plan for the campaign.", + "description": "This is the schedule plan for the campaign. Calls will start at startedAt and continue until your organization’s concurrency limit is reached. Any remaining calls will be retried for up to one hour as capacity becomes available. After that hour or after latestAt, whichever comes first, any calls that couldn’t be placed won’t be retried.", "allOf": [ { "$ref": "#/components/schemas/SchedulePlan" @@ -32757,6 +33428,30 @@ "type": "string", "description": "This is the ISO 8601 timestamp indicating when the session was last updated." }, + "cost": { + "type": "number", + "description": "This is the cost of the session in USD." + }, + "costs": { + "type": "array", + "description": "These are the costs of individual components of the session in USD.", + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/ModelCost", + "title": "ModelCost" + }, + { + "$ref": "#/components/schemas/AnalysisCost", + "title": "AnalysisCost" + }, + { + "$ref": "#/components/schemas/SessionCost", + "title": "SessionCost" + } + ] + } + }, "name": { "type": "string", "description": "This is a user-defined name for the session. 
Maximum length is 40 characters.", @@ -38431,85 +39126,176 @@ } ] } - }, - "required": [ - "id", - "orgId", - "createdAt", - "updatedAt", - "name", - "schema" - ] - }, - "StructuredOutputPaginatedResponse": { - "type": "object", - "properties": { - "results": { - "type": "array", - "items": { - "$ref": "#/components/schemas/StructuredOutput" - } - }, - "metadata": { - "$ref": "#/components/schemas/PaginationMeta" - } - }, - "required": [ - "results", - "metadata" - ] + }, + "required": [ + "id", + "orgId", + "createdAt", + "updatedAt", + "name", + "schema" + ] + }, + "StructuredOutputPaginatedResponse": { + "type": "object", + "properties": { + "results": { + "type": "array", + "items": { + "$ref": "#/components/schemas/StructuredOutput" + } + }, + "metadata": { + "$ref": "#/components/schemas/PaginationMeta" + } + }, + "required": [ + "results", + "metadata" + ] + }, + "CreateStructuredOutputDTO": { + "type": "object", + "properties": { + "model": { + "description": "This is the model that will be used to extract the structured output.\n\nTo provide your own custom system and user prompts for structured output extraction, populate the messages array with your system and user messages. You can specify liquid templating in your system and user messages.\nBetween the system or user messages, you must reference either 'transcript' or 'messages' with the '{{}}' syntax to access the conversation history.\nBetween the system or user messages, you must reference a variation of the structured output with the '{{}}' syntax to access the structured output definition.\ni.e.:\n{{structuredOutput}}\n{{structuredOutput.name}}\n{{structuredOutput.description}}\n{{structuredOutput.schema}}\n\nIf model is not specified, GPT-4.1 will be used by default for extraction, utilizing default system and user prompts.\nIf messages or required fields are not specified, the default system and user prompts will be used.", + "oneOf": [ + { + "$ref": "#/components/schemas/WorkflowOpenAIModel", + "title": "WorkflowOpenAIModel" + }, + { + "$ref": "#/components/schemas/WorkflowAnthropicModel", + "title": "WorkflowAnthropicModel" + }, + { + "$ref": "#/components/schemas/WorkflowGoogleModel", + "title": "WorkflowGoogleModel" + }, + { + "$ref": "#/components/schemas/WorkflowCustomModel", + "title": "WorkflowCustomModel" + } + ] + }, + "name": { + "type": "string", + "description": "This is the name of the structured output.", + "minLength": 1, + "maxLength": 40 + }, + "schema": { + "description": "This is the JSON Schema definition for the structured output.\n\nThis is required when creating a structured output. Defines the structure and validation rules for the data that will be extracted. Supports all JSON Schema features including:\n- Objects and nested properties\n- Arrays and array validation\n- String, number, boolean, and null types\n- Enums and const values\n- Validation constraints (min/max, patterns, etc.)\n- Composition with allOf, anyOf, oneOf", + "allOf": [ + { + "$ref": "#/components/schemas/JsonSchema" + } + ] + }, + "description": { + "type": "string", + "description": "This is the description of what the structured output extracts.\n\nUse this to provide context about what data will be extracted and how it will be used." 
+ },
+ "assistantIds": {
+ "description": "These are the assistant IDs that this structured output is linked to.\n\nWhen linked to assistants, this structured output will be available for extraction during those assistants' calls.",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "workflowIds": {
+ "description": "These are the workflow IDs that this structured output is linked to.\n\nWhen linked to workflows, this structured output will be available for extraction during those workflows' execution.",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ }
+ },
+ "required": [
+ "name",
+ "schema"
+ ]
+ },
+ "UpdateStructuredOutputDTO": {
+ "type": "object",
+ "properties": {
+ "model": {
+ "description": "This is the model that will be used to extract the structured output.\n\nTo provide your own custom system and user prompts for structured output extraction, populate the messages array with your system and user messages. You can specify liquid templating in your system and user messages.\nBetween the system or user messages, you must reference either 'transcript' or 'messages' with the '{{}}' syntax to access the conversation history.\nBetween the system or user messages, you must reference a variation of the structured output with the '{{}}' syntax to access the structured output definition.\ni.e.:\n{{structuredOutput}}\n{{structuredOutput.name}}\n{{structuredOutput.description}}\n{{structuredOutput.schema}}\n\nIf model is not specified, GPT-4.1 will be used by default for extraction, utilizing default system and user prompts.\nIf messages or required fields are not specified, the default system and user prompts will be used.",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/WorkflowOpenAIModel",
+ "title": "WorkflowOpenAIModel"
+ },
+ {
+ "$ref": "#/components/schemas/WorkflowAnthropicModel",
+ "title": "WorkflowAnthropicModel"
+ },
+ {
+ "$ref": "#/components/schemas/WorkflowGoogleModel",
+ "title": "WorkflowGoogleModel"
+ },
+ {
+ "$ref": "#/components/schemas/WorkflowCustomModel",
+ "title": "WorkflowCustomModel"
+ }
+ ]
+ },
+ "name": {
+ "type": "string",
+ "description": "This is the name of the structured output.",
+ "minLength": 1,
+ "maxLength": 40
+ },
+ "description": {
+ "type": "string",
+ "description": "This is the description of what the structured output extracts.\n\nUse this to provide context about what data will be extracted and how it will be used."
+ },
+ "assistantIds": {
+ "description": "These are the assistant IDs that this structured output is linked to.\n\nWhen linked to assistants, this structured output will be available for extraction during those assistants' calls.",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "workflowIds": {
+ "description": "These are the workflow IDs that this structured output is linked to.\n\nWhen linked to workflows, this structured output will be available for extraction during those workflows' execution.",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "schema": {
+ "description": "This is the JSON Schema definition for the structured output.\n\nDefines the structure and validation rules for the data that will be extracted. 
Supports all JSON Schema features including:\n- Objects and nested properties\n- Arrays and array validation\n- String, number, boolean, and null types\n- Enums and const values\n- Validation constraints (min/max, patterns, etc.)\n- Composition with allOf, anyOf, oneOf", + "allOf": [ + { + "$ref": "#/components/schemas/JsonSchema" + } + ] + } + } }, - "CreateStructuredOutputDTO": { + "StructuredOutputRunDTO": { "type": "object", "properties": { - "model": { - "description": "This is the model that will be used to extract the structured output.\n\nTo provide your own custom system and user prompts for structured output extraction, populate the messages array with your system and user messages. You can specify liquid templating in your system and user messages.\nBetween the system or user messages, you must reference either 'transcript' or 'messages' with the '{{}}' syntax to access the conversation history.\nBetween the system or user messages, you must reference a variation of the structured output with the '{{}}' syntax to access the structured output definition.\ni.e.:\n{{structuredOutput}}\n{{structuredOutput.name}}\n{{structuredOutput.description}}\n{{structuredOutput.schema}}\n\nIf model is not specified, GPT-4.1 will be used by default for extraction, utilizing default system and user prompts.\nIf messages or required fields are not specified, the default system and user prompts will be used.", - "oneOf": [ - { - "$ref": "#/components/schemas/WorkflowOpenAIModel", - "title": "WorkflowOpenAIModel" - }, - { - "$ref": "#/components/schemas/WorkflowAnthropicModel", - "title": "WorkflowAnthropicModel" - }, - { - "$ref": "#/components/schemas/WorkflowGoogleModel", - "title": "WorkflowGoogleModel" - }, - { - "$ref": "#/components/schemas/WorkflowCustomModel", - "title": "WorkflowCustomModel" - } - ] + "previewEnabled": { + "type": "boolean", + "description": "This is the preview flag for the re-run. If true, the re-run will be executed and the response will be returned immediately and the call artifact will NOT be updated.\nIf false (default), the re-run will be executed and the response will be updated in the call artifact.", + "default": false }, - "name": { + "structuredOutputId": { "type": "string", - "description": "This is the name of the structured output.", - "minLength": 1, - "maxLength": 40 + "description": "This is the ID of the structured output that will be run. This must be provided unless a transient structured output is provided.\nWhen the re-run is executed, only the value of this structured output will be replaced with the new value, or added if not present." }, - "schema": { - "description": "This is the JSON Schema definition for the structured output.\n\nThis is required when creating a structured output. Defines the structure and validation rules for the data that will be extracted. Supports all JSON Schema features including:\n- Objects and nested properties\n- Arrays and array validation\n- String, number, boolean, and null types\n- Enums and const values\n- Validation constraints (min/max, patterns, etc.)\n- Composition with allOf, anyOf, oneOf", + "structuredOutput": { + "description": "This is the transient structured output that will be run. 
This must be provided if a structured output ID is not provided.\nWhen the re-run is executed, the structured output value will be added to the existing artifact.", "allOf": [ { - "$ref": "#/components/schemas/JsonSchema" + "$ref": "#/components/schemas/CreateStructuredOutputDTO" } ] }, - "description": { - "type": "string", - "description": "This is the description of what the structured output extracts.\n\nUse this to provide context about what data will be extracted and how it will be used." - }, - "assistantIds": { - "description": "These are the assistant IDs that this structured output is linked to.\n\nWhen linked to assistants, this structured output will be available for extraction during those assistant's calls.", - "type": "array", - "items": { - "type": "string" - } - }, - "workflowIds": { - "description": "These are the workflow IDs that this structured output is linked to.\n\nWhen linked to workflows, this structured output will be available for extraction during those workflow's execution.", + "callIds": { + "description": "This is the array of callIds that will be updated with the new structured output value. If preview is true, this array must be provided and contain exactly 1 callId.\nIf preview is false, up to 100 callIds may be provided.", "type": "array", "items": { "type": "string" @@ -38517,68 +39303,9 @@ } }, "required": [ - "name", - "schema" + "callIds" ] }, - "UpdateStructuredOutputDTO": { - "type": "object", - "properties": { - "model": { - "description": "This is the model that will be used to extract the structured output.\n\nTo provide your own custom system and user prompts for structured output extraction, populate the messages array with your system and user messages. You can specify liquid templating in your system and user messages.\nBetween the system or user messages, you must reference either 'transcript' or 'messages' with the '{{}}' syntax to access the conversation history.\nBetween the system or user messages, you must reference a variation of the structured output with the '{{}}' syntax to access the structured output definition.\ni.e.:\n{{structuredOutput}}\n{{structuredOutput.name}}\n{{structuredOutput.description}}\n{{structuredOutput.schema}}\n\nIf model is not specified, GPT-4.1 will be used by default for extraction, utilizing default system and user prompts.\nIf messages or required fields are not specified, the default system and user prompts will be used.", - "oneOf": [ - { - "$ref": "#/components/schemas/WorkflowOpenAIModel", - "title": "WorkflowOpenAIModel" - }, - { - "$ref": "#/components/schemas/WorkflowAnthropicModel", - "title": "WorkflowAnthropicModel" - }, - { - "$ref": "#/components/schemas/WorkflowGoogleModel", - "title": "WorkflowGoogleModel" - }, - { - "$ref": "#/components/schemas/WorkflowCustomModel", - "title": "WorkflowCustomModel" - } - ] - }, - "name": { - "type": "string", - "description": "This is the name of the structured output.", - "minLength": 1, - "maxLength": 40 - }, - "description": { - "type": "string", - "description": "This is the description of what the structured output extracts.\n\nUse this to provide context about what data will be extracted and how it will be used." 
- }, - "assistantIds": { - "description": "These are the assistant IDs that this structured output is linked to.\n\nWhen linked to assistants, this structured output will be available for extraction during those assistant's calls.", - "type": "array", - "items": { - "type": "string" - } - }, - "workflowIds": { - "description": "These are the workflow IDs that this structured output is linked to.\n\nWhen linked to workflows, this structured output will be available for extraction during those workflow's execution.", - "type": "array", - "items": { - "type": "string" - } - }, - "schema": { - "description": "This is the JSON Schema definition for the structured output.\n\nDefines the structure and validation rules for the data that will be extracted. Supports all JSON Schema features including:\n- Objects and nested properties\n- Arrays and array validation\n- String, number, boolean, and null types\n- Enums and const values\n- Validation constraints (min/max, patterns, etc.)\n- Composition with allOf, anyOf, oneOf", - "allOf": [ - { - "$ref": "#/components/schemas/JsonSchema" - } - ] - } - } - }, "TesterPlan": { "type": "object", "properties": { @@ -39938,11 +40665,20 @@ "description": "This is the max tokens of the model.\nIf your Judge instructions return `true` or `false` takes only 1 token (as per the OpenAI Tokenizer), and therefore is recommended to set it to a low number to force the model to return a short response.", "minimum": 50, "maximum": 10000 + }, + "messages": { + "description": "These are the messages which will instruct the AI Judge on how to evaluate the assistant message.\nThe LLM-Judge must respond with \"pass\" or \"fail\" to indicate if the assistant message passes the eval.\n\nTo access the messages in the mock conversation, use the LiquidJS variable `{{messages}}`.\nThe assistant message to be evaluated will be passed as the last message in the `messages` array and can be accessed using `{{messages[-1]}}`.\n\nIt is recommended to use the system message to instruct the LLM how to evaluate the assistant message, and then use the first user message to pass the assistant message to be evaluated.", + "example": "{", + "type": "array", + "items": { + "type": "object" + } } }, "required": [ "provider", - "model" + "model", + "messages" ] }, "EvalAnthropicModel": { @@ -39992,11 +40728,20 @@ "description": "This is the max tokens of the model.\nIf your Judge instructions return `true` or `false` takes only 1 token (as per the OpenAI Tokenizer), and therefore is recommended to set it to a low number to force the model to return a short response.", "minimum": 50, "maximum": 10000 + }, + "messages": { + "description": "These are the messages which will instruct the AI Judge on how to evaluate the assistant message.\nThe LLM-Judge must respond with \"pass\" or \"fail\" to indicate if the assistant message passes the eval.\n\nTo access the messages in the mock conversation, use the LiquidJS variable `{{messages}}`.\nThe assistant message to be evaluated will be passed as the last message in the `messages` array and can be accessed using `{{messages[-1]}}`.\n\nIt is recommended to use the system message to instruct the LLM how to evaluate the assistant message, and then use the first user message to pass the assistant message to be evaluated.", + "example": "{", + "type": "array", + "items": { + "type": "object" + } } }, "required": [ "provider", - "model" + "model", + "messages" ] }, "EvalGoogleModel": { @@ -40041,11 +40786,20 @@ "description": "This is the max tokens of the 
model.\nIf your Judge instructions return `true` or `false` takes only 1 token (as per the OpenAI Tokenizer), and therefore is recommended to set it to a low number to force the model to return a short response.", "minimum": 50, "maximum": 10000 + }, + "messages": { + "description": "These are the messages which will instruct the AI Judge on how to evaluate the assistant message.\nThe LLM-Judge must respond with \"pass\" or \"fail\" to indicate if the assistant message passes the eval.\n\nTo access the messages in the mock conversation, use the LiquidJS variable `{{messages}}`.\nThe assistant message to be evaluated will be passed as the last message in the `messages` array and can be accessed using `{{messages[-1]}}`.\n\nIt is recommended to use the system message to instruct the LLM how to evaluate the assistant message, and then use the first user message to pass the assistant message to be evaluated.", + "example": "{", + "type": "array", + "items": { + "type": "object" + } } }, "required": [ "provider", - "model" + "model", + "messages" ] }, "EvalGroqModel": { @@ -40091,11 +40845,20 @@ "description": "This is the max tokens of the model.\nIf your Judge instructions return `true` or `false` takes only 1 token (as per the OpenAI Tokenizer), and therefore is recommended to set it to a low number to force the model to return a short response.", "minimum": 50, "maximum": 10000 + }, + "messages": { + "description": "These are the messages which will instruct the AI Judge on how to evaluate the assistant message.\nThe LLM-Judge must respond with \"pass\" or \"fail\" to indicate if the assistant message passes the eval.\n\nTo access the messages in the mock conversation, use the LiquidJS variable `{{messages}}`.\nThe assistant message to be evaluated will be passed as the last message in the `messages` array and can be accessed using `{{messages[-1]}}`.\n\nIt is recommended to use the system message to instruct the LLM how to evaluate the assistant message, and then use the first user message to pass the assistant message to be evaluated.", + "example": "{", + "type": "array", + "items": { + "type": "object" + } } }, "required": [ "provider", - "model" + "model", + "messages" ] }, "EvalCustomModel": { @@ -40138,12 +40901,21 @@ "description": "This is the max tokens of the model.\nIf your Judge instructions return `true` or `false` takes only 1 token (as per the OpenAI Tokenizer), and therefore is recommended to set it to a low number to force the model to return a short response.", "minimum": 50, "maximum": 10000 + }, + "messages": { + "description": "These are the messages which will instruct the AI Judge on how to evaluate the assistant message.\nThe LLM-Judge must respond with \"pass\" or \"fail\" to indicate if the assistant message passes the eval.\n\nTo access the messages in the mock conversation, use the LiquidJS variable `{{messages}}`.\nThe assistant message to be evaluated will be passed as the last message in the `messages` array and can be accessed using `{{messages[-1]}}`.\n\nIt is recommended to use the system message to instruct the LLM how to evaluate the assistant message, and then use the first user message to pass the assistant message to be evaluated.", + "example": "{", + "type": "array", + "items": { + "type": "object" + } } }, "required": [ "provider", "url", - "model" + "model", + "messages" ] }, "AssistantMessageJudgePlanAI": { @@ -40541,6 +41313,19 @@ "$ref": "#/components/schemas/EvalRunResult" } }, + "cost": { + "type": "number", + "description": "This is the cost 
of the eval or suite run in USD.",
+ "example": 0.01
+ },
+ "costs": {
+ "description": "This is the breakdown of the costs of the eval or suite run.",
+ "example": "[{ type: \"model\", model: \"gpt-4o\", cost: 0.01 }]",
+ "type": "array",
+ "items": {
+ "type": "object"
+ }
+ },
"type": {
"type": "string",
"description": "This is the type of the run.\nCurrently it is fixed to `eval`.",
@@ -40565,6 +41350,8 @@
"startedAt",
"endedAt",
"results",
+ "cost",
+ "costs",
"type"
]
},
@@ -40753,6 +41540,107 @@
"type"
]
},
+ "Scorecard": {
+ "type": "object",
+ "properties": {
+ "id": {
+ "type": "string",
+ "description": "This is the unique identifier for the scorecard."
+ },
+ "orgId": {
+ "type": "string",
+ "description": "This is the unique identifier for the org that this scorecard belongs to."
+ },
+ "createdAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is the ISO 8601 date-time string of when the scorecard was created."
+ },
+ "updatedAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is the ISO 8601 date-time string of when the scorecard was last updated."
+ },
+ "name": {
+ "type": "string",
+ "description": "This is the name of the scorecard. It is only for user reference and will not be used for any evaluation.",
+ "maxLength": 80
+ },
+ "description": {
+ "type": "string",
+ "description": "This is the description of the scorecard. It is only for user reference and will not be used for any evaluation.",
+ "maxLength": 500
+ },
+ "metrics": {
+ "description": "These are the metrics that will be used to evaluate the scorecard.\nEach metric will have a set of conditions and points that will be used to generate the score.",
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/ScorecardMetric"
+ }
+ },
+ "assistantIds": {
+ "description": "These are the assistant IDs that this scorecard is linked to.\nWhen linked to assistants, this scorecard will be available for evaluation during those assistants' calls.",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ }
+ },
+ "required": [
+ "id",
+ "orgId",
+ "createdAt",
+ "updatedAt",
+ "metrics"
+ ]
+ },
+ "ScorecardPaginatedResponse": {
+ "type": "object",
+ "properties": {
+ "results": {
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/Scorecard"
+ }
+ },
+ "metadata": {
+ "$ref": "#/components/schemas/PaginationMeta"
+ }
+ },
+ "required": [
+ "results",
+ "metadata"
+ ]
+ },
+ "UpdateScorecardDTO": {
+ "type": "object",
+ "properties": {
+ "name": {
+ "type": "string",
+ "description": "This is the name of the scorecard. It is only for user reference and will not be used for any evaluation.",
+ "maxLength": 80
+ },
+ "description": {
+ "type": "string",
+ "description": "This is the description of the scorecard. 
It is only for user reference and will not be used for any evaluation.", + "maxLength": 500 + }, + "metrics": { + "description": "These are the metrics that will be used to evaluate the scorecard.\nEach metric will have a set of conditions and points that will be used to generate the score.", + "type": "array", + "items": { + "$ref": "#/components/schemas/ScorecardMetric" + } + }, + "assistantIds": { + "description": "These are the assistant IDs that this scorecard is linked to.\nWhen linked to assistants, this scorecard will be available for evaluation during those assistants' calls.", + "type": "array", + "items": { + "type": "string" + } + } + } + }, "CreateOrgDTO": { "type": "object", "properties": { @@ -49614,8 +50502,6 @@ "call.in-progress.error-warm-transfer-assistant-cancelled", "call.in-progress.error-warm-transfer-silence-timeout", "call.in-progress.error-warm-transfer-microphone-timeout", - "call.in-progress.error-warm-transfer-hang-timeout", - "call.in-progress.error-warm-transfer-idle-timeout", "assistant-ended-call", "assistant-said-end-call-phrase", "assistant-ended-call-with-hangup-task", @@ -50912,8 +51798,6 @@ "call.in-progress.error-warm-transfer-assistant-cancelled", "call.in-progress.error-warm-transfer-silence-timeout", "call.in-progress.error-warm-transfer-microphone-timeout", - "call.in-progress.error-warm-transfer-hang-timeout", - "call.in-progress.error-warm-transfer-idle-timeout", "assistant-ended-call", "assistant-said-end-call-phrase", "assistant-ended-call-with-hangup-task", @@ -53646,6 +54530,26 @@ "cost" ] }, + "SessionCost": { + "type": "object", + "properties": { + "type": { + "type": "string", + "description": "This is the type of cost, always 'session' for this class.", + "enum": [ + "session" + ] + }, + "cost": { + "type": "number", + "description": "This is the cost of the component in USD." + } + }, + "required": [ + "type", + "cost" + ] + }, "FunctionToolWithToolCall": { "type": "object", "properties": {