LemurRequestDetails: {
    answer_format?: string;
    context?: OneOf<[string, {
        [key: string]: unknown;
    }]>;
    created_at: Date;
    final_model: LiteralUnion<LemurModel, string>;
    input_text?: string;
    max_output_size: number;
    prompt?: string;
    questions?: LemurQuestion[];
    request_endpoint: string;
    temperature: number;
    transcript_ids?: string[];
}

Type declaration

  • Optional answer_format?: string

    The format to use for the model's answers.

  • Optional context?: OneOf<[string, {
        [key: string]: unknown;
    }]>

    Context provided to the model. This can be a string or a free-form JSON value.

  • created_at: Date

    The date when the request was created.

  • final_model: LiteralUnion<LemurModel, string>

    The model that was used for the final prompt after compression is performed.

    Default: "default"
    
  • Optional input_text?: string

    Custom formatted transcript data. Maximum size is the context limit of the selected model, which defaults to 100000. Use either transcript_ids or input_text as input into LeMUR.

  • max_output_size: number

    Max output size in tokens, up to 4000

    Default: 2000
    
  • Optional prompt?: string

    The prompt used for the model.

  • Optional questions?: LemurQuestion[]

    A list of questions asked in the request. Each question can have its own context and answer format.

  • request_endpoint: string

    The endpoint used for the LeMUR request

  • temperature: number

    The temperature to use for the model. Higher values result in answers that are more creative, lower values are more conservative. Can be any value between 0.0 and 1.0 inclusive.

    Default: 0
    
  • Optional transcript_ids?: string[]

    A list of completed transcripts with text. Use either transcript_ids or input_text as input into LeMUR.

{
  "request_id": "5e1b27c2-691f-4414-8bc5-f14678442f9e",
  "usage": {
    "input_tokens": 27,
    "output_tokens": 3
  }
}