typings/lib2/llmcomponent/llmComponentContext.js

"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.LlmComponentContext = void 0;
const baseContext_1 = require("../../lib/component/baseContext");
// Response template
const RESPONSE = {
    context: undefined
};
/**
 * The Bots LlmComponentContext is a class with convenience methods for working with large language models.
 * <p>
 * An LlmComponentContext class instance is passed as an argument to every LLM event handler function.
 * @memberof module:Lib
 * @extends BaseContext
 * @alias LlmComponentContext
 */
class LlmComponentContext extends baseContext_1.BaseContext {
    /**
     * Constructor of the LLM component context.
     * DO NOT USE - INSTANCE IS ALREADY PASSED TO EVENT HANDLERS
     * @param {object} request
     */
    constructor(request) {
        // Initialize the response
        const response = Object.assign({}, RESPONSE, {
            messageHistory: request.messageHistory,
        });
        super(request, response);
        this._chatEntries = request.messageHistory.entries;
        this._turn = request.messageHistory.turn;
    }
    /**
     * Adds a message to the bot response sent to the user.
     * @param {object} payload - can take a string message, or a message created using the MessageFactory
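     * @example <caption>A minimal sketch: send an interim status message to the user (the text is illustrative):</caption>
     * context.addMessage('One moment while I work on that...');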
     */
    addMessage(payload) {
        this.getResponse().messages = this.getResponse().messages || [];
        this.getResponse().messages.push(super.constructMessagePayload(payload));
    }
    /**
     * Set a transition action. When you use this function, the dialog engine will transition to the state defined for this transition action.
     * <p>
     * @param {string} action - name of the transition action
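     * @example <caption>A minimal sketch, assuming a 'done' transition action is defined for the LLM state in the flow:</caption>
     * context.setTransitionAction('done');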
     */
    setTransitionAction(action) {
        this.getResponse().transitionAction = action;
    }
    /**
     * Sets the LLM prompt that will be sent to the LLM in the next turn
     * <p>
     * @param {string} prompt - the text of the prompt
     * @param {boolean} isRetry - is the prompt used to try to fix a prior invalid LLM response
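     * @example <caption>A minimal sketch: send a follow-up prompt in the next turn (the prompt text is illustrative):</caption>
     * context.setNextLLMPrompt('Shorten the previous answer to at most 100 words.', false);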
     */
    setNextLLMPrompt(prompt, isRetry) {
        this.getResponse().llmPrompt = prompt;
        this.getResponse().retryPrompt = isRetry;
    }
    /**
     * Set the value of the LLM result variable
     * <p>
     * @param {object} result - the value
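     * @example <caption>A minimal sketch: store the last LLM response as the result value:</caption>
     * const last = context.getLastAssistantMessage();
     * if (last) {
     *     context.setResultVariable(last.content);
     * }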
     */
    setResultVariable(result) {
        const varName = this.getRequest().resultVariable;
        if (varName) {
            this.setVariable(varName, result);
        }
    }
    /**
     * Get the value of the LLM result variable
     * <p>
     * @returns {object} the result value
     */
    getResultVariable() {
        const varName = this.getRequest().resultVariable;
        if (varName) {
            return this.getVariable(varName);
        }
        return undefined;
    }
    /**
     * Array of chat messages that are exchanged with the LLM.
     * Each message has the following properties:
     * - role: the role under which the message is sent: system, user or assistant
     * - content: the message text
     * - turn: number indicating the refinement turn of the chat messages exchange
     * @returns {ChatEntry[]} the chat messages
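     * @example <caption>A minimal sketch: collect the messages the assistant has sent so far:</caption>
     * const assistantMessages = context.getChatHistory().filter(entry => entry.role === 'assistant');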
     */
    getChatHistory() {
        return this._chatEntries;
    }
    /**
     * Returns a number indicating the current refinement turn of the chat messages exchange.
     * When the first prompt is sent to the LLM, the turn is 1.
     * @returns {number} the turn
     */
    getCurrentTurn() {
        return this._turn;
    }
    /**
     * Returns the LLM system prompt
     * @returns {string} the prompt
     */
    getSystemPrompt() {
        return this._chatEntries[0].content;
    }
    /**
     * Returns the last response sent by the LLM
     * @returns {ChatEntry} the message
     */
    getLastAssistantMessage() {
        return this.getLastMessage('assistant');
    }
    /**
     * Returns the last message sent by the user
     * @returns {ChatEntry} the message
     */
    getLastUserMessage() {
        return this.getLastMessage('user');
    }
    /**
     * Update the LLM system prompt
     * @param {string} prompt - the new prompt
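     * @example <caption>A minimal sketch: append an extra instruction to the system prompt (the instruction text is illustrative):</caption>
     * context.updateSystemPrompt(context.getSystemPrompt() + '\nAlways respond in English.');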
     */
    updateSystemPrompt(prompt) {
        this._chatEntries[0].content = prompt;
    }
    /**
     * Update the last LLM response message
     * @param {string} message - the new message
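     * @example <caption>A minimal sketch: trim surrounding whitespace from the last LLM response:</caption>
     * const last = context.getLastAssistantMessage();
     * if (last) {
     *     context.updateLastAssistantMessage(last.content.trim());
     * }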
     */
    updateLastAssistantMessage(message) {
        for (let index = this._chatEntries.length - 1; index > 0; index--) {
            if (this._chatEntries[index].role === 'assistant') {
                this._chatEntries[index].content = message;
                break;
            }
        }
    }
    /**
     * Set the request or LLM response validation error
     * @param {string} errorMessage - the error message
     * @param {string} errorCode - allowable values: 'requestFlagged', 'responseFlagged', 'requestInvalid',
     *  'responseInvalid', 'modelLengthExceeded'
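     * @example <caption>A minimal sketch: flag an inappropriate LLM response (the error message is illustrative):</caption>
     * context.setValidationError('The response was flagged as inappropriate.', 'responseFlagged');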
     */
    setValidationError(errorMessage, errorCode) {
        this.getResponse().validationErrorMessage = errorMessage;
        this.getResponse().validationErrorCode = errorCode;
    }
    /**
     * Converts the message to a JSON object:
     * - it first searches for the first occurrence of an opening curly bracket '{' and the last occurrence of a closing curly bracket '}'
     * - it then tries to parse the text between the opening and closing curly brackets into a JSON object
     * - if parsing is successful, the JSON object is returned; otherwise the method returns undefined
     * @param {string} message - the message to convert
     * @returns {object | undefined} the parsed message, or undefined
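     * @example <caption>A minimal sketch: parse the last LLM response into an object:</caption>
     * const last = context.getLastAssistantMessage();
     * const data = last ? context.convertToJSON(last.content) : undefined;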
     */
    convertToJSON(message) {
        const startIndex = message.indexOf('{');
        const endIndex = message.lastIndexOf('}');
        if (!(startIndex >= 0 && endIndex > startIndex)) {
            return undefined;
        }
        else {
            // remove newline and carriage-return characters, which would otherwise make JSON.parse
            // fail when they appear inside string values
            const jsonText = message.slice(startIndex, endIndex + 1).replace(/\n/g, '').replace(/\r/g, '');
            try {
                return JSON.parse(jsonText);
            }
            catch (err) {
                return undefined;
            }
        }
    }
    /**
     * Returns true when JSON formatting is enabled in the associated Invoke LLM state in Visual Flow Designer
     * @returns {boolean} true if JSON validation is enabled
     */
    isJsonValidationEnabled() {
        return this.getRequest().validateJson;
    }
    /**
     * Returns the number of retry prompts that have been sent to the LLM since the last successful LLM response
     * @returns {number} the number of retries
     */
    getRetries() {
        return this.getResponse().messageHistory.retries;
    }
    /**
     * Returns the maximum number of retry prompts that will be sent to the LLM when the response is invalid
     * @returns {number} the maximum number
     */
    getMaxRetries() {
        return this.getRequest().maxRetries;
    }
    /**
     * Returns the status message that is sent to the user when the LLM is invoked with a retry prompt
     * @returns {string} the message
     */
    getRetryUserMessage() {
        return this.getRequest().retryUserMessage;
    }
    /**
     * Returns the JSON schema used to validate the LLM response
     * @returns {object} the JSON schema
     */
    getJsonSchema() {
        return this.getRequest().jsonSchema;
    }
    /**
     * Returns the template used to enrich the system prompt with instructions to comply with a JSON schema
     * @returns {string} the instruction template
     */
    getJsonSchemaInstructionTemplate() {
        return this.getRequest().jsonSchemaInstruction;
    }
    /**
     * Returns the template used to send a retry prompt to the LLM when validation errors have been found.
     * @returns {string} the retry prompt template
     */
    getInvalidResponseTemplate() {
        return this.getRequest().invalidResponsePrompt;
    }
    /**
     * Sets the value of a custom property that is stored in the LLM context. A custom property can be
     * used to maintain custom state across event handler calls while interacting with the LLM within the
     * current state in Visual Flow Designer.
     * If you set the value to null, the custom property is removed.
     * @param {string} name - name of the custom property
     * @param {object} value - value of the custom property
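     * @example <caption>A minimal sketch: maintain a counter across event handler calls (the property name is illustrative):</caption>
     * const count = (context.getCustomProperty('followupCount') || 0) + 1;
     * context.setCustomProperty('followupCount', count);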
     */
    setCustomProperty(name, value) {
        if (value === null && this.getResponse().messageHistory.customProperties) {
            delete this.getResponse().messageHistory.customProperties[name];
        }
        else {
            if (!this.getResponse().messageHistory.customProperties) {
                this.getResponse().messageHistory.customProperties = {};
            }
            this.getResponse().messageHistory.customProperties[name] = value;
        }
    }
    /**
     * Returns the value of a custom property that is stored in the LLM context. A custom property can be
     * used to maintain custom state across event handler calls while interacting with the LLM within the
     * current state in Visual Flow Designer.
     * @return {object} value of the custom property
     * @param {string} name - name of the custom property
     */
    getCustomProperty(name) {
        if (this.getResponse().messageHistory.customProperties) {
            return this.getResponse().messageHistory.customProperties[name];
        }
        return undefined;
    }
    /**
     * Create a postback action that sends a new prompt to the LLM.
     * <p>
     * @param {string} label - the label of the postback action button
     * @param {string} prompt - the text of the prompt
     * @returns {PostbackAction} the postback action
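     * @example <caption>A minimal sketch, assuming the message factory's createTextMessage and the message's addAction methods (label and prompt are illustrative):</caption>
     * const message = context.getMessageFactory().createTextMessage('Want to know more?')
     *     .addAction(context.createLLMPromptAction('Tell me more', 'Expand on the previous answer.'));
     * context.addMessage(message);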
     */
    createLLMPromptAction(label, prompt) {
        return this.getMessageFactory().createPostbackAction(label, { 'action': 'system.textReceived', 'variables': { 'system.text': prompt } });
    }
    /**
     * Enriches the system prompt with a JSON schema formatting instruction
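     * @example <caption>A minimal sketch: enrich the system prompt before the first prompt is sent (assumes a request-validation event handler that runs on every turn):</caption>
     * if (context.getCurrentTurn() === 1) {
     *     context.addJSONSchemaFormattingInstruction();
     * }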
     */
    addJSONSchemaFormattingInstruction() {
        if (this.getJsonSchema()) {
            let prompt = this.getJsonSchemaInstructionTemplate().replace('{0}', JSON.stringify(this.getJsonSchema())).replace('{1}', this.getSystemPrompt());
            this.updateSystemPrompt(prompt);
        }
    }
    /**
     * Handles an invalid LLM response by sending a retry prompt to the LLM if the maximum number of retries has
     * not been reached yet. If the maximum number of retries is reached, a validation error is set, which results in a
     * transition out of the LLM component using the 'error' transition.
     * <p>
     * @param {string[]} errors - messages describing what is invalid about the response
     * @returns {boolean} always returns false
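     * @example <caption>A minimal sketch: reject a response that is not valid JSON (assumes a response-validation event handler that returns a boolean):</caption>
     * const last = context.getLastAssistantMessage();
     * if (!last || !context.convertToJSON(last.content)) {
     *     return context.handleInvalidResponse(['The response is not valid JSON.']);
     * }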
     */
    handleInvalidResponse(errors) {
        this.getResponse().messageHistory.allValidationErrors = errors;
        if (this.getRetries() < this.getMaxRetries()) {
            if (this.getRetryUserMessage()) {
                this.addMessage(this.getRetryUserMessage());
            }
            this.setNextLLMPrompt(this.getInvalidResponseTemplate().replace('{0}', errors.join('\n- ')), true);
        }
        else {
            this.setValidationError(errors.join('\n'));
        }
        return false;
    }
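    /**
     * Returns the last chat entry with the given role, skipping the system prompt at index 0.
     * @param {string} role - the role to search for ('user' or 'assistant')
     * @returns {ChatEntry | undefined} the last matching entry, or undefined when there is none
     * @private
     */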
    getLastMessage(role) {
        for (let index = this._chatEntries.length - 1; index > 0; index--) {
            if (this._chatEntries[index].role === role) {
                return this._chatEntries[index];
            }
        }
        return undefined;
    }
}
exports.LlmComponentContext = LlmComponentContext;