diff --git a/.github/workflows/azure-deploy-prod_maht.yml b/.github/workflows/azure-deploy-prod_maht.yml index cbd7a1d..d7e18f7 100644 --- a/.github/workflows/azure-deploy-prod_maht.yml +++ b/.github/workflows/azure-deploy-prod_maht.yml @@ -12,12 +12,12 @@ jobs: steps: - name: Checkout code - uses: actions/checkout@v2 + uses: actions/checkout@v4 - name: Set up Node.js version - uses: actions/setup-node@v2 + uses: actions/setup-node@v4 with: - node-version: '18.x' + node-version: 20 cache: 'npm' - name: Install dependencies @@ -29,7 +29,7 @@ jobs: npm run test --if-present - name: Upload artifact for deployment job - uses: actions/upload-artifact@v2 + uses: actions/upload-artifact@v4.3.3 with: name: node-app path: . @@ -43,13 +43,13 @@ jobs: steps: - name: Download artifact from build job - uses: actions/download-artifact@v2 + uses: actions/download-artifact@v4.1.7 with: name: node-app - name: 'Deploy to Azure Web App' id: deploy-to-webapp - uses: azure/webapps-deploy@v2 + uses: azure/webapps-deploy@v3 with: app-name: 'maht' slot-name: 'Production' diff --git a/inc/js/agents/system/asset-assistant.mjs b/inc/js/agents/system/asset-assistant.mjs index e1a98e1..d07e664 100644 --- a/inc/js/agents/system/asset-assistant.mjs +++ b/inc/js/agents/system/asset-assistant.mjs @@ -3,15 +3,15 @@ import fs from 'fs' import mime from 'mime-types' import FormData from 'form-data' import axios from 'axios' -// modular constants +// module constants const { MYLIFE_EMBEDDING_SERVER_BEARER_TOKEN, MYLIFE_EMBEDDING_SERVER_FILESIZE_LIMIT, MYLIFE_EMBEDDING_SERVER_FILESIZE_LIMIT_ADMIN, MYLIFE_SERVER_MBR_ID: mylifeMbrId, } = process.env const bearerToken = MYLIFE_EMBEDDING_SERVER_BEARER_TOKEN const fileSizeLimit = parseInt(MYLIFE_EMBEDDING_SERVER_FILESIZE_LIMIT) || 1048576 const fileSizeLimitAdmin = parseInt(MYLIFE_EMBEDDING_SERVER_FILESIZE_LIMIT_ADMIN) || 10485760 -// modular variables +// module variables let AgentFactory let Globals -// modular class definition +// module class definition 
class oAIAssetAssistant { // pseudo-constructor #ctx // @todo: only useful if assistant only exists for duration of request @@ -21,7 +21,7 @@ class oAIAssetAssistant { // primary direct assignment this.#ctx = _ctx this.#mbr_id = this.#ctx.state.member.mbr_id - // modular direct assignment + // module direct assignment if(!AgentFactory) AgentFactory = this.#ctx.AgentFactory if(!Globals) Globals = this.#ctx.Globals // secondary direct assignment diff --git a/inc/js/agents/system/evolution-assistant.mjs b/inc/js/agents/system/evolution-assistant.mjs index 859d6e6..625a3df 100644 --- a/inc/js/agents/system/evolution-assistant.mjs +++ b/inc/js/agents/system/evolution-assistant.mjs @@ -1,7 +1,7 @@ // imports import { _ } from 'ajv' import { EventEmitter } from 'events' -/* modular constants */ +/* module constants */ const _phases = [ 'create', 'init', @@ -123,10 +123,10 @@ export class EvolutionAssistant extends EventEmitter { } } } -/* modular functions */ +/* module functions */ /** * Advance the phase of the Evolution Assistant. Logic is encapsulated to ensure that the phase is advanced only when appropriate, ergo, not every request _to_ advancePhase() will actually _do_ so. Isolates and privatizes logic to propose _advance_ to next phase. - * @modular + * @module * @emits {evo-agent-phase-change} - Emitted when the phase advances. * @param {EvolutionAssistant} _evoAgent - `this` Evolution Assistant. * @returns {string} The determined phase. 
@@ -148,7 +148,7 @@ async function mAdvancePhase(_evoAgent){ // **note**: treat parameter `_evoAge const contributionsPromises = mAssessData(_evoAgent) .map(_category => mGetContribution(_evoAgent, _category, _formalPhase)) // Returns array of promises _proposal.contributions = await Promise.all(contributionsPromises) } - // alterations sent as proposal to be adopted (or not, albeit no current mechanism to reject) by instantiated evo-agent [only viable caller by modular design] + // alterations sent as proposal to be adopted (or not, albeit no current mechanism to reject) by instantiated evo-agent [only viable caller by module design] _proposal.phase = (mEvolutionPhaseComplete(_evoAgent,_formalPhase)) ? 'init' : 'develop' @@ -168,7 +168,7 @@ async function mAdvancePhase(_evoAgent){ // **note**: treat parameter `_evoAge } /** * Reviews properties of avatar and returns an array of three categories most in need of member Contributions. - * @modular + * @module * @param {EvolutionAssistant} _evoAgent - The avatar evoAgent whose data requires assessment. * @param {number} _numCategories - The number of categories to return. Defaults to 5. minimum 1, maximum 9. * @returns {Array} The top number categories requiring Contributions. @@ -228,7 +228,7 @@ function mAssignContributionListeners(_evoAgent, _contribution) { } /** * Determines whether the given phase is complete. - * @modular + * @module * @param {EvolutionAssistant} _evoAgent - `this` Evolution Assistant. * @param {string} _phase - The phase to check for completion. */ @@ -243,7 +243,7 @@ function mEvolutionPhaseComplete(_evoAgent,_phase) { } /** * Formats a category string to a format consistent with Cosmos key structure: all lowercase, underscores for spaces, limit of 64-characters. - * @modular + * @module * @param {string} _category - The category to format. * @returns {string} The formatted category. 
*/ @@ -256,7 +256,7 @@ function mFormatCategory(_category) { } /** * Digest a request to generate a new Contribution. - * @modular + * @module * @emits {on-contribution-new} - Emitted when a new Contribution is generated. * @param {EvolutionAssistant} _evoAgent - `this` Evolution Assistant. * @param {string} _category - The category to process. @@ -287,7 +287,7 @@ async function mGetContribution(_evoAgent, _category, _phase) { } /** * Log an object to the console and emit it to the parent. - * @modular + * @module * @emits {_emit_text} - Emitted when an object is logged. * @param {string} _emit_text - The text to emit. * @param {EvolutionAssistant} _evoAgent - `this` Evolution Assistant. @@ -298,7 +298,7 @@ function mLog(_emit_text,_evoAgent,_object) { } /** * Process a Contribution. First update the Contribution object, determining if the Contribution stage is updated. Then evaluate Evolution phase for completeness and advancement. - * @modular + * @module * @param {EvolutionAssistant} _evoAgent - `this` Evolution Assistant. * @param {Array} _contributions - The contributions array. * @param {object} _current - Contribution object { category, contributionId, message } diff --git a/inc/js/api-functions.mjs b/inc/js/api-functions.mjs index 6036863..2b505da 100644 --- a/inc/js/api-functions.mjs +++ b/inc/js/api-functions.mjs @@ -1,7 +1,16 @@ import chalk from "chalk" /* variables */ const mBotSecrets = JSON.parse(process.env.OPENAI_JWT_SECRETS) -/* public modular functions */ +/* public module functions */ +/** + * Returns all publicly-available experiences. + * @param {Koa} ctx - Koa Context object. + * @returns {Object[]} - Array of Experience Objects. 
+ */ +async function availableExperiences(ctx){ + ctx.body = await ctx.MyLife.availableExperiences() + console.log(chalk.yellowBright('availableExperiences()', ctx.body)) +} // @todo implement builder functionality, allowing for interface creation of experiences by members // @todo implement access to exposed member experiences using `mbr_key` as parameter to `factory.getItem()` async function experienceBuilder(ctx){ @@ -127,7 +136,7 @@ async function keyValidation(ctx){ // from openAI } /** * All functionality related to a library. Note: Had to be consolidated, as openai GPT would only POST. - * @modular + * @module * @public * @param {Koa} ctx - Koa Context object * @returns {Koa} Koa Context object @@ -151,7 +160,7 @@ async function library(ctx){ } /** * Login function for member. Requires mid in params. - * @modular + * @module * @public * @param {Koa} ctx - Koa Context object * @returns {Koa} Koa Context object @@ -244,7 +253,7 @@ async function storyLibrary(ctx){ } /** * Validates api token - * @modular + * @module * @public * @param {object} ctx Koa context object * @param {function} next Koa next function @@ -278,10 +287,10 @@ async function tokenValidation(ctx, next) { return } } -/* "private" modular functions */ +/* "private" module functions */ /** * Validates key and sets `ctx.state` and `ctx.session` properties. `ctx.state`: [ assistantType, isValidated, mbr_id, ]. `ctx.session`: [ isAPIValidated, APIMemberKey, ]. - * @modular + * @module * @private * @async * @param {Koa} ctx - Koa Context object. 
@@ -328,6 +337,7 @@ function mTokenValidation(_token){ } /* exports */ export { + availableExperiences, experience, experienceCast, experienceEnd, diff --git a/inc/js/core.mjs b/inc/js/core.mjs index 5af5695..7e8306d 100644 --- a/inc/js/core.mjs +++ b/inc/js/core.mjs @@ -260,6 +260,23 @@ class MyLife extends Organization { // form=server constructor(factory){ // no session presumed to exist super(factory) } + /** + * Retrieves all public experiences (i.e., owned by MyLife). + * @returns {Object[]} - An array of the currently available public experiences. + */ + async availableExperiences(){ + const experiences = ( await this.factory.availableExperiences() ) + .map(experience=>{ // map to display versions [from `mylife-avatar.mjs`] + const { autoplay=false, description, id, name, purpose, skippable=true, } = experience + return { + description, + id, + name, + purpose, + } + }) + return experiences + } async datacore(_mbr_id){ if(!_mbr_id || _mbr_id===this.mbr_id) throw new Error('datacore cannot be accessed') return await this.factory.datacore(_mbr_id) diff --git a/inc/js/factory-class-extenders/class-contribution-functions.mjs b/inc/js/factory-class-extenders/class-contribution-functions.mjs index dc013e6..37ba5bf 100644 --- a/inc/js/factory-class-extenders/class-contribution-functions.mjs +++ b/inc/js/factory-class-extenders/class-contribution-functions.mjs @@ -1,7 +1,7 @@ /* variables */ const { MYLIFE_ALLOW_INTELLIGENT_QUESTIONS } = process.env const allowLLMQuestions = JSON.parse(MYLIFE_ALLOW_INTELLIGENT_QUESTIONS ?? 'false') -/* contribution modular functions */ +/* contribution module functions */ /** * Gets questions from Cosmos, but could request from openAI. 
* @param {Contribution} _contribution Contribution object @@ -25,7 +25,7 @@ async function mGetQuestions(_contribution, _openai){ return ['What is the meaning of life?'] /* @todo: refactor for gpt's const _response = await _evoAgent.openai.completions.create({ - model: 'gpt-3.5-turbo-instruct', + model: 'gpt-4o', prompt: 'give a list of 3 questions (markdown bullets) used to ' + ( (!this.request.content) ? `get more information about a ${this.request.impersonation} regarding its ${this.request.category}` @@ -48,7 +48,7 @@ async function mGetQuestions(_contribution, _openai){ } /** * Updates contribution object with incoming contribution data. - * @modular + * @module * @param {Contribution} _contribution - Contribution object * @param {object} _obj - Contribution data { category, contributionId, content??question??message } * @returns {void} @@ -63,10 +63,10 @@ function mUpdateContribution(_contribution, _obj){ } mEvaluateStatus(_contribution) // evaluates readiness for next stage of Contribution } -/* contribution "private" modular functions [unexported] */ +/* contribution "private" module functions [unexported] */ /** * Evaluates Contribution and may update `status` property. 
- * @modular + * @module * @param {Contribution} _contribution - Contribution object * @returns {void} */ diff --git a/inc/js/factory-class-extenders/class-experience-functions.mjs b/inc/js/factory-class-extenders/class-experience-functions.mjs index 572c9a1..fad4159 100644 --- a/inc/js/factory-class-extenders/class-experience-functions.mjs +++ b/inc/js/factory-class-extenders/class-experience-functions.mjs @@ -1,4 +1,4 @@ -/* experience modular constants */ +/* experience module constants */ const mAvailableEventActionMap = { appear: { effects: ['fade', 'spotlight'], @@ -9,14 +9,14 @@ const mAvailableEventActionMap = { }, input: {}, } -/* experience modular functions */ +/* experience module functions */ function mAppear(event){ const { id, type, data: eventData } = event return } /** * From an event, returns a `synthetic` Dialog data package, see JSDoc properties. - * @modular + * @module * @public * @param {Experience} _experience - Experience class instance. * @param {number} iteration - Iteration number, defaults to first (array zero format). @@ -90,7 +90,7 @@ function mGetEvent(scenes, eventId){ } /** * From an event, returns a `synthetic` Dialog data package, above and an . - * @modular + * @module * @public * @param {ExperienceEvent} event - Experience class instance. * @param {number} iteration - Iteration number, defaults to first (array zero format). diff --git a/inc/js/factory-class-extenders/class-extenders.mjs b/inc/js/factory-class-extenders/class-extenders.mjs index 89eb089..232386d 100644 --- a/inc/js/factory-class-extenders/class-extenders.mjs +++ b/inc/js/factory-class-extenders/class-extenders.mjs @@ -153,7 +153,8 @@ function extendClass_conversation(originClass, referencesObject) { * @returns {Object[]} - The updated messages array. 
*/ addMessage(message){ - if(this.messages.find(_message=>_message.id===message.id)) + const { id, } = message + if(this.messages.find(message=>message.id===id)) return this.messages if(!(message instanceof this.#factory.message)){ if(typeof message!=='object') @@ -401,7 +402,7 @@ function extendClass_message(originClass, referencesObject) { */ class Message extends originClass { #content - constructor(obj) { + constructor(obj){ const { content, ..._obj } = obj super(_obj) try{ diff --git a/inc/js/factory-class-extenders/class-message-functions.mjs b/inc/js/factory-class-extenders/class-message-functions.mjs index b145a05..72b97fa 100644 --- a/inc/js/factory-class-extenders/class-message-functions.mjs +++ b/inc/js/factory-class-extenders/class-message-functions.mjs @@ -1,7 +1,7 @@ -/* public modular functions */ +/* public module functions */ /** * Assigns content (from _message.message) to message object. - * @modular + * @module * @public * @param {any} obj - Element to assign to `content` property * @returns {string} - message text content @@ -47,10 +47,10 @@ function mAssignContent(obj){ return `${obj}` } } -/* private modular functions */ +/* private module functions */ /** * When incoming text is too large for a single message, generate dynamic text file and attach/submit. - * @modular + * @module * @private * @param {string} _file - The file to construct. * @returns @@ -71,7 +71,7 @@ async function mConstructFile(_file){ } /** * Checks if content is a non-empty string. 
- * @modular + * @module * @private - not exposed via export * @param {string} content - message content * @returns {boolean} - true if content is a non-empty string diff --git a/inc/js/functions.mjs b/inc/js/functions.mjs index 013d275..92cb745 100644 --- a/inc/js/functions.mjs +++ b/inc/js/functions.mjs @@ -7,7 +7,7 @@ async function about(ctx){ } /** * Activate a bot for the member - * @modular + * @module * @public * @api no associated view * @param {object} ctx Koa Context object @@ -57,7 +57,7 @@ function category(ctx){ // sets category for avatar } /** * Challenge the member session with a passphrase. - * @modular + * @module * @public * @async * @api - No associated view @@ -109,6 +109,19 @@ async function deleteItem(ctx){ ctx.throw(400, `missing item id`) ctx.body = await ctx.state.avatar.deleteItem(iid) } +/** + * Request help about MyLife. + * @param {Koa} ctx - Koa Context object, body={ request: string|required, mbr_id, type: string, }. + * @returns {object} - Help response message object. + */ +async function help(ctx){ + const { helpRequest, type=`general`, } = ctx.request?.body + if(!helpRequest?.length) + ctx.throw(400, `missing help request text`) + const { avatar } = ctx.state + const _avatar = type==='membership' ? avatar : ctx.MyLife.avatar + ctx.body = await _avatar.help(helpRequest, type) +} /** * Index page for the application. * @async @@ -121,7 +134,7 @@ async function index(ctx){ } /** * Set or get the avatar interface mode for the member. - * @modular + * @module * @public * @api - No associated view * @param {object} ctx - Koa Context object @@ -264,7 +277,7 @@ function mGetContributions(ctx){ /** * Manage receipt and setting of contributions(s). 
* @async - * @modular + * @module * @param {object} ctx Koa Context object */ function mSetContributions(ctx){ @@ -291,6 +304,7 @@ export { collections, contributions, deleteItem, + help, index, interfaceMode, login, diff --git a/inc/js/globals.mjs b/inc/js/globals.mjs index 21986c1..f950840 100644 --- a/inc/js/globals.mjs +++ b/inc/js/globals.mjs @@ -70,7 +70,7 @@ const mAiJsFunctions = { } const mEmailRegex = /^[^\s@]+@[^\s@]+\.[^\s@]+$/ // regex for email validation const mGuidRegex = /^[0-9a-f]{8}-[0-9a-f]{4}-[4][0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$/i // regex for GUID validation -// modular classes +// module classes class Globals extends EventEmitter { constructor() { // essentially this is a coordinating class wrapper that holds all of the sensitive data and functionality; as such, it is a singleton, and should either _be_ the virtual server or instantiated on one at startup diff --git a/inc/js/mylife-agent-factory.mjs b/inc/js/mylife-agent-factory.mjs index c002b91..c310be6 100644 --- a/inc/js/mylife-agent-factory.mjs +++ b/inc/js/mylife-agent-factory.mjs @@ -19,7 +19,7 @@ import LLMServices from './mylife-llm-services.mjs' import Menu from './menu.mjs' import MylifeMemberSession from './session.mjs' import chalk from 'chalk' -/* modular constants */ +/* module constants */ // global object keys to exclude from class creations [apparently fastest way in js to lookup items, as they are hash tables] const { MYLIFE_SERVER_MBR_ID: mPartitionId, } = process.env const mBotInstructions = {} @@ -57,7 +57,7 @@ const vmClassGenerator = vm.createContext({ }) /* dependent constants and functions */ const mAlerts = { - system: await mDataservices.getAlerts(), // not sure if we need other types in global modular, but feasibly historical alerts could be stored here, etc. + system: await mDataservices.getAlerts(), // not sure if we need other types in global module, but feasibly historical alerts could be stored here, etc. 
} // @todo: capitalize hard-codings as per actual schema classes const mSchemas = { @@ -68,14 +68,14 @@ const mSchemas = { session: MylifeMemberSession } const mSystemActor = await mDataservices.bot(undefined, 'actor', undefined) -/* modular construction functions */ +/* module construction functions */ mConfigureSchemaPrototypes() mPopulateBotInstructions() /* logging/reporting */ console.log(chalk.bgRedBright('<-----AgentFactory module loaded----->')) console.log(chalk.greenBright('schema-class-constructs')) console.log(mSchemas) -/* modular classes */ +/* module classes */ class BotFactory extends EventEmitter{ // micro-hydration version of factory for use _by_ the MyLife server #dataservices @@ -256,6 +256,17 @@ class BotFactory extends EventEmitter{ // @todo remove restriction (?) for all experiences to be stored under MyLife `mbr_id` return await mDataservices.getItem(_experience_id, 'system') } + /** + * Proxy for module mHelp() function. + * @public + * @param {string} thread_id - The thread id. + * @param {string} bot_id - The bot id. + * @param {string} helpRequest - The help request string. + * @returns {Promise} - openai `message` objects. + */ + async help(thread_id, bot_id, helpRequest){ + return await mHelp(thread_id, bot_id, helpRequest, this) + } /** * Gets, creates or updates Library in Cosmos. * @todo - institute bot for library mechanics. @@ -417,12 +428,19 @@ class AgentFactory extends BotFactory{ */ async init(mbr_id){ if(mIsMyLife(mbr_id)) - throw new Error('MyLife server AgentFactory cannot be initialized, as it references modular dataservices on constructor().') + throw new Error('MyLife server AgentFactory cannot be initialized, as it references module dataservices on constructor().') await super.init(mbr_id) if(this.core.openaiapikey) this.#llmServices = new LLMServices(this.core.openaiapikey, this.core.openaiorgkey) return this } + /** + * Retrieves all public experiences (i.e., owned by MyLife). 
+ * @returns {Object[]} - An array of the currently available public experiences. + */ + async availableExperiences(){ + return await mDataservices.availableExperiences() + } /** * Retrieves avatar properties from Member factory dataservices, or inherits the core data from Member class. * @returns {object} - Avatar properties. @@ -480,7 +498,7 @@ class AgentFactory extends BotFactory{ } async getMyLifeSession(){ // default is session based around default dataservices [Maht entertains guests] - // **note**: conseuquences from this is that I must be careful to not abuse the modular space for sessions, and regard those as _untouchable_ + // **note**: consequences from this is that I must be careful to not abuse the module space for sessions, and regard those as _untouchable_ return await new (mSchemas.session)( ( new AgentFactory(mPartitionId) ) // no need to init currently as only pertains to non-server adjustments // I assume this is where the duplication is coming but no idea why @@ -649,10 +667,10 @@ class AgentFactory extends BotFactory{ return process.env.MYLIFE_EMBEDDING_SERVER_URL+':'+process.env.MYLIFE_EMBEDDING_SERVER_PORT } } -// private modular functions +// private module functions /** * Initializes openAI assistant and returns associated `assistant` object. - * @modular + * @module * @param {LLMServices} llmServices - OpenAI object * @param {object} bot - bot creation instructions. * @returns {object} - [OpenAI assistant object](https://platform.openai.com/docs/api-reference/assistants/object) @@ -707,7 +725,7 @@ function assignClassPropertyValues(_propertyDefinition){ } /** * Creates new avatar property data package to be consumed by Avatar class `constructor`. Defines critical avatar fields as: ["being", "id", "mbr_id", "name", "names", "nickname", "proxyBeing", "type"]. 
- * @modular + * @module * @param {object} _core - Datacore object * @returns {object} - Avatar property data package */ @@ -768,7 +786,7 @@ async function mConfigureSchemaPrototypes(){ // add required functionality as de } /** * Creates bot and returns associated `bot` object. - * @modular + * @module * @private * @param {LLMServices} llm - OpenAI object * @param {AgentFactory} factory - Agent Factory object @@ -782,7 +800,7 @@ async function mCreateBotLLM(llm, bot){ } /** * Creates bot and returns associated `bot` object. - * @modular + * @module * @async * @private * @param {LLMServices} llm - LLMServices Object contains methods for interacting with OpenAI @@ -823,7 +841,7 @@ async function mCreateBot(llm, factory, bot){ } /** * Returns MyLife-version of bot instructions. - * @modular + * @module * @private * @param {BotFactory} factory - Factory object * @param {object} _bot - Bot object @@ -1051,6 +1069,19 @@ function mGetAIFunctions(type, globals){ } return functions } +/** + * Take help request about MyLife and consults appropriate engine for response. + * @requires mLLMServices - equivalent of default MyLife dataservices/factory + * @param {string} thread_id - The thread id. + * @param {string} bot_id - The bot id. + * @param {string} helpRequest - The help request string. + * @param {AgentFactory} factory - The AgentFactory object; **note**: ensure prior that it is generic Q-conversation. + * @returns {Promise} - openai `message` objects. + */ +async function mHelp(thread_id, bot_id, helpRequest, factory){ + const response = await mLLMServices.help(thread_id, bot_id, helpRequest, factory) + return response +} /** * Inflates library item with required values and structure. Object structure expected from API, librayItemItem in JSON. * root\inc\json-schemas\bots\library-bot.json @@ -1081,7 +1112,7 @@ function mInflateLibraryItem(_item, _library_id, _mbr_id){ } /** * Hydrates library and returns library object. 
- * @modular + * @module * @private * @param {BotFactory} factory - BotFactory object * @param {object} _library - Library object @@ -1259,7 +1290,7 @@ function mSanitizeSchemaKey(_key){ } /** * Ingests a class definition and sanitizes its keys. - * @modular + * @module * @param {object} _class - Class definition to sanitize. * @param {object} _mutatedKeysObject - Object to hold mutated sanitized keys. * @returns {void} - Internally mutates parameter references. diff --git a/inc/js/mylife-avatar.mjs b/inc/js/mylife-avatar.mjs index 5efef00..7b1fa9d 100644 --- a/inc/js/mylife-avatar.mjs +++ b/inc/js/mylife-avatar.mjs @@ -1,8 +1,9 @@ import { Marked } from 'marked' import EventEmitter from 'events' +import oAIAssetAssistant from './agents/system/asset-assistant.mjs' import { EvolutionAssistant } from './agents/system/evolution-assistant.mjs' import LLMServices from './mylife-llm-services.mjs' -/* modular constants */ +/* module constants */ const { MYLIFE_DB_ALLOW_SAVE, OPENAI_MAHT_GPT_OVERRIDE, } = process.env const mAllowSave = JSON.parse(MYLIFE_DB_ALLOW_SAVE ?? 'false') const mAvailableModes = ['standard', 'admin', 'evolution', 'experience', 'restoration'] @@ -12,7 +13,7 @@ const mBotIdOverride = OPENAI_MAHT_GPT_OVERRIDE * @extends EventEmitter * @description An avatar is a digital self proxy of Member. Not of the class but of the human themselves - they are a one-to-one representation of the human, but the synthetic version that interopts between member and internet when inside the MyLife platform. The Avatar is the manager of the member experience, and is the primary interface with the AI (aside from when a bot is handling API request, again we are speaking inside the MyLife platform). * @todo - deprecate `factory` getter - * @todo - more efficient management of modular constants, should be classes? + * @todo - more efficient management of module constants, should be classes? 
*/ class Avatar extends EventEmitter { #activeBotId // id of active bot in this.#bots; empty or undefined, then this @@ -34,7 +35,7 @@ class Avatar extends EventEmitter { #livedExperiences = [] // array of ids for lived experiences #livingExperience #llmServices - #mode = 'standard' // interface-mode from modular `mAvailableModes` + #mode = 'standard' // interface-mode from module `mAvailableModes` #nickname // avatar nickname, need proxy here as getter is complex #proxyBeing = 'human' /** @@ -50,6 +51,8 @@ class Avatar extends EventEmitter { } /* public functions */ /** + * Initialize the Avatar class. + * @todo - create class-extender specific to the "singleton" MyLife avatar * @async * @public * @returns {Promise} Promise resolves to this Avatar class instantiation @@ -90,7 +93,7 @@ class Avatar extends EventEmitter { mAssignEvolverListeners(this.#factory, this.#evolver, this) /* init evolver */ await this.#evolver.init() - } else { // Q-specific, leave as `else` as is near always false + } else { // @stub - Q-specific, leave as `else` as is near always false this.activeBotId = activeBot.id activeBot.bot_id = mBotIdOverride ?? 
activeBot.bot_id this.#llmServices.botId = activeBot.bot_id @@ -143,22 +146,7 @@ class Avatar extends EventEmitter { && message.type==='chat' && message.role!=='user' }) - .map(message=>{ - message = mPrepareMessage(message.content) // returns object { category, content } - const { category, content } = message - return { - activeBotId: bot.id, - activeBotAIId: bot.bot_id, - agent: 'server', - category: category, - contributions: [], - message: content, - purpose: 'chat response', - response_time: Date.now()-processStartTime, - thread_id: conversation.thread_id, - type: 'chat', - } - }) + .map(message=>mPruneMessage(bot, message, 'chat', processStartTime)) return chat } /** @@ -311,6 +299,42 @@ class Avatar extends EventEmitter { return this.#conversations .find(_=>_.thread?.id===thread_id) } + /** + * Returns all conversations of a specific-type stored in memory. + * @param {string} type - Type of conversation: chat, experience, dialog, inter-system, etc.; defaults to `chat`. + * @returns {Conversation[]} - The array of conversation objects. + */ + getConversations(type='chat'){ + return this.#conversations + .filter(_=>_?.type===type) + } + /** + * Request help about MyLife. **caveat** - correct avatar should have been selected prior to calling. + * @param {string} helpRequest - The help request text. + * @param {string} type - The type of help request. + * @returns {Promise} - openai `message` objects. + */ + async help(helpRequest, type){ + const processStartTime = Date.now() + if(!helpRequest?.length) + throw new Error('Help request required.') + // @stub - force-type into enum? + helpRequest = mHelpIncludePreamble(type, this.isMyLife) + helpRequest + const { thread_id, } = this.activeBot + const { bot_id, } = this.helpBots?.find(bot=>(bot?.subType ?? bot?.sub_type ?? bot?.subtype)===type) + ?? this.helpBots?.[0] + ?? this.activeBot + // @stub rewrite mCallLLM, but ability to override conversation? 
No, I think in general I would prefer this to occur at factory as it is here + const conversation = this.getConversation(thread_id) + const helpResponseArray = await this.factory.help(thread_id, bot_id, helpRequest) + conversation.addMessages(helpResponseArray) + if(mAllowSave) + conversation.save() + else + console.log('chatRequest::BYPASS-SAVE', conversation.message.content) + const response = mPruneMessages(this.activeBot, helpResponseArray, 'help', processStartTime) + return response + } /** * Allows member to reset passphrase. * @param {string} passphrase @@ -361,6 +385,19 @@ class Avatar extends EventEmitter { } return this.#conversations[0].threadId } + /** + * Upload files to MyLife and/or LLM. + * @todo - implement MyLife file upload. + * @param {File[]} files - The array of files to upload. + * @param {boolean} includeMyLife - Whether to include MyLife in the upload, defaults to `false`. + * @returns + */ + async upload(files, includeMyLife=false){ + if(this.isMyLife) + throw new Error('MyLife avatar cannot upload files.') + const assetAgent = new oAIAssetAssistant(files, this.#factory, this.globals, this.#llmServices, includeMyLife) + await assetAgent.init() + } // upon dissolution, forced/squeezed by session presumably (dehydrate), present itself to factory.evolution agent (or emit?) for inspection and incorporation if appropriate into datacore /* getters/setters */ /** @@ -435,7 +472,7 @@ class Avatar extends EventEmitter { * @returns {object} - The personal avatar bot. */ get avatarBot(){ - return this.#bots.find(_bot=>_bot.type==='personal-avatar') + return this.bots.find(_bot=>_bot.type==='personal-avatar') } /** * Get the "avatar's" being, or more precisely the name of the being (affiliated object) the evatar is emulating. 
@@ -597,6 +634,19 @@ class Avatar extends EventEmitter { get globals(){ return this.#factory.globals } + /** + * Get the help bots, primarily MyLife avatar, though presume there are a number of custom self-help bots that would be capable of referencing preferences, internal searches, etc. + * @getter + * @returns {array} - The help bots. + */ + get helpBots(){ + return this.bots.filter(bot=>bot.type==='help') + } + /** + * Test whether avatar is in an `experience`. + * @getter + * @returns {boolean} - Avatar is in `experience` (true) or not (false). + */ get isInExperience(){ return this.mode==='experience' } @@ -761,10 +811,10 @@ class Avatar extends EventEmitter { this.#nickname = nickname } } -/* modular functions */ +/* module functions */ /** * Assigns evolver listeners. - * @modular + * @module * @param {AgentFactory} factory - Agent Factory object * @param {EvolutionAssistant} evolver - Evolver object * @param {Avatar} avatar - Avatar object @@ -791,7 +841,7 @@ function mAssignEvolverListeners(factory, evolver, avatar){ // send to gpt for summary const _responses = _contribution.responses.join('\n') const _summary = factory.openai.completions.create({ - model: 'gpt-3.5-turbo', + model: 'gpt-4o', prompt: 'summarize answers in 512 chars or less, if unsummarizable, return "NONE": ' + _responses, temperature: 1, max_tokens: 700, @@ -808,7 +858,7 @@ function mAssignEvolverListeners(factory, evolver, avatar){ /** * Assigns (directly mutates) private experience variables from avatar. * @todo - theoretically, the variables need not come from the same avatar instance... not sure of viability - * @modular + * @module * @param {object} experienceVariables - Experience variables object from Avatar class definition. * @param {Avatar} avatar - Avatar instance. 
* @returns {void} - mutates experienceVariables @@ -827,7 +877,7 @@ function mAssignGenericExperienceVariables(experienceVariables, avatar){ /** * Updates or creates bot (defaults to new personal-avatar) in Cosmos and returns successful `bot` object, complete with conversation (including thread/thread_id in avatar) and gpt-assistant intelligence. * @todo Fix occasions where there will be no object_id property to use, as it was created through a hydration method based on API usage, so will be attached to mbr_id, but NOT avatar.id - * @modular + * @module * @param {AgentFactory} factory - Agent Factory object * @param {Avatar} avatar - Avatar object that will govern bot * @param {object} bot - Bot object @@ -866,7 +916,7 @@ async function mBot(factory, avatar, bot){ * Makes call to LLM and to return response(s) to prompt. * @todo - create actor-bot for internal chat? Concern is that API-assistants are only a storage vehicle, ergo not an embedded fine tune as I thought (i.e., there still may be room for new fine-tuning exercise); i.e., micro-instructionsets need to be developed for most. Unclear if direct thread/message instructions override or ADD, could check documentation or gpt, but... * @todo - address disconnect between conversations held in memory in avatar and those in openAI threads; use `addLLMMessages` to post internally - * @modular + * @module * @param {LLMServices} llmServices - OpenAI object currently * @param {Conversation} conversation - Conversation object * @param {string} prompt - dialog-prompt/message for llm @@ -887,7 +937,7 @@ async function mCallLLM(llmServices, conversation, prompt, factory){ } /** * Cancels openAI run. - * @modular + * @module * @param {LLMServices} llmServices - OpenAI object * @param {string} threadId - Thread id * @param {string} runId - Run id @@ -905,7 +955,7 @@ async function mCancelRun(llmServices, threadId, runId,){ * @todo - any trouble retrieving a known actor should be understudied by... Q? or personal-avatar? 
yes, personal avatar for now * @todo - implement `creator` version of actor * @todo - include variables for names of roles/actors - * @modular + * @module * @param {AgentFactory} factory - Agent Factory object * @param {array} cast - Array of cast objects * @returns {Promise} - Array of ExperienceCastMember instances @@ -964,7 +1014,7 @@ async function mEventCharacter(llm, experience, character){ * @todo - add LLM usage data to conversation * @todo - when `variable` undefined in `experience.variables`, check to see if event can be found that will provide it * @todo - seems unnecessary to have experience extension handling basic data construction at this stage... refactor, tho? - * @modular + * @module * @public * @param {LLMServices} llm - OpenAI object currently * @param {Experience} experience - Experience class instance. @@ -1026,7 +1076,7 @@ async function mEventDialog(llm, experience, event, iteration=0){ * Returns a processed memberInput event. * @todo - once conversations are not spurred until needed, add a third conversation to the experience, which would be the scriptAdvisor (not actor) to determine success conditions for scene, etc. * @todo - handle complex success conditions - * @modular + * @module * @public * @param {LLMServices} llm - OpenAI object currently. * @param {Experience} experience - Experience class instance. @@ -1149,7 +1199,7 @@ async function mEventInput(llm, experience, event, iteration=0, memberInput){ * @todo - mutations should be handled by `ExperienceEvent` extenders. * @todo - script dialog change, input assessment, success evals to completions or cheaper? babbage-002 ($0.40/m) is only cheaper than 3.5 ($3.00/m); can test efficacy for dialog completion, otherwise, 3.5 exceptional * @todo - iterations need to be re-included, although for now, one dialog for experience is fine - * @modular + * @module * @public * @param {LLMServices} llm - OpenAI object currently * @param {Experience} experience - Experience class instance. 
@@ -1202,7 +1252,7 @@ async function mEventProcess(llm, experience, event, memberInput){ * Returns a processed stage event. * @todo - add LLM usage data to conversation. * @todo - when `action==='stage'`, deprecate effects and actor - * @modular + * @module * @public * @param {LLMServices} llm - OpenAI object currently. * @param {Experience} experience - Experience class instance. @@ -1223,7 +1273,7 @@ function mEventStage(llm, experience, stage){ * @todo - allow auto-skip to scene/event? * @todo - Branching and requirements for scene entry and completion * @todo - ExperienceScene and ExperienceEvent should be classes? - * @modular + * @module * @public * @param {AgentFactory} factory - AgentFactory object * @param {object} llm - ai interface object @@ -1376,7 +1426,7 @@ async function mExperienceStart(avatar, factory, experienceId, avatarExperienceV } /** * Gets bot by id. - * @modular + * @module * @param {object} avatar - Avatar instance. * @param {string} _botId - Bot id * @returns {object} - Bot object @@ -1388,7 +1438,7 @@ function mFindBot(avatar, _botId){ } /** * Returns simple micro-category after logic mutation. - * @modular + * @module * @param {string} _category text of category * @returns {string} formatted category */ @@ -1401,7 +1451,7 @@ function mFormatCategory(_category){ } /** * Returns MyLife-version of chat category object - * @modular + * @module * @param {object} _category - local front-end category { category, contributionId, question/message/content } * @returns {object} - local category { category, contributionId, content } */ @@ -1421,6 +1471,28 @@ function mGetChatCategory(_category) { } return _proposedCategory } +/** + * Include help preamble to _LLM_ request, not outbound to member/guest. + * @todo - expand to include other types of help requests, perhaps more validation. + * @param {string} type - The type of help request. + * @param {boolean} isMyLife - Whether the request is from MyLife. 
+ * @returns {string} - The help preamble to be included. + */ +function mHelpIncludePreamble(type, isMyLife){ + switch(type){ + case 'account': + case 'membership': + if(isMyLife) + throw new Error(`Members can only request information about their own accounts.`) + return 'Following help request is for MyLife member account information or management:\n' + case 'interface': + return 'Following question is expected to be about MyLife Member Platform Interface:\n' + case 'general': + case 'help': + default: + return 'Following help request is about MyLife in general:\n' + } +} /** * Get experience scene navigation array. * @getter @@ -1455,36 +1527,75 @@ function mNavigation(scenes){ /** * returns simple micro-message with category after logic mutation. * Currently tuned for openAI gpt-assistant responses. - * @modular + * @todo - revamp as any of these LLMs can return JSON or run functions for modes. + * @module * @private - * @param {string} _msg text of message, currently from gpt-assistants - * @returns {object} { category, content } + * @param {object} bot - The bot object, usually active. + * @param {string} message - The text of LLM message. + * @param {string} type - The type of message, defaults to chat. + * @param {number} processStartTime - The time the process started, defaults to function call. + * @returns {object} - The bot-included message object. */ -function mPrepareMessage(_msg){ +function mPruneMessage(bot, message, type='chat', processStartTime=Date.now()){ /* parse message */ - // Regular expression to match the pattern "Category Mode: {category}. 
" or "Category Mode: {category}\n"; The 'i' flag makes the match case-insensitive - const _categoryRegex = /^Category Mode: (.*?)\.?$/gim - const _match = _categoryRegex.exec(_msg) - const _messageCategory = mFormatCategory(_match?.[1]??'') - if(_msg.content) _msg = _msg.content - // Remove from _msg - _msg = _msg.replace(_categoryRegex, '') - const _content = _msg.split('\n') - .filter(_line => _line.trim() !== '') // Remove empty lines - .map(_msg=>{ - return new Marked().parse(_msg) - }) + const { bot_id: activeBotAIId, id: activeBotId, } = bot + let agent='server', + category, + contributions=[], + purpose=type, + response_time=Date.now()-processStartTime + const { content: messageContent, thread_id, } = message + const content = Array.isArray(messageContent) + ? messageContent.reduce((acc, item) => { + if (item?.type==='text' && item?.text?.value) { + acc += item.text.value + '\n' + } + return acc + }, '') + .replace(/\n{2,}/g, '\n') + : messageContent + message = new Marked().parse(content) + const messageResponse = { + activeBotId, + activeBotAIId, + agent, + category, + contributions, + message, + purpose, + response_time, + thread_id, + type, + } + return messageResponse +} +/** + * Flattens an array of messages into a single frontend-consumable message. + * @param {object} bot - The bot object, usually active. + * @param {Object[]} messages - The array of messages to prune. + * @param {string} type - The type of message, defaults to chat. + * @param {number} processStartTime - The time the process started, defaults to function call. + * @returns {object} - Concatenated message object. 
+ */ +function mPruneMessages(bot, messageArray, type='chat', processStartTime=Date.now()){ + if(!messageArray.length) + throw new Error('No messages to prune') + const prunedMessages = messageArray + .map(message=>mPruneMessage(bot, message, type, processStartTime)) + const messageContent = prunedMessages + .map(message=>message.message) .join('\n') - return { - category: _messageCategory, - content: _content, + const message = { + ...prunedMessages[0], + message: messageContent, } + return message } /** * Replaces variables in prompt with Experience values. * @todo - variables should be back populated to experience, confirm * @todo - events could be identified where these were input if empty - * @modular + * @module * @private * @param {string} prompt - Dialog prompt, replace variables. * @param {string[]} variableList - List of variables to replace. @@ -1501,7 +1612,7 @@ function mReplaceVariables(prompt, variableList, variableValues){ } /** * Returns a sanitized event. - * @modular + * @module * @param {ExperienceEvent} event - Event object. * @returns {object} - Synthetic Event object. */ diff --git a/inc/js/mylife-data-service.js b/inc/js/mylife-data-service.js index 80e324e..fab78d9 100644 --- a/inc/js/mylife-data-service.js +++ b/inc/js/mylife-data-service.js @@ -109,6 +109,19 @@ class Dataservices { return this.#partitionId } // public functions + /** + * Retrieves all public experiences (i.e., owned by MyLife). + * @public + * @async + * @returns {Object[]} - An array of the currently available public experiences. + */ + async availableExperiences(){ + return await this.getItemsByFields( + 'experience', + [{ name: '@public', value: true }], + 'system', + ) + } /** * Get a bot specified by id or type. 
* @public diff --git a/inc/js/mylife-datamanager.mjs b/inc/js/mylife-datamanager.mjs index d3b94f7..2937981 100644 --- a/inc/js/mylife-datamanager.mjs +++ b/inc/js/mylife-datamanager.mjs @@ -4,7 +4,7 @@ import { CosmosClient } from '@azure/cosmos' import chalk from 'chalk' import Config from './mylife-datasource-config.mjs' import Globals from './globals.mjs' -/* modular constants */ +/* module constants */ const mGlobals = new Globals() // define class class Datamanager { diff --git a/inc/js/mylife-llm-services.mjs b/inc/js/mylife-llm-services.mjs index cda0d03..16e41a5 100644 --- a/inc/js/mylife-llm-services.mjs +++ b/inc/js/mylife-llm-services.mjs @@ -1,5 +1,5 @@ import OpenAI from 'openai' -/* modular constants */ +/* module constants */ const { OPENAI_API_KEY: mOpenaiKey, OPENAI_BASE_URL: mBasePath, OPENAI_ORG_KEY: mOrganizationKey, OPENAI_API_CHAT_RESPONSE_PING_INTERVAL, OPENAI_API_CHAT_TIMEOUT, } = process.env const mTimeoutMs = parseInt(OPENAI_API_CHAT_TIMEOUT) || 55000 const mPingIntervalMs = parseInt(OPENAI_API_CHAT_RESPONSE_PING_INTERVAL) || 890 @@ -42,6 +42,7 @@ class LLMServices { } /** * Given member input, get a response from the specified LLM service. + * @todo - confirm that reason for **factory** is to run functions as responses from LLM; ergo in any case, find better way to stash/cache factory so it does not need to be passed through every such function * @param {string} threadId - Thread id. * @param {string} botId - GPT-Assistant/Bot id. * @param {string} prompt - Member input. @@ -57,6 +58,18 @@ class LLMServices { return llmMessages .filter(message=>message.role=='assistant' && message.run_id==run_id) } + /** + * Given member request for help, get response from specified bot assistant. + * @param {string} threadId - Thread id. + * @param {string} botId - GPT-Assistant/Bot id. + * @param {string} helpRequest - Member input. + * @param {AgentFactory} factory - Avatar Factory object to process request. 
+ * @returns {Promise} - openai `message` objects. + */ + async help(threadId, botId, helpRequest, factory){ + const helpResponse = await this.getLLMResponse(threadId, botId, helpRequest, factory) + return helpResponse + } /** * Create a new OpenAI thread. * @param {string} threadId - thread id @@ -76,10 +89,10 @@ class LLMServices { return this.#llmProviders } } -/* modular functions */ +/* module functions */ /** * Takes Member input request and assigns it to OpenAI thread for processing. - * @modular + * @module * @async * @param {OpenAI} openai - openai object * @param {string} threadId - thread id @@ -95,7 +108,7 @@ async function mAssignRequestToThread(openai, threadId, request){ } /** * Gets message from OpenAI thread. - * @modular + * @module * @async * @param {OpenAI} openai - openai object * @param {string} threadId - thread id @@ -111,7 +124,7 @@ async function mMessage(openai, threadId, messageId){ } /** * Format input for OpenAI. - * @modular + * @module * @param {string} message - message text * @returns {object} - synthetic openai `message` object */ @@ -124,7 +137,7 @@ function mMessage_openAI(message){ } /** * Gets messages from OpenAI thread. - * @modular + * @module * @async * @param {OpenAI} openai - openai object * @param {string} threadId - thread id @@ -135,7 +148,7 @@ async function mMessages(openai, threadId){ } /** * Maintains vigil for status of openAI `run = 'completed'`. - * @modular + * @module * @async * @param {OpenAI} openai - openai object * @param {object} run - [OpenAI run object](https://platform.openai.com/docs/api-reference/runs/object) @@ -167,7 +180,7 @@ async function mRunFinish(llmServices, run, factory){ /** * Executes openAI run functions. See https://platform.openai.com/docs/assistants/tools/function-calling/quickstart. 
* @todo - storysummary output action requires integration with factory/avatar data intersecting with story submission - * @modular + * @module * @private * @async * @param {object} run - [OpenAI run object](https://platform.openai.com/docs/api-reference/runs/object) @@ -187,6 +200,15 @@ async function mRunFunctions(openai, run, factory){ const { id, function: toolFunction, type, } = tool let { arguments: toolArguments, name, } = toolFunction switch(name.toLowerCase()){ + case 'hijackattempt': + case 'hijack_attempt': + case 'hijack attempt': + console.log('mRunFunctions()::hijack_attempt', toolArguments) + const confirmation = { + tool_call_id: id, + output: JSON.stringify({ success: true, }), + } + return confirmation case 'story': // storySummary.json case 'storysummary': case 'story-summary': @@ -237,7 +259,7 @@ async function mRunFunctions(openai, run, factory){ } /** * Returns all openai `run` objects for `thread`. - * @modular + * @module * @async * @param {OpenAI} openai - openai object * @param {string} threadId - Thread id @@ -249,7 +271,7 @@ async function mRuns(openai, threadId){ } /** * Checks status of openAI run. - * @modular + * @module * @async * @param {OpenAI} openai - openai object * @param {object} run - Run id @@ -282,7 +304,7 @@ async function mRunStatus(openai, run, factory){ } /** * Returns requested openai `run` object. - * @modular + * @module * @async * @param {Avatar} _avatar - Avatar object * @param {string} run_id - Run id @@ -298,7 +320,7 @@ async function mRunStep(_avatar, run_id, _step_id){ } /** * Returns all openai `run-step` objects for `run`. - * @modular + * @module * @async * @param {Avatar} _avatar - Avatar object * @param {string} run_id - Run id @@ -314,7 +336,7 @@ async function mRunSteps(_avatar, run_id){ } /** * Executes openAI run and returns associated `run` object. 
- * @modular + * @module * @param {OpenAI} openai - OpenAI object * @param {string} assistantId - Assistant id * @param {string} threadId - Thread id @@ -328,7 +350,7 @@ async function mRunStart(llmServices, assistantId, threadId){ } /** * Triggers openAI run and updates associated `run` object. - * @modular + * @module * @param {OpenAI} openai - OpenAI object * @param {string} botId - Bot id * @param {string} threadId - Thread id @@ -347,7 +369,7 @@ async function mRunTrigger(openai, botId, threadId, factory){ } /** * Create or retrieve an OpenAI thread. - * @modular + * @module * @param {OpenAI} openai - openai object * @param {string} threadId - thread id * @returns {Promise} - openai thread object diff --git a/inc/js/routes.mjs b/inc/js/routes.mjs index 916be45..926b2e7 100644 --- a/inc/js/routes.mjs +++ b/inc/js/routes.mjs @@ -11,6 +11,7 @@ import { collections, contributions, deleteItem, + help, index, interfaceMode, login, @@ -24,6 +25,7 @@ import { _upload } from './functions.mjs' import { + availableExperiences, experience, experienceCast, experienceEnd, @@ -50,12 +52,14 @@ _Router.get('/about', about) _Router.get('/alerts', alerts) _Router.get('/login/:mid', login) _Router.get('/logout', logout) +_Router.get('/experiences', availableExperiences) _Router.get('/select', loginSelect) _Router.get('/status', status) _Router.get('/privacy-policy', privacyPolicy) _Router.get('/signup', status_signup) _Router.post('/', chat) _Router.post('/challenge/:mid', challenge) +_Router.post('/help', help) _Router.post('/signup', signup) /* api webhook routes */ _apiRouter.use(tokenValidation) diff --git a/sample.env b/sample.env index 0508b94..8602740 100644 --- a/sample.env +++ b/sample.env @@ -7,8 +7,8 @@ OPENAI_MAHT_GITHUB=https://github.com/MyLife-Services/mylife-maht.git OPENAI_MAHT_GPT_OVERRIDE=asst_... 
# local dev override for engine so that local can use chaper model # 2000 chars (not tokens) is the max; disallows large code pastes, or rather, converts them as appropriate to file(s); **also** converts large text pastes to files a human member can use as well, though functionality TODO OPENAI_MAX_CONTEXT_WINDOW=2000 -OPENAI_MODEL_CORE_AVATAR=gpt-3.5-turbo -OPENAI_MODEL_CORE_BOT=gpt-3.5-turbo +OPENAI_MODEL_CORE_AVATAR=gpt-4o +OPENAI_MODEL_CORE_BOT=gpt-4o OPENAI_ORG_KEY=org-dTYDMEBuP2yb2qtwCQJA4HHh # MyLife org ID PORT=3000 MYLIFE_ALLOW_INTELLIGENT_QUESTIONS=false # almost deprecated, leave false diff --git a/server.js b/server.js index 4fd46f3..0b00cd9 100644 --- a/server.js +++ b/server.js @@ -92,7 +92,7 @@ app.use(koaBody({ ?? ctx.MyLife // point member to session member (logged in) or MAHT (not logged in) ctx.state.avatar = ctx.state.member.avatar ctx.state.contributions = ctx.state.avatar.contributions - ctx.state.interfaceMode = ctx.state.avatar?.mode??'standard' + ctx.state.interfaceMode = ctx.state.avatar?.mode ?? 
'standard' ctx.state.menu = ctx.MyLife.menu if(!await ctx.state.MemberSession.requestConsent(ctx)) ctx.throw(404,'asset request rejected by consent') diff --git a/views/assets/css/animations.css b/views/assets/css/animations.css new file mode 100644 index 0000000..eff5441 --- /dev/null +++ b/views/assets/css/animations.css @@ -0,0 +1,67 @@ +/* Keyframes */ +@keyframes blink { + 0%, 100% { opacity: 1; } + 60% { opacity: 0.5; } +} +@keyframes dialogFade { + 0% { opacity: 0; } + 10% { opacity: 1; } + 90% { opacity: 1; } + 100% { opacity: var(--dialog-final-opacity); } +} +@keyframes dropDown { + 100% { top: 0; } +} +@keyframes fadeIn { + 0% { opacity: 0; } + 100% { opacity: 1; } +} +@keyframes flipAnimation { + 0% { transform: rotateY(180deg); } + 100% { transform: rotateY(0); } +} +@keyframes textFadeIn { + 0% { opacity: 0; transform: translateY(20px); } + 100% { opacity: 1; transform: translateY(0); } +} +@keyframes fadeOut { + 0% { opacity: 1; } + 100% { opacity: 0; } +} +@keyframes slideInFromBottom { + 0% { transform: translateY(100%); } + 100% { transform: translateY(0); } +} +@keyframes slideInFromLeft { + 0% { transform: translateX(-100%); } + 100% { transform: translateX(0); } +} +@keyframes slideInFromRight { + 0% { transform: translateX(100%); } + 100% { transform: translateX(0); } +} +@keyframes slideInFromTop { + 0% { transform: translateY(-100%); } + 100% { transform: translateY(0); } +} +@keyframes spin { + 0% { transform: rotate(0deg); } + 100% { transform: rotate(360deg); } +} +/* squeeze-out */ +@keyframes squeezeOutFromBottom { + from { transform: scaleY(0); transform-origin: bottom; } /* Ensure scaling happens from the bottom */ + to { transform: scaleY(1); } +} +@keyframes squeezeOutFromLeft { + from { transform: scaleX(0); transform-origin: left; } /* Ensure scaling happens from the left */ + to { transform: scaleX(1); } +} +@keyframes squeezeOutFromRight { + from { transform: scaleX(0); transform-origin: right; } /* Ensure scaling happens from the 
right */ + to { transform: scaleX(1); } +} +@keyframes squeezeOutFromTop { + from { transform: scaleY(0); transform-origin: top; } /* Ensure scaling happens from the top */ + to { transform: scaleY(1); } +} \ No newline at end of file diff --git a/views/assets/css/bot-bar.css b/views/assets/css/bot-bar.css index 9a9c14b..a98cd7b 100644 --- a/views/assets/css/bot-bar.css +++ b/views/assets/css/bot-bar.css @@ -266,12 +266,8 @@ width: 100%; } .collection-popup { - background-color: white; - border: 1px solid black; - box-shadow: 0 0 10px rgba(0, 0, 0, 0.5); display: flex; flex: 1 0 auto; - gap: 0.5em; height: auto; left: auto; overflow: auto; @@ -282,24 +278,19 @@ visibility: hidden; width: 40em; } -.collection-popup-close { - position: absolute; - top: 10px; - right: 10px; - border: none; - background: none; - color: navy; - cursor: pointer; - font-size: 1.3em; +.collection-popup.popup-container { + bottom: auto; } -.collection-popup-content { - display: flex; - flex-direction: column; - flex-wrap: wrap; - justify-content: flex-start; - text-align: left; - max-width: 100%; - width: 100%; +.popup-close.collection-popup-close { + font-size: 1rem; +} +.collection-popup-close:hover { + background-color: rgba(255, 0, 0, 0.5); + border-color: aliceblue; + color: white; +} +.popup-content.collection-popup-content { + font-size: 0.75rem; } .collection-popup-visible { top: auto; diff --git a/views/assets/css/chat.css b/views/assets/css/chat.css index 6a40573..0d8c709 100644 --- a/views/assets/css/chat.css +++ b/views/assets/css/chat.css @@ -143,6 +143,9 @@ button.chat-submit:disabled { .chat-system::-webkit-scrollbar-thumb:hover { background: rgba(214, 198, 75, 0.5); /* Darker shade on hover */ } +.help-bubble { + animation: slideInFromBottom 0.5s ease-out; +} .label { color: rgba(240, 240, 240, .9); flex: 0 0 auto; diff --git a/views/assets/css/experience.css b/views/assets/css/experience.css index 9728e90..8475ec4 100644 --- a/views/assets/css/experience.css +++ 
b/views/assets/css/experience.css @@ -537,73 +537,6 @@ margin: auto 0.4em; width: 1.2em; } -/* Keyframes */ -@keyframes blink { - 0%, 100% { opacity: 1; } - 60% { opacity: 0.5; } -} -@keyframes dialogFade { - 0% { opacity: 0; } - 10% { opacity: 1; } - 90% { opacity: 1; } - 100% { opacity: var(--dialog-final-opacity); } -} -@keyframes dropDown { - 100% { top: 0; } -} -@keyframes fadeIn { - 0% { opacity: 0; } - 100% { opacity: 1; } -} -@keyframes flipAnimation { - 0% { transform: rotateY(180deg); } - 100% { transform: rotateY(0); } -} -@keyframes textFadeIn { - 0% { opacity: 0; transform: translateY(20px); } - 100% { opacity: 1; transform: translateY(0); } -} -@keyframes fadeOut { - 0% { opacity: 1; } - 100% { opacity: 0; } -} -@keyframes slideInFromBottom { - 0% { transform: translateY(100%); } - 100% { transform: translateY(0); } -} -@keyframes slideInFromLeft { - 0% { transform: translateX(-100%); } - 100% { transform: translateX(0); } -} -@keyframes slideInFromRight { - 0% { transform: translateX(100%); } - 100% { transform: translateX(0); } -} -@keyframes slideInFromTop { - 0% { transform: translateY(-100%); } - 100% { transform: translateY(0); } -} -@keyframes spin { - 0% { transform: rotate(0deg); } - 100% { transform: rotate(360deg); } -} -/* squeeze-out */ -@keyframes squeezeOutFromBottom { - from { transform: scaleY(0); transform-origin: bottom; } /* Ensure scaling happens from the left */ - to { transform: scaleY(1); } -} -@keyframes squeezeOutFromLeft { - from { transform: scaleX(0); transform-origin: left; } /* Ensure scaling happens from the left */ - to { transform: scaleX(1); } -} -@keyframes squeezeOutFromRight { - from { transform: scaleX(0); transform-origin: right; } /* Ensure scaling happens from the left */ - to { transform: scaleX(1); } -} -@keyframes squeezeOutFromTop { - from { transform: scaleY(0); transform-origin: top; } /* Ensure scaling happens from the left */ - to { transform: scaleY(1); } -} /* media queries */ /* Set a minimum font 
size */ @media (max-width: 480px) { diff --git a/views/assets/css/main.css b/views/assets/css/main.css index d99e8a8..885fc8b 100644 --- a/views/assets/css/main.css +++ b/views/assets/css/main.css @@ -1,3 +1,4 @@ +@import url('animations.css'); @import url('bot-bar.css'); @import url('chat.css'); @import url('experience.css'); @@ -49,8 +50,7 @@ body { .navigation-help, .navigation-login-logout, .navigation-nav, -.navigation-padding, -.navigation-search { +.navigation-padding { align-items: center; display: flex; flex: none; /* Prevents items from growing */ @@ -71,20 +71,6 @@ body { .navigation-help:hover { color: purple; /* Your existing styles for active link */ } -.help-container { - background: #f9f9f9; - color: navy; - cursor: default; - display: none; - flex-direction: column; - font-size: 1em; - padding: 10px; - position: absolute; - top: 2.2em; /* Position it right below the parent */ - right: 0; - width: 50%; - z-index: 100; /* Make sure it's above other elements */ -} .navigation-login-logout { align-self: flex-end; display: flex; @@ -329,6 +315,248 @@ body { align-items: center; padding: 0em 1.05em 1.05em 1.05em; } +/* MyLife Generic Popup */ +.popup-await { + align-items: center; + animation: slideInFromBottom 1s ease-out; + border: solid aliceblue; + background-color: navy; + display: none; + justify-content: center; + margin: 0.5rem 1rem; + padding: 0.5rem 1rem; + width: auto; +} +.popup-await-text { + animation: blink 1.5s infinite; + color: aliceblue; + font-size: 1rem; + font-weight: normal; +} +.popup-close { + position: absolute; + top: .5em; + right: .5em; + cursor: pointer; + font-size: 1.5rem; +} +.popup-container { + background-color: aliceblue; + bottom: 2em; + border: thin solid #ccc; + border-radius: 0.4em; + color: navy; + display: flex; + left: 0; + margin: 0; + padding: 0; + position: absolute; + width: 45%; /* default width */ + z-index: 100; +} +.popup-content { + color: navy; + display: flex; + flex: 1 1 auto; /* grows to inhabit */ 
+ font-size: 1rem; + height: auto; + max-height: 100%; + overflow-x: hidden; + overflow-y: auto; + padding: 1em; + position: relative; +} +.popup-dialog { + display: flex; + flex-direction: column; + justify-content: flex-start; + margin: 0; + padding: 0; + width: 100%; +} +.popup-dialog-box { + background-color: rgba(229, 59, 0, 1); + border: solid navy; + border-radius: 0.4em; + color: aliceblue; + display: flex; + flex-direction: row; + justify-content: flex-start; + margin: 0 1rem; + overflow: visible; + padding: .5rem; + width: 85%; +} +.popup-header { + align-items: center; + border-bottom: thin solid #eee; + display: flex; + font-size: 1.25rem; + justify-content: flex-start; + padding: 0.75rem; +} +.popup-input { + align-items: center; + border-top: thin solid black; + display: flex; + flex-direction: row; + font-size: 1rem; + margin: 0; + padding: 0; + width: 100%; +} +.popup-input-prompt { + padding: 0 0.5em; +} +.popup-input-submit { + padding: 0 0.5em; +} +.popup-input-text { + flex: 1 1 auto; + width: 100%; + padding: 0.25rem; + font-size: 1rem; +} +.popup-refresh { + cursor: pointer; + position: absolute; + right: 1em; + top: 1em; +} +.popup-title { + margin-left: 1em; + font-weight: bold; +} +/* MyLife Help System */ +.help-await { + border: medium solid lightpink; + border-radius: 0.6rem; +} +.help-button { + align-content: center; + background-color: #741237; + border: medium solid aliceblue; + border-radius: 0.25rem; + color: aliceblue; + cursor: pointer; + display: flex; + font-size: 0.9rem; + font-weight: bold; + margin: 0.5rem; + padding: 0.5rem; +} +.help-await-text { + font-size: 1.2rem; + font-weight: bold; +} +.help-close { + padding: 0.1em; +} +.help-close:hover { + background-color: white; + cursor: pointer; + border: thin solid red; + border-radius: .25em; +} +.help-container { + border: solid indianred; + box-shadow: 0 4px 8px rgba(0,0,0,0.1); + cursor: default; + display: flex; + flex-direction: column; + font-size: 1em; + left: auto; + 
position: absolute; + right: 1em; + top: 2.2em; /* Position it right below the parent */ + width: 63%; + z-index: 100; /* Make sure it's above other elements */ +} +.help-header { + font-size: 1.5rem; +} +.help-icon img { + height: 2em; + width: 2em; +} +.help-title { + padding-right: 1em; +} +.help-type { + background-color: #f9f9f9; + display: flex; + font-size: 1rem; + justify-content: space-evenly; + padding: 10px; +} +.help-type-item { + border-radius: 0.4em; + cursor: pointer; + padding: 5px 10px; +} +.help-type-item:hover { + background-color: #65a3a9; + color: white; + opacity: 1; +} +.help-type-item.active { + background-color: orangered; + color: white; + cursor: not-allowed; +} +.help-type-item { + background-color: aliceblue; + color: darkgray; + cursor: pointer; +} +.help-chat { + border: #4CAF50 1px solid; + display: flex; +} +.help-error { + align-items: center; + background-color: darkred; + color: lightcoral; + display: none; + font-size: 0.9rem; + padding: 0.5rem 0; + width: 100%; +} +.help-error-close { + align-self: flex-start; + background-color: transparent; + color: white; + cursor: pointer; + font-size: 1.5rem; + margin: 0 0.5rem; + width: auto; +} +.help-error-icon { + background-color: transparent; + font-size: 1.5rem; + margin: 0 0.7rem; + width: auto; +} +.help-error-text { + color: white; + width: 100%; +} +.help-input { + border: none; +} +.help-input-submit { + text-wrap: nowrap; + width: auto; +} +.help-input-text { + border: thin solid #ccc; + border-radius: 0.25rem; + color: black; + margin: 0.5rem; +} +.help-input-text:focus { + color: purple; +} /* MyLife sidebar */ .input { margin-top: 6px; @@ -466,6 +694,10 @@ body { 90% { opacity: 1; } 100% { opacity: 0; } } +@keyframes helpInitiatorFade { + 0% { opacity: 0; } + 100% { opacity: 1; } +} /* reverse for fade out */ /* media queries */ @media screen and (min-width: 1024px) { body { diff --git a/views/assets/html/_navbar.html b/views/assets/html/_navbar.html index 12fb190..b7b71b4 
100644 --- a/views/assets/html/_navbar.html +++ b/views/assets/html/_navbar.html @@ -9,12 +9,40 @@