From 042a1bdd176e7ebe803bb0bcd2d34527ab71f00e Mon Sep 17 00:00:00 2001 From: mongooseim Date: Tue, 6 Aug 2024 13:32:16 +0000 Subject: [PATCH] Deployed 0f5700b43 to latest with MkDocs 1.6.0 and mike 2.1.2 --- latest/404.html | 2 +- latest/Contributions/index.html | 2 +- latest/History/index.html | 2 +- .../anonymous/index.html | 2 +- .../authentication-methods/dummy/index.html | 2 +- .../external/index.html | 2 +- latest/authentication-methods/http/index.html | 2 +- latest/authentication-methods/jwt/index.html | 2 +- latest/authentication-methods/ldap/index.html | 2 +- latest/authentication-methods/pki/index.html | 2 +- .../authentication-methods/rdbms/index.html | 2 +- .../Erlang-cookie-security/index.html | 2 +- latest/configuration/Modules/index.html | 2 +- latest/configuration/Services/index.html | 2 +- latest/configuration/TLS-hardening/index.html | 2 +- latest/configuration/access/index.html | 2 +- latest/configuration/acl/index.html | 2 +- latest/configuration/auth/index.html | 2 +- .../configuration-files/index.html | 2 +- .../index.html | 2 +- latest/configuration/general/index.html | 2 +- latest/configuration/host_config/index.html | 2 +- .../internal-databases/index.html | 2 +- latest/configuration/listen/index.html | 2 +- .../outgoing-connections/index.html | 2 +- .../configuration/release-options/index.html | 2 +- latest/configuration/s2s/index.html | 2 +- latest/configuration/shaper/index.html | 2 +- .../Basic-iq-handler/index.html | 2 +- .../Bootstrap-Scripts/index.html | 2 +- .../Hooks-and-handlers/index.html | 2 +- .../OpenSSL-and-FIPS/index.html | 2 +- .../SCRAM-serialization/index.html | 2 +- .../Stanza-routing/index.html | 2 +- .../Testing-MongooseIM/index.html | 2 +- .../developers-guide/accumulators/index.html | 2 +- .../domain_management/index.html | 2 +- .../hooks_description/index.html | 2 +- latest/developers-guide/logging/index.html | 2 +- .../mod_amp_developers_guide/index.html | 2 +- .../mod_muc_light_developers_guide/index.html | 2 +- 
.../mongoose_wpool/index.html | 2 +- .../release_config/index.html | 2 +- latest/developers-guide/xep_tool/index.html | 2 +- .../getting-started/Installation/index.html | 2 +- latest/getting-started/Quick-setup/index.html | 2 +- latest/graphql-api/Admin-GraphQL/index.html | 2 +- latest/graphql-api/User-GraphQL/index.html | 2 +- latest/graphql-api/admin-graphql-doc.html | 372 +++++++++--------- latest/graphql-api/user-graphql-doc.html | 238 +++++------ latest/index.html | 2 +- latest/listeners/listen-c2s/index.html | 2 +- latest/listeners/listen-components/index.html | 2 +- latest/listeners/listen-http/index.html | 2 +- latest/listeners/listen-s2s/index.html | 2 +- latest/migrations/3.1.1_3.2.0/index.html | 2 +- latest/migrations/3.3.0_3.4.0/index.html | 2 +- latest/migrations/3.5.0_3.6.0/index.html | 2 +- latest/migrations/3.6.0_3.7.0/index.html | 2 +- latest/migrations/3.7.0_4.0.0/index.html | 2 +- latest/migrations/4.0.0_4.0.1/index.html | 2 +- latest/migrations/4.0.1_4.1.0/index.html | 2 +- latest/migrations/4.1.0_4.2.0/index.html | 2 +- latest/migrations/4.2.0_5.0.0/index.html | 2 +- latest/migrations/5.0.0_5.1.0/index.html | 2 +- latest/migrations/5.1.0_6.0.0/index.html | 2 +- latest/migrations/6.0.0_6.1.0/index.html | 2 +- latest/migrations/6.1.0_6.2.0/index.html | 2 +- latest/migrations/6.2.0_6.2.1/index.html | 2 +- latest/migrations/6.2.1_x.x.x/index.html | 2 +- .../jid-from-mam-muc-script/index.html | 2 +- latest/modules/mod_adhoc/index.html | 2 +- latest/modules/mod_amp/index.html | 2 +- latest/modules/mod_auth_token/index.html | 13 +- latest/modules/mod_bind2/index.html | 2 +- latest/modules/mod_blocking/index.html | 2 +- latest/modules/mod_bosh/index.html | 2 +- latest/modules/mod_cache_users/index.html | 2 +- latest/modules/mod_caps/index.html | 2 +- latest/modules/mod_carboncopy/index.html | 2 +- latest/modules/mod_csi/index.html | 2 +- latest/modules/mod_disco/index.html | 2 +- .../modules/mod_domain_isolation/index.html | 2 +- 
latest/modules/mod_event_pusher/index.html | 2 +- .../modules/mod_event_pusher_http/index.html | 2 +- .../modules/mod_event_pusher_push/index.html | 2 +- .../mod_event_pusher_rabbit/index.html | 2 +- .../modules/mod_event_pusher_sns/index.html | 2 +- latest/modules/mod_extdisco/index.html | 2 +- latest/modules/mod_global_distrib/index.html | 2 +- latest/modules/mod_http_upload/index.html | 2 +- latest/modules/mod_inbox/index.html | 2 +- latest/modules/mod_jingle_sip/index.html | 2 +- latest/modules/mod_keystore/index.html | 7 +- latest/modules/mod_last/index.html | 2 +- latest/modules/mod_mam/index.html | 2 +- latest/modules/mod_muc/index.html | 2 +- latest/modules/mod_muc_light/index.html | 2 +- latest/modules/mod_muc_log/index.html | 2 +- latest/modules/mod_offline/index.html | 2 +- latest/modules/mod_offline_stub/index.html | 2 +- latest/modules/mod_ping/index.html | 2 +- latest/modules/mod_presence/index.html | 2 +- latest/modules/mod_privacy/index.html | 2 +- latest/modules/mod_private/index.html | 2 +- latest/modules/mod_pubsub/index.html | 2 +- .../mod_push_service_mongoosepush/index.html | 2 +- latest/modules/mod_register/index.html | 2 +- latest/modules/mod_roster/index.html | 2 +- latest/modules/mod_sasl2/index.html | 2 +- .../modules/mod_shared_roster_ldap/index.html | 2 +- latest/modules/mod_sic/index.html | 2 +- latest/modules/mod_smart_markers/index.html | 2 +- .../modules/mod_stream_management/index.html | 2 +- latest/modules/mod_time/index.html | 2 +- latest/modules/mod_vcard/index.html | 2 +- latest/modules/mod_version/index.html | 2 +- latest/open-extensions/inbox/index.html | 2 +- latest/open-extensions/mam/index.html | 2 +- latest/open-extensions/muc_light/index.html | 2 +- .../open-extensions/smart-markers/index.html | 2 +- .../token-reconnection/index.html | 2 +- .../index.html | 2 +- .../index.html | 2 +- .../Cluster-restart/index.html | 2 +- .../Humio/index.html | 2 +- .../Logging-&-monitoring/index.html | 2 +- .../Logging-fields/index.html 
| 2 +- .../Logging/index.html | 2 +- .../MongooseIM-metrics/index.html | 2 +- .../Rolling-upgrade/index.html | 2 +- .../System-Metrics-Privacy-Policy/index.html | 2 +- .../gdpr-considerations/index.html | 2 +- .../known-issues/index.html | 2 +- .../tls-distribution/index.html | 2 +- .../Administration-backend/index.html | 2 +- latest/rest-api/Client-frontend/index.html | 2 +- latest/search/search_index.json | 2 +- latest/sitemap.xml | 296 +++++++------- latest/sitemap.xml.gz | Bin 1547 -> 1547 bytes latest/tutorials/CETS-configure/index.html | 2 +- latest/tutorials/Docker-build/index.html | 2 +- latest/tutorials/How-to-build/index.html | 2 +- latest/tutorials/ICE_tutorial/index.html | 2 +- latest/tutorials/Jingle-SIP-setup/index.html | 2 +- .../tutorials/client-certificate/index.html | 2 +- latest/tutorials/iOS_tutorial/index.html | 2 +- .../MongoosePush-setup/index.html | 2 +- .../Push-notifications-client-side/index.html | 2 +- .../Push-notifications/index.html | 2 +- latest/user-guide/Features/index.html | 2 +- .../High-level-Architecture/index.html | 2 +- latest/user-guide/Supported-XEPs/index.html | 2 +- .../user-guide/Supported-standards/index.html | 2 +- 154 files changed, 615 insertions(+), 607 deletions(-) diff --git a/latest/404.html b/latest/404.html index 7028d84ae..e1912e6d1 100644 --- a/latest/404.html +++ b/latest/404.html @@ -16,7 +16,7 @@ - + diff --git a/latest/Contributions/index.html b/latest/Contributions/index.html index f5d812c79..992a77885 100644 --- a/latest/Contributions/index.html +++ b/latest/Contributions/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/History/index.html b/latest/History/index.html index edc225ba8..3c5bbb8b8 100644 --- a/latest/History/index.html +++ b/latest/History/index.html @@ -20,7 +20,7 @@ - + diff --git a/latest/authentication-methods/anonymous/index.html b/latest/authentication-methods/anonymous/index.html index fca16fc3c..02feae967 100644 --- a/latest/authentication-methods/anonymous/index.html +++ 
b/latest/authentication-methods/anonymous/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/authentication-methods/dummy/index.html b/latest/authentication-methods/dummy/index.html index da1cb372a..4fd24aecc 100644 --- a/latest/authentication-methods/dummy/index.html +++ b/latest/authentication-methods/dummy/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/authentication-methods/external/index.html b/latest/authentication-methods/external/index.html index 443a7a2df..95c83f37b 100644 --- a/latest/authentication-methods/external/index.html +++ b/latest/authentication-methods/external/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/authentication-methods/http/index.html b/latest/authentication-methods/http/index.html index 5b0533e30..dc9745db0 100644 --- a/latest/authentication-methods/http/index.html +++ b/latest/authentication-methods/http/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/authentication-methods/jwt/index.html b/latest/authentication-methods/jwt/index.html index 071cec03f..16d270479 100644 --- a/latest/authentication-methods/jwt/index.html +++ b/latest/authentication-methods/jwt/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/authentication-methods/ldap/index.html b/latest/authentication-methods/ldap/index.html index 860e1f73d..d75f64aba 100644 --- a/latest/authentication-methods/ldap/index.html +++ b/latest/authentication-methods/ldap/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/authentication-methods/pki/index.html b/latest/authentication-methods/pki/index.html index 03ee0e040..ff4fe334e 100644 --- a/latest/authentication-methods/pki/index.html +++ b/latest/authentication-methods/pki/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/authentication-methods/rdbms/index.html b/latest/authentication-methods/rdbms/index.html index aa24b7e93..f487d012c 100644 --- a/latest/authentication-methods/rdbms/index.html +++ b/latest/authentication-methods/rdbms/index.html @@ -22,7 +22,7 @@ - + diff --git 
a/latest/configuration/Erlang-cookie-security/index.html b/latest/configuration/Erlang-cookie-security/index.html index 5fdd15e2b..eaecf6fa2 100644 --- a/latest/configuration/Erlang-cookie-security/index.html +++ b/latest/configuration/Erlang-cookie-security/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/configuration/Modules/index.html b/latest/configuration/Modules/index.html index 63e1fd487..ade274314 100644 --- a/latest/configuration/Modules/index.html +++ b/latest/configuration/Modules/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/configuration/Services/index.html b/latest/configuration/Services/index.html index f841700a0..70836422e 100644 --- a/latest/configuration/Services/index.html +++ b/latest/configuration/Services/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/configuration/TLS-hardening/index.html b/latest/configuration/TLS-hardening/index.html index 6a8ed92d3..6bdcbe296 100644 --- a/latest/configuration/TLS-hardening/index.html +++ b/latest/configuration/TLS-hardening/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/configuration/access/index.html b/latest/configuration/access/index.html index 0c8d8a27d..cd766d5c1 100644 --- a/latest/configuration/access/index.html +++ b/latest/configuration/access/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/configuration/acl/index.html b/latest/configuration/acl/index.html index 101a96e15..e80ed0182 100644 --- a/latest/configuration/acl/index.html +++ b/latest/configuration/acl/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/configuration/auth/index.html b/latest/configuration/auth/index.html index df749abab..114272e3c 100644 --- a/latest/configuration/auth/index.html +++ b/latest/configuration/auth/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/configuration/configuration-files/index.html b/latest/configuration/configuration-files/index.html index 95fb1fd4c..2ba6bfcb3 100644 --- a/latest/configuration/configuration-files/index.html +++ 
b/latest/configuration/configuration-files/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/configuration/database-backends-configuration/index.html b/latest/configuration/database-backends-configuration/index.html index 077388ccf..e4dbeaad0 100644 --- a/latest/configuration/database-backends-configuration/index.html +++ b/latest/configuration/database-backends-configuration/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/configuration/general/index.html b/latest/configuration/general/index.html index 86cf4acce..b80784840 100644 --- a/latest/configuration/general/index.html +++ b/latest/configuration/general/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/configuration/host_config/index.html b/latest/configuration/host_config/index.html index 2dac69c2b..e4c1601a5 100644 --- a/latest/configuration/host_config/index.html +++ b/latest/configuration/host_config/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/configuration/internal-databases/index.html b/latest/configuration/internal-databases/index.html index 8657a0470..8950622c1 100644 --- a/latest/configuration/internal-databases/index.html +++ b/latest/configuration/internal-databases/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/configuration/listen/index.html b/latest/configuration/listen/index.html index 347d0b742..a4fe9e523 100644 --- a/latest/configuration/listen/index.html +++ b/latest/configuration/listen/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/configuration/outgoing-connections/index.html b/latest/configuration/outgoing-connections/index.html index 84344d7b0..f70672d25 100644 --- a/latest/configuration/outgoing-connections/index.html +++ b/latest/configuration/outgoing-connections/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/configuration/release-options/index.html b/latest/configuration/release-options/index.html index 5e263901a..911820455 100644 --- a/latest/configuration/release-options/index.html +++ b/latest/configuration/release-options/index.html @@ -22,7 
+22,7 @@ - + diff --git a/latest/configuration/s2s/index.html b/latest/configuration/s2s/index.html index 26e005b2f..cabae1c92 100644 --- a/latest/configuration/s2s/index.html +++ b/latest/configuration/s2s/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/configuration/shaper/index.html b/latest/configuration/shaper/index.html index e8cbd8565..2b913a784 100644 --- a/latest/configuration/shaper/index.html +++ b/latest/configuration/shaper/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/developers-guide/Basic-iq-handler/index.html b/latest/developers-guide/Basic-iq-handler/index.html index 777a8bb9b..45332c72f 100644 --- a/latest/developers-guide/Basic-iq-handler/index.html +++ b/latest/developers-guide/Basic-iq-handler/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/developers-guide/Bootstrap-Scripts/index.html b/latest/developers-guide/Bootstrap-Scripts/index.html index fa8d13a34..71ace361f 100644 --- a/latest/developers-guide/Bootstrap-Scripts/index.html +++ b/latest/developers-guide/Bootstrap-Scripts/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/developers-guide/Hooks-and-handlers/index.html b/latest/developers-guide/Hooks-and-handlers/index.html index 04d63fd64..a8c2b4f47 100644 --- a/latest/developers-guide/Hooks-and-handlers/index.html +++ b/latest/developers-guide/Hooks-and-handlers/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/developers-guide/OpenSSL-and-FIPS/index.html b/latest/developers-guide/OpenSSL-and-FIPS/index.html index 844d6f0ef..cd94f0425 100644 --- a/latest/developers-guide/OpenSSL-and-FIPS/index.html +++ b/latest/developers-guide/OpenSSL-and-FIPS/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/developers-guide/SCRAM-serialization/index.html b/latest/developers-guide/SCRAM-serialization/index.html index ce22c3e0b..702c5cc2d 100644 --- a/latest/developers-guide/SCRAM-serialization/index.html +++ b/latest/developers-guide/SCRAM-serialization/index.html @@ -22,7 +22,7 @@ - + diff --git 
a/latest/developers-guide/Stanza-routing/index.html b/latest/developers-guide/Stanza-routing/index.html index 61fdb55e9..db986a255 100644 --- a/latest/developers-guide/Stanza-routing/index.html +++ b/latest/developers-guide/Stanza-routing/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/developers-guide/Testing-MongooseIM/index.html b/latest/developers-guide/Testing-MongooseIM/index.html index fab0fd939..8a6c951fd 100644 --- a/latest/developers-guide/Testing-MongooseIM/index.html +++ b/latest/developers-guide/Testing-MongooseIM/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/developers-guide/accumulators/index.html b/latest/developers-guide/accumulators/index.html index c6f436283..0aa69d05b 100644 --- a/latest/developers-guide/accumulators/index.html +++ b/latest/developers-guide/accumulators/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/developers-guide/domain_management/index.html b/latest/developers-guide/domain_management/index.html index 7fbbab5fa..855b51a07 100644 --- a/latest/developers-guide/domain_management/index.html +++ b/latest/developers-guide/domain_management/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/developers-guide/hooks_description/index.html b/latest/developers-guide/hooks_description/index.html index 9ac3aecdb..d8252205d 100644 --- a/latest/developers-guide/hooks_description/index.html +++ b/latest/developers-guide/hooks_description/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/developers-guide/logging/index.html b/latest/developers-guide/logging/index.html index 9ded5a69d..6fc968075 100644 --- a/latest/developers-guide/logging/index.html +++ b/latest/developers-guide/logging/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/developers-guide/mod_amp_developers_guide/index.html b/latest/developers-guide/mod_amp_developers_guide/index.html index ec97ba3c5..76b786e03 100644 --- a/latest/developers-guide/mod_amp_developers_guide/index.html +++ b/latest/developers-guide/mod_amp_developers_guide/index.html @@ 
-22,7 +22,7 @@ - + diff --git a/latest/developers-guide/mod_muc_light_developers_guide/index.html b/latest/developers-guide/mod_muc_light_developers_guide/index.html index fc55f91c9..34c1bf2f5 100644 --- a/latest/developers-guide/mod_muc_light_developers_guide/index.html +++ b/latest/developers-guide/mod_muc_light_developers_guide/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/developers-guide/mongoose_wpool/index.html b/latest/developers-guide/mongoose_wpool/index.html index 68f87e70a..4e1ba9801 100644 --- a/latest/developers-guide/mongoose_wpool/index.html +++ b/latest/developers-guide/mongoose_wpool/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/developers-guide/release_config/index.html b/latest/developers-guide/release_config/index.html index 0123be08b..a59990581 100644 --- a/latest/developers-guide/release_config/index.html +++ b/latest/developers-guide/release_config/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/developers-guide/xep_tool/index.html b/latest/developers-guide/xep_tool/index.html index d66eac5ec..d7dc5d0d4 100644 --- a/latest/developers-guide/xep_tool/index.html +++ b/latest/developers-guide/xep_tool/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/getting-started/Installation/index.html b/latest/getting-started/Installation/index.html index 074316bdf..da4d89bc4 100644 --- a/latest/getting-started/Installation/index.html +++ b/latest/getting-started/Installation/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/getting-started/Quick-setup/index.html b/latest/getting-started/Quick-setup/index.html index 18eab04b8..9ae09db6b 100644 --- a/latest/getting-started/Quick-setup/index.html +++ b/latest/getting-started/Quick-setup/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/graphql-api/Admin-GraphQL/index.html b/latest/graphql-api/Admin-GraphQL/index.html index b2030ee62..80177c2a9 100644 --- a/latest/graphql-api/Admin-GraphQL/index.html +++ b/latest/graphql-api/Admin-GraphQL/index.html @@ -22,7 +22,7 @@ - + diff --git 
a/latest/graphql-api/User-GraphQL/index.html b/latest/graphql-api/User-GraphQL/index.html index ea7c03bb4..7e7f73a9b 100644 --- a/latest/graphql-api/User-GraphQL/index.html +++ b/latest/graphql-api/User-GraphQL/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/graphql-api/admin-graphql-doc.html b/latest/graphql-api/admin-graphql-doc.html index 4e884ac19..2e1e02476 100644 --- a/latest/graphql-api/admin-graphql-doc.html +++ b/latest/graphql-api/admin-graphql-doc.html @@ -338,7 +338,7 @@
Response
"data": { "account": { "listUsers": ["xyz789"], - "countUsers": 123, + "countUsers": 987, "checkPassword": CheckPasswordPayload, "checkPasswordHash": CheckPasswordPayload, "checkUser": CheckUserPayload @@ -1197,12 +1197,12 @@
Response
"data": { "session": { "listSessions": [Session], - "countSessions": 123, + "countSessions": 987, "listUserSessions": [Session], - "countUserResources": 987, + "countUserResources": 123, "getUserResource": "res1", "listUsersWithStatus": [UserStatus], - "countUsersWithStatus": 123 + "countUsersWithStatus": 987 } } } @@ -1627,7 +1627,7 @@
Response
{
   "data": {
-    "inbox": {"flushUserBin": 987, "flushDomainBin": 123, "flushGlobalBin": 123}
+    "inbox": {"flushUserBin": 123, "flushDomainBin": 123, "flushGlobalBin": 987}
   }
 }
 
@@ -1751,14 +1751,14 @@
Response
{
   "data": {
     "mnesia": {
-      "setMaster": "xyz789",
-      "changeNodename": "abc123",
-      "backup": "xyz789",
+      "setMaster": "abc123",
+      "changeNodename": "xyz789",
+      "backup": "abc123",
       "restore": "xyz789",
       "dump": "xyz789",
-      "dumpTable": "xyz789",
-      "load": "abc123",
-      "installFallback": "abc123"
+      "dumpTable": "abc123",
+      "load": "xyz789",
+      "installFallback": "xyz789"
     }
   }
 }
@@ -1830,14 +1830,14 @@ 
Response
"muc": { "createInstantRoom": MUCRoomDesc, "inviteUser": "abc123", - "kickUser": "xyz789", + "kickUser": "abc123", "sendMessageToRoom": "abc123", "sendPrivateMessage": "xyz789", - "deleteRoom": "abc123", + "deleteRoom": "xyz789", "changeRoomConfiguration": MUCRoomConfig, - "setUserRole": "xyz789", - "setUserAffiliation": "xyz789", - "enterRoom": "abc123", + "setUserRole": "abc123", + "setUserAffiliation": "abc123", + "enterRoom": "xyz789", "exitRoom": "abc123" } } @@ -1906,11 +1906,11 @@
Response
"muc_light": { "createRoom": Room, "changeRoomConfiguration": Room, - "inviteUser": "xyz789", + "inviteUser": "abc123", "deleteRoom": "abc123", - "kickUser": "abc123", + "kickUser": "xyz789", "sendMessageToRoom": "xyz789", - "setBlockingList": "abc123" + "setBlockingList": "xyz789" } } } @@ -1968,7 +1968,7 @@
Response
"data": { "offline": { "deleteExpiredMessages": "xyz789", - "deleteOldMessages": "abc123" + "deleteOldMessages": "xyz789" } } } @@ -2087,14 +2087,14 @@
Response
{
   "data": {
     "roster": {
-      "addContact": "xyz789",
+      "addContact": "abc123",
       "addContacts": ["xyz789"],
-      "subscription": "xyz789",
+      "subscription": "abc123",
       "deleteContact": "xyz789",
-      "deleteContacts": ["abc123"],
-      "setMutualSubscription": "xyz789",
-      "subscribeToAll": ["xyz789"],
-      "subscribeAllToAll": ["xyz789"]
+      "deleteContacts": ["xyz789"],
+      "setMutualSubscription": "abc123",
+      "subscribeToAll": ["abc123"],
+      "subscribeAllToAll": ["abc123"]
     }
   }
 }
@@ -2157,11 +2157,11 @@ 
Response
"data": { "server": { "joinCluster": "xyz789", - "leaveCluster": "abc123", - "removeFromCluster": "xyz789", - "restart": "abc123", - "stop": "abc123", - "removeNode": "abc123", + "leaveCluster": "xyz789", + "removeFromCluster": "abc123", + "restart": "xyz789", + "stop": "xyz789", + "removeNode": "xyz789", "setLoglevel": "abc123" } } @@ -2354,7 +2354,7 @@
Response
"data": { "token": { "requestToken": Token, - "revokeToken": "xyz789" + "revokeToken": "abc123" } } } @@ -2821,12 +2821,12 @@
Example
{
   "tags": ["HOME"],
-  "pobox": "abc123",
-  "extadd": "abc123",
-  "street": "xyz789",
+  "pobox": "xyz789",
+  "extadd": "xyz789",
+  "street": "abc123",
   "locality": "xyz789",
-  "region": "xyz789",
-  "pcode": "abc123",
+  "region": "abc123",
+  "pcode": "xyz789",
   "country": "xyz789"
 }
 
@@ -2913,9 +2913,9 @@
Example
{
   "tags": ["HOME"],
   "pobox": "abc123",
-  "extadd": "abc123",
-  "street": "abc123",
-  "locality": "xyz789",
+  "extadd": "xyz789",
+  "street": "xyz789",
+  "locality": "abc123",
   "region": "xyz789",
   "pcode": "xyz789",
   "country": "xyz789"
@@ -3417,7 +3417,7 @@ 
Fields
Example
-
{"binValue": "abc123"}
+                    
{"binValue": "xyz789"}
 
@@ -3799,17 +3799,17 @@
Example
"availableNodes": ["xyz789"], "unavailableNodes": ["abc123"], "joinedNodes": ["xyz789"], - "discoveredNodes": ["abc123"], + "discoveredNodes": ["xyz789"], "remoteNodesWithoutDisco": ["xyz789"], "remoteNodesWithUnknownTables": [ "xyz789" ], "remoteUnknownTables": ["xyz789"], "remoteNodesWithMissingTables": [ - "xyz789" + "abc123" ], "remoteMissingTables": ["abc123"], - "conflictNodes": ["abc123"], + "conflictNodes": ["xyz789"], "conflictTables": ["abc123"], "discoveryWorks": true } @@ -3919,17 +3919,17 @@
Example
{
   "name": ["abc123"],
   "type": "histogram",
-  "available_nodes": 123,
+  "available_nodes": 987,
   "unavailable_nodes": 987,
   "joined_nodes": 987,
-  "remote_nodes_without_disco": 987,
-  "remote_nodes_with_unknown_tables": 987,
-  "remote_unknown_tables": 987,
+  "remote_nodes_without_disco": 123,
+  "remote_nodes_with_unknown_tables": 123,
+  "remote_unknown_tables": 123,
   "remote_nodes_with_missing_tables": 987,
   "remote_missing_tables": 987,
   "conflict_nodes": 987,
-  "conflict_tables": 123,
-  "discovered_nodes": 123,
+  "conflict_tables": 987,
+  "discovered_nodes": 987,
   "discovery_works": 987
 }
 
@@ -3987,7 +3987,7 @@
Example
{
   "tableName": "xyz789",
-  "memory": 123,
+  "memory": 987,
   "size": 123,
   "nodes": ["xyz789"]
 }
@@ -4038,7 +4038,7 @@ 
Fields
Example
-
{"correct": true, "message": "xyz789"}
+                    
{"correct": true, "message": "abc123"}
 
@@ -4086,7 +4086,7 @@
Fields
Example
-
{"exist": false, "message": "abc123"}
+                    
{"exist": true, "message": "abc123"}
 
@@ -4151,7 +4151,7 @@
Example
{
   "jid": "alice@localhost",
-  "name": "xyz789",
+  "name": "abc123",
   "groups": ["xyz789"],
   "subscription": "NONE",
   "ask": "SUBSCRIBE"
@@ -4409,7 +4409,7 @@ 
Example
"name": ["abc123"], "type": "histogram", "value": 123, - "ms_since_reset": 123 + "ms_since_reset": 987 }
@@ -4674,8 +4674,8 @@
Example
"requestRemoveDomain": Domain, "enableDomain": Domain, "disableDomain": Domain, - "setDomainPassword": "abc123", - "deleteDomainPassword": "abc123" + "setDomainPassword": "xyz789", + "deleteDomainPassword": "xyz789" }
@@ -4818,7 +4818,7 @@
Fields
Example
-
{"registeredUsers": 123, "onlineUsers": 123}
+                    
{"registeredUsers": 987, "onlineUsers": 123}
 
@@ -5073,7 +5073,7 @@
Fields
Example
-
{"extValue": "xyz789"}
+                    
{"extValue": "abc123"}
 
@@ -5127,8 +5127,8 @@
Example
{
-  "putUrl": "xyz789",
-  "getUrl": "xyz789",
+  "putUrl": "abc123",
+  "getUrl": "abc123",
   "headers": [Header]
 }
 
@@ -5317,7 +5317,7 @@
Example
{
-  "lat": "abc123",
+  "lat": "xyz789",
   "lon": "xyz789"
 }
 
@@ -5366,8 +5366,8 @@
Example
{
-  "lat": "abc123",
-  "lon": "abc123"
+  "lat": "xyz789",
+  "lon": "xyz789"
 }
 
@@ -5433,12 +5433,12 @@
Example
{
-  "uptimeSeconds": 987,
-  "registeredUsers": 123,
-  "onlineUsersNode": 987,
-  "onlineUsers": 123,
+  "uptimeSeconds": 123,
+  "registeredUsers": 987,
+  "onlineUsersNode": 123,
+  "onlineUsers": 987,
   "incomingS2S": 987,
-  "outgoingS2S": 123
+  "outgoingS2S": 987
 }
 
@@ -5489,7 +5489,7 @@
Example
{
   "name": "abc123",
-  "value": "abc123"
+  "value": "xyz789"
 }
 
@@ -5595,14 +5595,14 @@
Example
"n": 987, "mean": 123, "min": 987, - "max": 987, - "median": 987, - "p50": 123, - "p75": 987, + "max": 123, + "median": 123, + "p50": 987, + "p75": 123, "p90": 123, - "p95": 987, + "p95": 123, "p99": 987, - "p999": 123 + "p999": 987 }
@@ -5792,8 +5792,8 @@
Example
{
-  "type": "xyz789",
-  "binValue": "xyz789"
+  "type": "abc123",
+  "binValue": "abc123"
 }
 
@@ -5847,8 +5847,8 @@
Example
{
-  "type": "xyz789",
-  "binValue": "abc123",
+  "type": "abc123",
+  "binValue": "xyz789",
   "extValue": "abc123"
 }
 
@@ -5924,13 +5924,13 @@
Example
{
-  "status": "abc123",
+  "status": "xyz789",
   "created": ["alice@localhost"],
   "existing": ["alice@localhost"],
   "notAllowed": ["alice@localhost"],
-  "invalidJID": ["abc123"],
+  "invalidJID": ["xyz789"],
   "emptyPassword": ["alice@localhost"],
-  "invalidRecord": ["abc123"]
+  "invalidRecord": ["xyz789"]
 }
 
@@ -6041,7 +6041,7 @@
daysExample
-
{"flushUserBin": 987, "flushDomainBin": 123, "flushGlobalBin": 987}
+                    
{"flushUserBin": 123, "flushDomainBin": 123, "flushGlobalBin": 123}
 
@@ -6066,7 +6066,7 @@
Description
Example
-
987
+                    
123
 
@@ -6137,7 +6137,7 @@
Example
{
   "type": "abc123",
-  "credential": "xyz789"
+  "credential": "abc123"
 }
 
@@ -6185,7 +6185,7 @@
Example
{
-  "type": "xyz789",
+  "type": "abc123",
   "credential": "xyz789"
 }
 
@@ -6328,7 +6328,7 @@
Example
"jid": "alice@localhost", "kicked": false, "code": "abc123", - "message": "xyz789" + "message": "abc123" }
@@ -6471,7 +6471,7 @@
Example
{
   "user": "alice@localhost",
   "timestamp": "2007-12-03T10:15:30Z",
-  "status": "xyz789"
+  "status": "abc123"
 }
 
@@ -7093,14 +7093,14 @@
Example
{
   "createInstantRoom": MUCRoomDesc,
-  "inviteUser": "abc123",
+  "inviteUser": "xyz789",
   "kickUser": "xyz789",
-  "sendMessageToRoom": "abc123",
-  "sendPrivateMessage": "abc123",
-  "deleteRoom": "abc123",
+  "sendMessageToRoom": "xyz789",
+  "sendPrivateMessage": "xyz789",
+  "deleteRoom": "xyz789",
   "changeRoomConfiguration": MUCRoomConfig,
   "setUserRole": "abc123",
-  "setUserAffiliation": "abc123",
+  "setUserAffiliation": "xyz789",
   "enterRoom": "abc123",
   "exitRoom": "abc123"
 }
@@ -7556,10 +7556,10 @@ 
Example
{
   "createRoom": Room,
   "changeRoomConfiguration": Room,
-  "inviteUser": "abc123",
+  "inviteUser": "xyz789",
   "deleteRoom": "abc123",
-  "kickUser": "xyz789",
-  "sendMessageToRoom": "xyz789",
+  "kickUser": "abc123",
+  "sendMessageToRoom": "abc123",
   "setBlockingList": "abc123"
 }
 
@@ -7954,22 +7954,22 @@
Example
{
-  "title": "xyz789",
-  "description": "abc123",
+  "title": "abc123",
+  "description": "xyz789",
   "allowChangeSubject": false,
   "allowQueryUsers": true,
-  "allowPrivateMessages": false,
-  "allowVisitorStatus": false,
+  "allowPrivateMessages": true,
+  "allowVisitorStatus": true,
   "allowVisitorNickchange": false,
-  "public": true,
-  "publicList": true,
+  "public": false,
+  "publicList": false,
   "persistent": true,
   "moderated": false,
   "membersByDefault": false,
-  "membersOnly": true,
+  "membersOnly": false,
   "allowUserInvites": true,
   "allowMultipleSession": true,
-  "passwordProtected": false,
+  "passwordProtected": true,
   "password": "xyz789",
   "anonymous": true,
   "mayGetMemberList": ["xyz789"],
@@ -8141,22 +8141,22 @@ 
Example
{
   "title": "abc123",
-  "description": "abc123",
-  "allowChangeSubject": true,
-  "allowQueryUsers": true,
-  "allowPrivateMessages": false,
+  "description": "xyz789",
+  "allowChangeSubject": false,
+  "allowQueryUsers": false,
+  "allowPrivateMessages": true,
   "allowVisitorStatus": false,
-  "allowVisitorNickchange": false,
-  "public": false,
-  "publicList": false,
+  "allowVisitorNickchange": true,
+  "public": true,
+  "publicList": true,
   "persistent": true,
-  "moderated": true,
-  "membersByDefault": false,
-  "membersOnly": false,
+  "moderated": false,
+  "membersByDefault": true,
+  "membersOnly": true,
   "allowUserInvites": true,
   "allowMultipleSession": false,
-  "passwordProtected": false,
-  "password": "xyz789",
+  "passwordProtected": true,
+  "password": "abc123",
   "anonymous": false,
   "mayGetMemberList": ["abc123"],
   "maxUsers": 2,
@@ -8222,7 +8222,7 @@ 
Example
{
   "jid": "alice@localhost",
   "title": "abc123",
-  "private": true,
+  "private": false,
   "usersNumber": 0
 }
 
@@ -8279,7 +8279,7 @@
Example
{
   "jid": "alice@localhost",
-  "nick": "xyz789",
+  "nick": "abc123",
   "role": "VISITOR"
 }
 
@@ -8348,7 +8348,7 @@
Example
"rooms": [MUCRoomDesc], "count": 0, "index": 0, - "first": "abc123", + "first": "xyz789", "last": "xyz789" }
@@ -8435,15 +8435,15 @@
Example
{
-  "name": ["abc123"],
+  "name": ["xyz789"],
   "type": "histogram",
-  "connections": 987,
-  "recv_cnt": 123,
+  "connections": 123,
+  "recv_cnt": 987,
   "recv_max": 987,
   "recv_oct": 987,
   "send_cnt": 987,
   "send_max": 987,
-  "send_oct": 123,
+  "send_oct": 987,
   "send_pend": 123
 }
 
@@ -8597,7 +8597,7 @@
Fields
Example
-
{"key": "xyz789", "value": 987}
+                    
{"key": "xyz789", "value": 123}
 
@@ -9077,12 +9077,12 @@
Example
{
-  "setMaster": "abc123",
-  "changeNodename": "abc123",
+  "setMaster": "xyz789",
+  "changeNodename": "xyz789",
   "backup": "abc123",
-  "restore": "xyz789",
+  "restore": "abc123",
   "dump": "abc123",
-  "dumpTable": "abc123",
+  "dumpTable": "xyz789",
   "load": "xyz789",
   "installFallback": "abc123"
 }
@@ -9237,7 +9237,7 @@ 
Fields
Example
-
{"result": 123, "key": "xyz789"}
+                    
{"result": 987, "key": "abc123"}
 
@@ -9286,8 +9286,8 @@
Example
{
-  "result": ["xyz789"],
-  "key": "xyz789"
+  "result": ["abc123"],
+  "key": "abc123"
 }
 
@@ -9450,7 +9450,7 @@
Example
{
   "family": "xyz789",
-  "givenName": "xyz789",
+  "givenName": "abc123",
   "middleName": "xyz789",
   "prefix": "abc123",
   "suffix": "xyz789"
@@ -9522,7 +9522,7 @@ 
Example
"family": "abc123", "givenName": "abc123", "middleName": "xyz789", - "prefix": "xyz789", + "prefix": "abc123", "suffix": "xyz789" }
@@ -9767,8 +9767,8 @@
Example
{
-  "orgname": "xyz789",
-  "orgunit": ["abc123"]
+  "orgname": "abc123",
+  "orgunit": ["xyz789"]
 }
 
@@ -9817,7 +9817,7 @@
Example
{
   "orgname": "xyz789",
-  "orgunit": ["abc123"]
+  "orgunit": ["xyz789"]
 }
 
@@ -9857,7 +9857,7 @@
Fields
Example
-
{"phonetic": "xyz789"}
+                    
{"phonetic": "abc123"}
 
@@ -10355,7 +10355,7 @@
Example
{
-  "name": ["xyz789"],
+  "name": ["abc123"],
   "type": "histogram",
   "fsm": 987,
   "regular": 123,
@@ -10448,10 +10448,10 @@ 
Example
"name": ["abc123"], "type": "histogram", "workers": 123, - "recv_cnt": 123, + "recv_cnt": 987, "recv_max": 987, - "recv_oct": 987, - "send_cnt": 123, + "recv_oct": 123, + "send_cnt": 987, "send_max": 123, "send_oct": 987, "send_pend": 987 @@ -10595,7 +10595,7 @@
Example
{
   "key": "xyz789",
-  "value": "xyz789"
+  "value": "abc123"
 }
 
@@ -10950,12 +10950,12 @@
Example
{
   "addContact": "xyz789",
   "addContacts": ["xyz789"],
-  "subscription": "xyz789",
-  "deleteContact": "abc123",
+  "subscription": "abc123",
+  "deleteContact": "xyz789",
   "deleteContacts": ["xyz789"],
-  "setMutualSubscription": "abc123",
+  "setMutualSubscription": "xyz789",
   "subscribeToAll": ["abc123"],
-  "subscribeAllToAll": ["xyz789"]
+  "subscribeAllToAll": ["abc123"]
 }
 
@@ -11080,7 +11080,7 @@
Fields
Example
-
{"id": 4}
+                    
{"id": "4"}
 
@@ -11206,10 +11206,10 @@
Example
{
-  "joinCluster": "xyz789",
-  "leaveCluster": "xyz789",
+  "joinCluster": "abc123",
+  "leaveCluster": "abc123",
   "removeFromCluster": "xyz789",
-  "restart": "xyz789",
+  "restart": "abc123",
   "stop": "xyz789",
   "removeNode": "xyz789",
   "setLoglevel": "xyz789"
@@ -11345,12 +11345,12 @@ 
Example
{
   "user": "alice@localhost",
-  "connection": "xyz789",
-  "ip": "abc123",
-  "port": 987,
-  "priority": 123,
+  "connection": "abc123",
+  "ip": "xyz789",
+  "port": 123,
+  "priority": 987,
   "node": "abc123",
-  "uptime": 123
+  "uptime": 987
 }
 
@@ -11811,9 +11811,9 @@
Example
{
-  "phonetic": "abc123",
-  "binValue": "abc123",
-  "extValue": "xyz789"
+  "phonetic": "xyz789",
+  "binValue": "xyz789",
+  "extValue": "abc123"
 }
 
@@ -11872,7 +11872,7 @@
Example
"name": ["xyz789"], "type": "histogram", "one": 123, - "count": 123 + "count": 987 }
@@ -12162,7 +12162,7 @@
Example
{
   "sender": "alice@localhost",
   "timestamp": "2007-12-03T10:15:30Z",
-  "stanza_id": "abc123",
+  "stanza_id": "xyz789",
   "stanza": "<message to='bob@localhost' type='chat' from='alice@localhost'><body>Hi!</body></message>"
 }
 
@@ -12336,7 +12336,7 @@
Example
{
   "statusCode": "RUNNING",
-  "message": "xyz789",
+  "message": "abc123",
   "version": "xyz789",
   "commitHash": "abc123"
 }
@@ -12520,7 +12520,7 @@ 
Fields
Example
-
{"tags": ["HOME"], "number": "xyz789"}
+                    
{"tags": ["HOME"], "number": "abc123"}
 
@@ -12566,7 +12566,7 @@
Fields
Example
-
{"tags": ["HOME"], "number": "abc123"}
+                    
{"tags": ["HOME"], "number": "xyz789"}
 
@@ -12731,7 +12731,7 @@
Example
{
-  "access": "abc123",
+  "access": "xyz789",
   "refresh": "abc123"
 }
 
@@ -12941,7 +12941,7 @@
Example
{
   "user": "alice@localhost",
-  "priority": 987,
+  "priority": 123,
   "text": "abc123"
 }
 
@@ -13023,9 +13023,9 @@
Example
"total": 987, "processes_used": 123, "atom_used": 123, - "binary": 987, + "binary": 123, "ets": 987, - "system": 987 + "system": 123 }
@@ -13096,13 +13096,13 @@
Example
{
-  "name": ["xyz789"],
+  "name": ["abc123"],
   "type": "histogram",
   "port_count": 987,
   "port_limit": 123,
-  "process_count": 987,
-  "process_limit": 123,
-  "ets_limit": 987
+  "process_count": 123,
+  "process_limit": 987,
+  "ets_limit": 123
 }
 
@@ -13293,19 +13293,19 @@
Example
"telephone": [Telephone], "email": [Email], "jabberId": ["abc123"], - "mailer": ["abc123"], + "mailer": ["xyz789"], "timeZone": ["xyz789"], "geo": [GeographicalPosition], - "title": ["xyz789"], + "title": ["abc123"], "role": ["xyz789"], "logo": [ImageData], "agent": [AgentVcard], "org": [Organization], "categories": [Keyword], "note": ["xyz789"], - "prodId": ["xyz789"], - "rev": ["abc123"], - "sortString": ["xyz789"], + "prodId": ["abc123"], + "rev": ["xyz789"], + "sortString": ["abc123"], "sound": [Phonetic], "uid": ["abc123"], "url": ["xyz789"], @@ -13637,33 +13637,33 @@
Example
{
-  "formattedName": "abc123",
+  "formattedName": "xyz789",
   "nameComponents": NameComponentsInput,
-  "nickname": ["xyz789"],
+  "nickname": ["abc123"],
   "photo": [ImageInput],
-  "birthday": ["abc123"],
+  "birthday": ["xyz789"],
   "address": [AddressInput],
   "label": [LabelInput],
   "telephone": [TelephoneInput],
   "email": [EmailInput],
-  "jabberId": ["xyz789"],
+  "jabberId": ["abc123"],
   "mailer": ["abc123"],
   "timeZone": ["xyz789"],
   "geo": [GeographicalPositionInput],
-  "title": ["abc123"],
-  "role": ["abc123"],
+  "title": ["xyz789"],
+  "role": ["xyz789"],
   "logo": [ImageInput],
   "agent": [AgentInput],
   "org": [OrganizationInput],
   "categories": [KeywordInput],
-  "note": ["xyz789"],
+  "note": ["abc123"],
   "prodId": ["abc123"],
   "rev": ["xyz789"],
   "sortString": ["abc123"],
   "sound": [SoundInput],
-  "uid": ["abc123"],
+  "uid": ["xyz789"],
   "url": ["abc123"],
-  "desc": ["abc123"],
+  "desc": ["xyz789"],
   "class": [PrivacyInput],
   "key": [KeyInput]
 }
diff --git a/latest/graphql-api/user-graphql-doc.html b/latest/graphql-api/user-graphql-doc.html
index e2b19f373..421a7614c 100644
--- a/latest/graphql-api/user-graphql-doc.html
+++ b/latest/graphql-api/user-graphql-doc.html
@@ -850,7 +850,7 @@ 
Response
{
   "data": {
     "account": {
-      "unregister": "abc123",
+      "unregister": "xyz789",
       "changePassword": "xyz789"
     }
   }
@@ -956,7 +956,7 @@ 
Query
Response
-
{"data": {"inbox": {"flushBin": 987}}}
+                    
{"data": {"inbox": {"flushBin": 123}}}
 
@@ -1076,10 +1076,10 @@
Response
"data": { "muc": { "createInstantRoom": MUCRoomDesc, - "inviteUser": "abc123", - "kickUser": "abc123", - "sendMessageToRoom": "abc123", - "sendPrivateMessage": "abc123", + "inviteUser": "xyz789", + "kickUser": "xyz789", + "sendMessageToRoom": "xyz789", + "sendPrivateMessage": "xyz789", "deleteRoom": "xyz789", "changeRoomConfiguration": MUCRoomConfig, "setUserRole": "abc123", @@ -1153,11 +1153,11 @@
Response
"muc_light": { "createRoom": Room, "changeRoomConfiguration": Room, - "inviteUser": "abc123", - "deleteRoom": "xyz789", - "kickUser": "xyz789", - "sendMessageToRoom": "xyz789", - "setBlockingList": "xyz789" + "inviteUser": "xyz789", + "deleteRoom": "abc123", + "kickUser": "abc123", + "sendMessageToRoom": "abc123", + "setBlockingList": "abc123" } } } @@ -1273,10 +1273,10 @@
Response
{
   "data": {
     "roster": {
-      "addContact": "abc123",
-      "addContacts": ["xyz789"],
+      "addContact": "xyz789",
+      "addContacts": ["abc123"],
       "subscription": "xyz789",
-      "deleteContact": "xyz789",
+      "deleteContact": "abc123",
       "deleteContacts": ["abc123"]
     }
   }
@@ -1403,7 +1403,7 @@ 
Response
"data": { "token": { "requestToken": Token, - "revokeToken": "xyz789" + "revokeToken": "abc123" } } } @@ -1560,7 +1560,7 @@
Example
{
-  "unregister": "xyz789",
+  "unregister": "abc123",
   "changePassword": "abc123"
 }
 
@@ -1682,13 +1682,13 @@
Example
{
   "tags": ["HOME"],
-  "pobox": "xyz789",
+  "pobox": "abc123",
   "extadd": "abc123",
-  "street": "abc123",
+  "street": "xyz789",
   "locality": "xyz789",
-  "region": "xyz789",
+  "region": "abc123",
   "pcode": "abc123",
-  "country": "xyz789"
+  "country": "abc123"
 }
 
@@ -1773,12 +1773,12 @@
Example
{
   "tags": ["HOME"],
-  "pobox": "abc123",
-  "extadd": "xyz789",
+  "pobox": "xyz789",
+  "extadd": "abc123",
   "street": "abc123",
   "locality": "xyz789",
-  "region": "xyz789",
-  "pcode": "xyz789",
+  "region": "abc123",
+  "pcode": "abc123",
   "country": "abc123"
 }
 
@@ -2169,7 +2169,7 @@
Fields
Example
-
{"binValue": "xyz789"}
+                    
{"binValue": "abc123"}
 
@@ -2477,7 +2477,7 @@
Example
{
   "jid": "alice@localhost",
   "name": "abc123",
-  "groups": ["abc123"],
+  "groups": ["xyz789"],
   "subscription": "NONE",
   "ask": "SUBSCRIBE"
 }
@@ -2612,8 +2612,8 @@ 
Example
{
   "jid": "alice@localhost",
-  "name": "abc123",
-  "groups": ["abc123"]
+  "name": "xyz789",
+  "groups": ["xyz789"]
 }
 
@@ -2923,7 +2923,7 @@
Fields
Example
-
{"extValue": "abc123"}
+                    
{"extValue": "xyz789"}
 
@@ -2977,7 +2977,7 @@
Example
{
-  "putUrl": "xyz789",
+  "putUrl": "abc123",
   "getUrl": "abc123",
   "headers": [Header]
 }
@@ -3025,7 +3025,7 @@ 
Example
{
-  "lat": "xyz789",
+  "lat": "abc123",
   "lon": "abc123"
 }
 
@@ -3075,7 +3075,7 @@
Example
{
   "lat": "abc123",
-  "lon": "xyz789"
+  "lon": "abc123"
 }
 
@@ -3125,7 +3125,7 @@
Example
{
-  "name": "abc123",
+  "name": "xyz789",
   "value": "abc123"
 }
 
@@ -3220,7 +3220,7 @@
Description
Example
-
"4"
+                    
4
 
@@ -3313,7 +3313,7 @@
Example
{
   "type": "abc123",
-  "binValue": "abc123"
+  "binValue": "xyz789"
 }
 
@@ -3367,9 +3367,9 @@
Example
{
-  "type": "xyz789",
+  "type": "abc123",
   "binValue": "abc123",
-  "extValue": "xyz789"
+  "extValue": "abc123"
 }
 
@@ -3427,7 +3427,7 @@
daysExample
-
{"flushBin": 123}
+                    
{"flushBin": 987}
 
@@ -3452,7 +3452,7 @@
Description
Example
-
123
+                    
987
 
@@ -3571,8 +3571,8 @@
Example
{
-  "type": "abc123",
-  "credential": "xyz789"
+  "type": "xyz789",
+  "credential": "abc123"
 }
 
@@ -3696,7 +3696,7 @@
Fields
Example
-
{"tags": ["HOME"], "line": ["xyz789"]}
+                    
{"tags": ["HOME"], "line": ["abc123"]}
 
@@ -3794,7 +3794,7 @@
Example
{
   "user": "alice@localhost",
   "timestamp": "2007-12-03T10:15:30Z",
-  "status": "xyz789"
+  "status": "abc123"
 }
 
@@ -4186,11 +4186,11 @@
Example
{
   "createRoom": Room,
   "changeRoomConfiguration": Room,
-  "inviteUser": "xyz789",
+  "inviteUser": "abc123",
   "deleteRoom": "xyz789",
   "kickUser": "xyz789",
   "sendMessageToRoom": "abc123",
-  "setBlockingList": "xyz789"
+  "setBlockingList": "abc123"
 }
 
@@ -4558,24 +4558,24 @@
Example
{
-  "title": "abc123",
+  "title": "xyz789",
   "description": "abc123",
-  "allowChangeSubject": false,
+  "allowChangeSubject": true,
   "allowQueryUsers": true,
-  "allowPrivateMessages": false,
+  "allowPrivateMessages": true,
   "allowVisitorStatus": false,
-  "allowVisitorNickchange": false,
+  "allowVisitorNickchange": true,
   "public": true,
-  "publicList": true,
-  "persistent": true,
+  "publicList": false,
+  "persistent": false,
   "moderated": true,
   "membersByDefault": false,
-  "membersOnly": false,
-  "allowUserInvites": true,
+  "membersOnly": true,
+  "allowUserInvites": false,
   "allowMultipleSession": false,
-  "passwordProtected": false,
+  "passwordProtected": true,
   "password": "abc123",
-  "anonymous": false,
+  "anonymous": true,
   "mayGetMemberList": ["xyz789"],
   "maxUsers": 2,
   "logging": false
@@ -4744,27 +4744,27 @@ 
Example
{
-  "title": "xyz789",
+  "title": "abc123",
   "description": "xyz789",
-  "allowChangeSubject": false,
-  "allowQueryUsers": true,
+  "allowChangeSubject": true,
+  "allowQueryUsers": false,
   "allowPrivateMessages": true,
-  "allowVisitorStatus": false,
+  "allowVisitorStatus": true,
   "allowVisitorNickchange": true,
   "public": false,
-  "publicList": true,
-  "persistent": false,
-  "moderated": false,
-  "membersByDefault": false,
+  "publicList": false,
+  "persistent": true,
+  "moderated": true,
+  "membersByDefault": true,
   "membersOnly": false,
-  "allowUserInvites": true,
-  "allowMultipleSession": false,
-  "passwordProtected": false,
+  "allowUserInvites": false,
+  "allowMultipleSession": true,
+  "passwordProtected": true,
   "password": "abc123",
   "anonymous": false,
   "mayGetMemberList": ["xyz789"],
   "maxUsers": 2,
-  "logging": true
+  "logging": false
 }
 
@@ -4825,8 +4825,8 @@
Example
{
   "jid": "alice@localhost",
-  "title": "xyz789",
-  "private": false,
+  "title": "abc123",
+  "private": true,
   "usersNumber": 0
 }
 
@@ -4952,7 +4952,7 @@
Example
"rooms": [MUCRoomDesc], "count": 0, "index": 0, - "first": "abc123", + "first": "xyz789", "last": "abc123" }
@@ -5277,15 +5277,15 @@
Example
{
   "createInstantRoom": MUCRoomDesc,
   "inviteUser": "xyz789",
-  "kickUser": "abc123",
-  "sendMessageToRoom": "xyz789",
-  "sendPrivateMessage": "xyz789",
+  "kickUser": "xyz789",
+  "sendMessageToRoom": "abc123",
+  "sendPrivateMessage": "abc123",
   "deleteRoom": "xyz789",
   "changeRoomConfiguration": MUCRoomConfig,
-  "setUserRole": "abc123",
+  "setUserRole": "xyz789",
   "setUserAffiliation": "abc123",
   "enterRoom": "abc123",
-  "exitRoom": "abc123"
+  "exitRoom": "xyz789"
 }
 
@@ -5501,8 +5501,8 @@
Example
{
   "family": "xyz789",
-  "givenName": "xyz789",
-  "middleName": "abc123",
+  "givenName": "abc123",
+  "middleName": "xyz789",
   "prefix": "abc123",
   "suffix": "xyz789"
 }
@@ -5571,9 +5571,9 @@ 
Example
{
   "family": "xyz789",
-  "givenName": "abc123",
+  "givenName": "xyz789",
   "middleName": "abc123",
-  "prefix": "abc123",
+  "prefix": "xyz789",
   "suffix": "xyz789"
 }
 
@@ -5662,7 +5662,7 @@
Example
{
   "orgname": "xyz789",
-  "orgunit": ["xyz789"]
+  "orgunit": ["abc123"]
 }
 
@@ -6117,8 +6117,8 @@
Example
{
   "jid": "alice@localhost",
-  "name": "abc123",
-  "subject": "xyz789",
+  "name": "xyz789",
+  "subject": "abc123",
   "participants": [RoomUser],
   "options": [RoomConfigDictEntry]
 }
@@ -6166,7 +6166,7 @@ 
Example
{
-  "key": "xyz789",
+  "key": "abc123",
   "value": "abc123"
 }
 
@@ -6435,10 +6435,10 @@
Example
{
   "addContact": "abc123",
-  "addContacts": ["abc123"],
-  "subscription": "xyz789",
-  "deleteContact": "xyz789",
-  "deleteContacts": ["xyz789"]
+  "addContacts": ["xyz789"],
+  "subscription": "abc123",
+  "deleteContact": "abc123",
+  "deleteContacts": ["abc123"]
 }
 
@@ -6621,11 +6621,11 @@
Example
{
   "user": "alice@localhost",
-  "connection": "xyz789",
-  "ip": "abc123",
+  "connection": "abc123",
+  "ip": "xyz789",
   "port": 123,
-  "priority": 123,
-  "node": "xyz789",
+  "priority": 987,
+  "node": "abc123",
   "uptime": 123
 }
 
@@ -6682,7 +6682,7 @@
Example
{
   "listResources": ["res1"],
-  "countResources": 123,
+  "countResources": 987,
   "listSessions": [Session]
 }
 
@@ -6789,8 +6789,8 @@
Example
{
-  "phonetic": "abc123",
-  "binValue": "abc123",
+  "phonetic": "xyz789",
+  "binValue": "xyz789",
   "extValue": "xyz789"
 }
 
@@ -6853,7 +6853,7 @@
Example
{
   "sender": "alice@localhost",
   "timestamp": "2007-12-03T10:15:30Z",
-  "stanza_id": "xyz789",
+  "stanza_id": "abc123",
   "stanza": "<message to='bob@localhost' type='chat' from='alice@localhost'><body>Hi!</body></message>"
 }
 
@@ -7122,7 +7122,7 @@
Fields
Example
-
{"stanzas": [StanzaMap], "limit": 123}
+                    
{"stanzas": [StanzaMap], "limit": 987}
 
@@ -7147,7 +7147,7 @@
Description
Example
-
"abc123"
+                    
"xyz789"
 
@@ -7253,7 +7253,7 @@
Fields
Example
-
{"tags": ["HOME"], "number": "abc123"}
+                    
{"tags": ["HOME"], "number": "xyz789"}
 
@@ -7299,7 +7299,7 @@
Fields
Example
-
{"tags": ["HOME"], "number": "abc123"}
+                    
{"tags": ["HOME"], "number": "xyz789"}
 
@@ -7465,7 +7465,7 @@
Example
{
   "access": "abc123",
-  "refresh": "abc123"
+  "refresh": "xyz789"
 }
 
@@ -7773,7 +7773,7 @@
Example
{
-  "formattedName": "xyz789",
+  "formattedName": "abc123",
   "nameComponents": NameComponents,
   "nickname": ["xyz789"],
   "photo": [ImageData],
@@ -7782,24 +7782,24 @@ 
Example
"label": [Label], "telephone": [Telephone], "email": [Email], - "jabberId": ["abc123"], - "mailer": ["abc123"], - "timeZone": ["xyz789"], + "jabberId": ["xyz789"], + "mailer": ["xyz789"], + "timeZone": ["abc123"], "geo": [GeographicalPosition], "title": ["xyz789"], - "role": ["abc123"], + "role": ["xyz789"], "logo": [ImageData], "agent": [AgentVcard], "org": [Organization], "categories": [Keyword], - "note": ["xyz789"], + "note": ["abc123"], "prodId": ["abc123"], "rev": ["abc123"], "sortString": ["xyz789"], "sound": [Phonetic], - "uid": ["xyz789"], - "url": ["xyz789"], - "desc": ["abc123"], + "uid": ["abc123"], + "url": ["abc123"], + "desc": ["xyz789"], "class": [Privacy], "key": [Key] } @@ -8011,7 +8011,7 @@
Example
{
-  "formattedName": "abc123",
+  "formattedName": "xyz789",
   "nameComponents": NameComponentsInput,
   "nickname": ["abc123"],
   "photo": [ImageInput],
@@ -8020,24 +8020,24 @@ 
Example
"label": [LabelInput], "telephone": [TelephoneInput], "email": [EmailInput], - "jabberId": ["xyz789"], + "jabberId": ["abc123"], "mailer": ["xyz789"], - "timeZone": ["abc123"], + "timeZone": ["xyz789"], "geo": [GeographicalPositionInput], - "title": ["xyz789"], - "role": ["xyz789"], + "title": ["abc123"], + "role": ["abc123"], "logo": [ImageInput], "agent": [AgentInput], "org": [OrganizationInput], "categories": [KeywordInput], "note": ["abc123"], - "prodId": ["abc123"], - "rev": ["abc123"], + "prodId": ["xyz789"], + "rev": ["xyz789"], "sortString": ["abc123"], "sound": [SoundInput], - "uid": ["xyz789"], - "url": ["xyz789"], - "desc": ["xyz789"], + "uid": ["abc123"], + "url": ["abc123"], + "desc": ["abc123"], "class": [PrivacyInput], "key": [KeyInput] } diff --git a/latest/index.html b/latest/index.html index 18d634ae9..7af33f02f 100644 --- a/latest/index.html +++ b/latest/index.html @@ -20,7 +20,7 @@ - + diff --git a/latest/listeners/listen-c2s/index.html b/latest/listeners/listen-c2s/index.html index bb40de135..c6fe90a00 100644 --- a/latest/listeners/listen-c2s/index.html +++ b/latest/listeners/listen-c2s/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/listeners/listen-components/index.html b/latest/listeners/listen-components/index.html index 9824f69f1..339b50e65 100644 --- a/latest/listeners/listen-components/index.html +++ b/latest/listeners/listen-components/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/listeners/listen-http/index.html b/latest/listeners/listen-http/index.html index 3e10db736..47186992b 100644 --- a/latest/listeners/listen-http/index.html +++ b/latest/listeners/listen-http/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/listeners/listen-s2s/index.html b/latest/listeners/listen-s2s/index.html index 7c70b12fc..963c80542 100644 --- a/latest/listeners/listen-s2s/index.html +++ b/latest/listeners/listen-s2s/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/migrations/3.1.1_3.2.0/index.html 
b/latest/migrations/3.1.1_3.2.0/index.html index 691a36cc8..ec5580706 100644 --- a/latest/migrations/3.1.1_3.2.0/index.html +++ b/latest/migrations/3.1.1_3.2.0/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/migrations/3.3.0_3.4.0/index.html b/latest/migrations/3.3.0_3.4.0/index.html index b4fc58a57..f9c50c54b 100644 --- a/latest/migrations/3.3.0_3.4.0/index.html +++ b/latest/migrations/3.3.0_3.4.0/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/migrations/3.5.0_3.6.0/index.html b/latest/migrations/3.5.0_3.6.0/index.html index 9c344ce4f..2d9b1929a 100644 --- a/latest/migrations/3.5.0_3.6.0/index.html +++ b/latest/migrations/3.5.0_3.6.0/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/migrations/3.6.0_3.7.0/index.html b/latest/migrations/3.6.0_3.7.0/index.html index e0a4dddf5..60f21e4db 100644 --- a/latest/migrations/3.6.0_3.7.0/index.html +++ b/latest/migrations/3.6.0_3.7.0/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/migrations/3.7.0_4.0.0/index.html b/latest/migrations/3.7.0_4.0.0/index.html index 652adbea4..22e3cce40 100644 --- a/latest/migrations/3.7.0_4.0.0/index.html +++ b/latest/migrations/3.7.0_4.0.0/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/migrations/4.0.0_4.0.1/index.html b/latest/migrations/4.0.0_4.0.1/index.html index f47b4b887..d383cfd18 100644 --- a/latest/migrations/4.0.0_4.0.1/index.html +++ b/latest/migrations/4.0.0_4.0.1/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/migrations/4.0.1_4.1.0/index.html b/latest/migrations/4.0.1_4.1.0/index.html index e408c747b..30f059691 100644 --- a/latest/migrations/4.0.1_4.1.0/index.html +++ b/latest/migrations/4.0.1_4.1.0/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/migrations/4.1.0_4.2.0/index.html b/latest/migrations/4.1.0_4.2.0/index.html index aca6a7d94..cbafad74a 100644 --- a/latest/migrations/4.1.0_4.2.0/index.html +++ b/latest/migrations/4.1.0_4.2.0/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/migrations/4.2.0_5.0.0/index.html 
b/latest/migrations/4.2.0_5.0.0/index.html index 9456bdfa3..011bdb532 100644 --- a/latest/migrations/4.2.0_5.0.0/index.html +++ b/latest/migrations/4.2.0_5.0.0/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/migrations/5.0.0_5.1.0/index.html b/latest/migrations/5.0.0_5.1.0/index.html index 7e26d665b..eacb906a3 100644 --- a/latest/migrations/5.0.0_5.1.0/index.html +++ b/latest/migrations/5.0.0_5.1.0/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/migrations/5.1.0_6.0.0/index.html b/latest/migrations/5.1.0_6.0.0/index.html index a6500ac06..fab505389 100644 --- a/latest/migrations/5.1.0_6.0.0/index.html +++ b/latest/migrations/5.1.0_6.0.0/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/migrations/6.0.0_6.1.0/index.html b/latest/migrations/6.0.0_6.1.0/index.html index 740ee06bd..97d74f2b0 100644 --- a/latest/migrations/6.0.0_6.1.0/index.html +++ b/latest/migrations/6.0.0_6.1.0/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/migrations/6.1.0_6.2.0/index.html b/latest/migrations/6.1.0_6.2.0/index.html index ea43b0ee0..14b129fd8 100644 --- a/latest/migrations/6.1.0_6.2.0/index.html +++ b/latest/migrations/6.1.0_6.2.0/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/migrations/6.2.0_6.2.1/index.html b/latest/migrations/6.2.0_6.2.1/index.html index 8b967c440..b8802a617 100644 --- a/latest/migrations/6.2.0_6.2.1/index.html +++ b/latest/migrations/6.2.0_6.2.1/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/migrations/6.2.1_x.x.x/index.html b/latest/migrations/6.2.1_x.x.x/index.html index cfbb93cd8..001b35349 100644 --- a/latest/migrations/6.2.1_x.x.x/index.html +++ b/latest/migrations/6.2.1_x.x.x/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/migrations/jid-from-mam-muc-script/index.html b/latest/migrations/jid-from-mam-muc-script/index.html index e45e62ed3..14298ecdc 100644 --- a/latest/migrations/jid-from-mam-muc-script/index.html +++ b/latest/migrations/jid-from-mam-muc-script/index.html @@ -22,7 +22,7 @@ - + diff --git 
a/latest/modules/mod_adhoc/index.html b/latest/modules/mod_adhoc/index.html index 7195862c1..71cadcc0d 100644 --- a/latest/modules/mod_adhoc/index.html +++ b/latest/modules/mod_adhoc/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/modules/mod_amp/index.html b/latest/modules/mod_amp/index.html index c68e9dc0a..d6b909773 100644 --- a/latest/modules/mod_amp/index.html +++ b/latest/modules/mod_amp/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/modules/mod_auth_token/index.html b/latest/modules/mod_auth_token/index.html index 3c6e4c3ae..9a6384c2b 100644 --- a/latest/modules/mod_auth_token/index.html +++ b/latest/modules/mod_auth_token/index.html @@ -22,7 +22,7 @@ - + @@ -4306,7 +4306,7 @@

modules.mod_auth_token.valid

Validity periods of access and refresh tokens can be defined independently - specifying one of them does not change the default value for the other one. Validity period configuration for provision tokens happens outside the module since the server does not generate provision tokens - it only validates them.

Required keys

-

To read more about the keys MongooseIM makes use of, please refer to mod_keystore documentation.

+

To read more about the keys MongooseIM makes use of, please refer to mod_keystore documentation, where you can find an example configuration when using mod_auth_token.

Token types

Three token types are supported:

    @@ -4314,7 +4314,8 @@

    Token types

    access tokens: These are short lived tokens which grants aren't tracked by the server (i.e. there's no need to store anything in a database). Access tokens can be used as a payload for the X-OAUTH authentication mechanism and grant access to the system. Access tokens can't be revoked. - An access token is valid only until its expiry date is reached.

    + An access token is valid only until its expiry date is reached. + In mod_keystore, the keyname for this token type is token_secret.

  • refresh tokens: These are longer lived tokens which are tracked by the server and therefore require persistent storage in a relational database. @@ -4322,14 +4323,16 @@

    Token types

    Also they can result in a new set of tokens being returned upon successful authentication. They can be revoked - if a refresh token hasn't been revoked, it is valid until it has expired. On revocation, it immediately becomes invalid. - As the server stores information about granted tokens, it can also persistently mark them as revoked.

    + As the server stores information about granted tokens, it can also persistently mark them as revoked. + In mod_keystore, the keyname for this token type is token_secret.

  • provision tokens: These tokens are generated by a service external to the server. They grant the owner a permission to create an account. A provision token may contain information which the server can use to provision the VCard for the newly created account. Using a provision token to create an account (and inject VCard data) is done similarly to other token types, i.e. by passing it as payload for the X-OAUTH mechanism. - The XMPP server has no way of tracking and revoking provision tokens, as they come from an outside source.

    + The XMPP server has no way of tracking and revoking provision tokens, as they come from an outside source. + In mod_keystore, the keyname for this token type is provision_pre_shared. The usage of this token type is optional.

Token serialization format

diff --git a/latest/modules/mod_bind2/index.html b/latest/modules/mod_bind2/index.html index ffca300de..270192847 100644 --- a/latest/modules/mod_bind2/index.html +++ b/latest/modules/mod_bind2/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/modules/mod_blocking/index.html b/latest/modules/mod_blocking/index.html index 7eae5269a..d9fb38ff4 100644 --- a/latest/modules/mod_blocking/index.html +++ b/latest/modules/mod_blocking/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/modules/mod_bosh/index.html b/latest/modules/mod_bosh/index.html index 2e112dfd3..dab326ca1 100644 --- a/latest/modules/mod_bosh/index.html +++ b/latest/modules/mod_bosh/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/modules/mod_cache_users/index.html b/latest/modules/mod_cache_users/index.html index 03f46a399..8383e3fc3 100644 --- a/latest/modules/mod_cache_users/index.html +++ b/latest/modules/mod_cache_users/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/modules/mod_caps/index.html b/latest/modules/mod_caps/index.html index 0dc51ba94..43c88e17f 100644 --- a/latest/modules/mod_caps/index.html +++ b/latest/modules/mod_caps/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/modules/mod_carboncopy/index.html b/latest/modules/mod_carboncopy/index.html index 116b05648..f37f67e41 100644 --- a/latest/modules/mod_carboncopy/index.html +++ b/latest/modules/mod_carboncopy/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/modules/mod_csi/index.html b/latest/modules/mod_csi/index.html index 9f955678a..ef93bb87d 100644 --- a/latest/modules/mod_csi/index.html +++ b/latest/modules/mod_csi/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/modules/mod_disco/index.html b/latest/modules/mod_disco/index.html index 5872ef496..9a5d98c1b 100644 --- a/latest/modules/mod_disco/index.html +++ b/latest/modules/mod_disco/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/modules/mod_domain_isolation/index.html b/latest/modules/mod_domain_isolation/index.html index c857112fa..2d3ad9c19 100644 
--- a/latest/modules/mod_domain_isolation/index.html +++ b/latest/modules/mod_domain_isolation/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/modules/mod_event_pusher/index.html b/latest/modules/mod_event_pusher/index.html index b9a11e1c3..191903d6a 100644 --- a/latest/modules/mod_event_pusher/index.html +++ b/latest/modules/mod_event_pusher/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/modules/mod_event_pusher_http/index.html b/latest/modules/mod_event_pusher_http/index.html index 04d2a1c05..ed782912a 100644 --- a/latest/modules/mod_event_pusher_http/index.html +++ b/latest/modules/mod_event_pusher_http/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/modules/mod_event_pusher_push/index.html b/latest/modules/mod_event_pusher_push/index.html index b6a00c205..ce84f9456 100644 --- a/latest/modules/mod_event_pusher_push/index.html +++ b/latest/modules/mod_event_pusher_push/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/modules/mod_event_pusher_rabbit/index.html b/latest/modules/mod_event_pusher_rabbit/index.html index 9a96c2373..bcc124eed 100644 --- a/latest/modules/mod_event_pusher_rabbit/index.html +++ b/latest/modules/mod_event_pusher_rabbit/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/modules/mod_event_pusher_sns/index.html b/latest/modules/mod_event_pusher_sns/index.html index 92c3e87b3..5d7cd88df 100644 --- a/latest/modules/mod_event_pusher_sns/index.html +++ b/latest/modules/mod_event_pusher_sns/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/modules/mod_extdisco/index.html b/latest/modules/mod_extdisco/index.html index 810305d36..c63a3761b 100644 --- a/latest/modules/mod_extdisco/index.html +++ b/latest/modules/mod_extdisco/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/modules/mod_global_distrib/index.html b/latest/modules/mod_global_distrib/index.html index 6febe0961..6fde1a43d 100644 --- a/latest/modules/mod_global_distrib/index.html +++ b/latest/modules/mod_global_distrib/index.html @@ -22,7 +22,7 @@ - + diff --git 
a/latest/modules/mod_http_upload/index.html b/latest/modules/mod_http_upload/index.html index 1107e3743..a9be18c45 100644 --- a/latest/modules/mod_http_upload/index.html +++ b/latest/modules/mod_http_upload/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/modules/mod_inbox/index.html b/latest/modules/mod_inbox/index.html index 4e0f01973..9d8722bf8 100644 --- a/latest/modules/mod_inbox/index.html +++ b/latest/modules/mod_inbox/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/modules/mod_jingle_sip/index.html b/latest/modules/mod_jingle_sip/index.html index e630f23f4..01dad7a51 100644 --- a/latest/modules/mod_jingle_sip/index.html +++ b/latest/modules/mod_jingle_sip/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/modules/mod_keystore/index.html b/latest/modules/mod_keystore/index.html index 410cec74e..fd3807261 100644 --- a/latest/modules/mod_keystore/index.html +++ b/latest/modules/mod_keystore/index.html @@ -22,7 +22,7 @@ - + @@ -4206,6 +4206,11 @@

Example Configuration

{name = "access_psk", type = "file", path = "priv/second_access_psk"}, {name = "provision_psk", type = "file", path = "priv/second_provision_psk"}]

+

Minimal configuration supporting mod_auth_token:

+
1
+2
[modules.mod_keystore]
+  keys = [{name = "token_secret", type = "ram"}]
+
diff --git a/latest/modules/mod_last/index.html b/latest/modules/mod_last/index.html index 7ac8dd563..bd3574e29 100644 --- a/latest/modules/mod_last/index.html +++ b/latest/modules/mod_last/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/modules/mod_mam/index.html b/latest/modules/mod_mam/index.html index fdf418d06..6ec9baea6 100644 --- a/latest/modules/mod_mam/index.html +++ b/latest/modules/mod_mam/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/modules/mod_muc/index.html b/latest/modules/mod_muc/index.html index d136bb202..e84c0f758 100644 --- a/latest/modules/mod_muc/index.html +++ b/latest/modules/mod_muc/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/modules/mod_muc_light/index.html b/latest/modules/mod_muc_light/index.html index 55e9f15a3..14d19d9a6 100644 --- a/latest/modules/mod_muc_light/index.html +++ b/latest/modules/mod_muc_light/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/modules/mod_muc_log/index.html b/latest/modules/mod_muc_log/index.html index c0d12efd3..be4b6e1a9 100644 --- a/latest/modules/mod_muc_log/index.html +++ b/latest/modules/mod_muc_log/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/modules/mod_offline/index.html b/latest/modules/mod_offline/index.html index 317a88522..230319a30 100644 --- a/latest/modules/mod_offline/index.html +++ b/latest/modules/mod_offline/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/modules/mod_offline_stub/index.html b/latest/modules/mod_offline_stub/index.html index 1e78a6e56..1658e4a6c 100644 --- a/latest/modules/mod_offline_stub/index.html +++ b/latest/modules/mod_offline_stub/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/modules/mod_ping/index.html b/latest/modules/mod_ping/index.html index c332e3b05..0e1d5e897 100644 --- a/latest/modules/mod_ping/index.html +++ b/latest/modules/mod_ping/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/modules/mod_presence/index.html b/latest/modules/mod_presence/index.html index 746981bde..262b4ce0a 100644 --- 
a/latest/modules/mod_presence/index.html +++ b/latest/modules/mod_presence/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/modules/mod_privacy/index.html b/latest/modules/mod_privacy/index.html index e68fde11c..cd4352666 100644 --- a/latest/modules/mod_privacy/index.html +++ b/latest/modules/mod_privacy/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/modules/mod_private/index.html b/latest/modules/mod_private/index.html index 2b84e6cae..e6aa1f691 100644 --- a/latest/modules/mod_private/index.html +++ b/latest/modules/mod_private/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/modules/mod_pubsub/index.html b/latest/modules/mod_pubsub/index.html index 7f67a208d..7af5c8552 100644 --- a/latest/modules/mod_pubsub/index.html +++ b/latest/modules/mod_pubsub/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/modules/mod_push_service_mongoosepush/index.html b/latest/modules/mod_push_service_mongoosepush/index.html index 9a591d950..36744ee2e 100644 --- a/latest/modules/mod_push_service_mongoosepush/index.html +++ b/latest/modules/mod_push_service_mongoosepush/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/modules/mod_register/index.html b/latest/modules/mod_register/index.html index 6aeba3b7c..6bfed6811 100644 --- a/latest/modules/mod_register/index.html +++ b/latest/modules/mod_register/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/modules/mod_roster/index.html b/latest/modules/mod_roster/index.html index 1e05301d6..dfac0232d 100644 --- a/latest/modules/mod_roster/index.html +++ b/latest/modules/mod_roster/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/modules/mod_sasl2/index.html b/latest/modules/mod_sasl2/index.html index d0404a1fa..9d49a4306 100644 --- a/latest/modules/mod_sasl2/index.html +++ b/latest/modules/mod_sasl2/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/modules/mod_shared_roster_ldap/index.html b/latest/modules/mod_shared_roster_ldap/index.html index 8796f5fd2..823c4d5ca 100644 --- 
a/latest/modules/mod_shared_roster_ldap/index.html +++ b/latest/modules/mod_shared_roster_ldap/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/modules/mod_sic/index.html b/latest/modules/mod_sic/index.html index e0436622e..591ad308c 100644 --- a/latest/modules/mod_sic/index.html +++ b/latest/modules/mod_sic/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/modules/mod_smart_markers/index.html b/latest/modules/mod_smart_markers/index.html index 2e5d0e938..3bb6985e4 100644 --- a/latest/modules/mod_smart_markers/index.html +++ b/latest/modules/mod_smart_markers/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/modules/mod_stream_management/index.html b/latest/modules/mod_stream_management/index.html index 493edaf7e..f54372d74 100644 --- a/latest/modules/mod_stream_management/index.html +++ b/latest/modules/mod_stream_management/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/modules/mod_time/index.html b/latest/modules/mod_time/index.html index 8f325c3dd..93d45ab49 100644 --- a/latest/modules/mod_time/index.html +++ b/latest/modules/mod_time/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/modules/mod_vcard/index.html b/latest/modules/mod_vcard/index.html index e887c1586..2fead67c1 100644 --- a/latest/modules/mod_vcard/index.html +++ b/latest/modules/mod_vcard/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/modules/mod_version/index.html b/latest/modules/mod_version/index.html index 517d7c3a6..5f4f4b794 100644 --- a/latest/modules/mod_version/index.html +++ b/latest/modules/mod_version/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/open-extensions/inbox/index.html b/latest/open-extensions/inbox/index.html index 0ce036c29..d990dd13c 100644 --- a/latest/open-extensions/inbox/index.html +++ b/latest/open-extensions/inbox/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/open-extensions/mam/index.html b/latest/open-extensions/mam/index.html index ead2b4b65..0c4fbe31a 100644 --- a/latest/open-extensions/mam/index.html +++ 
b/latest/open-extensions/mam/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/open-extensions/muc_light/index.html b/latest/open-extensions/muc_light/index.html index 81071f73b..e446601d3 100644 --- a/latest/open-extensions/muc_light/index.html +++ b/latest/open-extensions/muc_light/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/open-extensions/smart-markers/index.html b/latest/open-extensions/smart-markers/index.html index c1b62105e..46b44b718 100644 --- a/latest/open-extensions/smart-markers/index.html +++ b/latest/open-extensions/smart-markers/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/open-extensions/token-reconnection/index.html b/latest/open-extensions/token-reconnection/index.html index 5219383da..632616cb8 100644 --- a/latest/open-extensions/token-reconnection/index.html +++ b/latest/open-extensions/token-reconnection/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/operation-and-maintenance/Cluster-configuration-and-node-management/index.html b/latest/operation-and-maintenance/Cluster-configuration-and-node-management/index.html index ca6ab8e4b..855860c9a 100644 --- a/latest/operation-and-maintenance/Cluster-configuration-and-node-management/index.html +++ b/latest/operation-and-maintenance/Cluster-configuration-and-node-management/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/operation-and-maintenance/Cluster-management-considerations/index.html b/latest/operation-and-maintenance/Cluster-management-considerations/index.html index ee44d9633..2dab49b45 100644 --- a/latest/operation-and-maintenance/Cluster-management-considerations/index.html +++ b/latest/operation-and-maintenance/Cluster-management-considerations/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/operation-and-maintenance/Cluster-restart/index.html b/latest/operation-and-maintenance/Cluster-restart/index.html index 8ec516169..da038c188 100644 --- a/latest/operation-and-maintenance/Cluster-restart/index.html +++ 
b/latest/operation-and-maintenance/Cluster-restart/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/operation-and-maintenance/Humio/index.html b/latest/operation-and-maintenance/Humio/index.html index 400b2fc60..cad0c9870 100644 --- a/latest/operation-and-maintenance/Humio/index.html +++ b/latest/operation-and-maintenance/Humio/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/operation-and-maintenance/Logging-&-monitoring/index.html b/latest/operation-and-maintenance/Logging-&-monitoring/index.html index ab8748fbb..3a2dc4cfa 100644 --- a/latest/operation-and-maintenance/Logging-&-monitoring/index.html +++ b/latest/operation-and-maintenance/Logging-&-monitoring/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/operation-and-maintenance/Logging-fields/index.html b/latest/operation-and-maintenance/Logging-fields/index.html index 051f87f39..2081b6ddf 100644 --- a/latest/operation-and-maintenance/Logging-fields/index.html +++ b/latest/operation-and-maintenance/Logging-fields/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/operation-and-maintenance/Logging/index.html b/latest/operation-and-maintenance/Logging/index.html index f298d582a..69f76e8bd 100644 --- a/latest/operation-and-maintenance/Logging/index.html +++ b/latest/operation-and-maintenance/Logging/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/operation-and-maintenance/MongooseIM-metrics/index.html b/latest/operation-and-maintenance/MongooseIM-metrics/index.html index 3b76122f1..bb1085326 100644 --- a/latest/operation-and-maintenance/MongooseIM-metrics/index.html +++ b/latest/operation-and-maintenance/MongooseIM-metrics/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/operation-and-maintenance/Rolling-upgrade/index.html b/latest/operation-and-maintenance/Rolling-upgrade/index.html index 60f916f74..87b10f786 100644 --- a/latest/operation-and-maintenance/Rolling-upgrade/index.html +++ b/latest/operation-and-maintenance/Rolling-upgrade/index.html @@ -22,7 +22,7 @@ - + diff --git 
a/latest/operation-and-maintenance/System-Metrics-Privacy-Policy/index.html b/latest/operation-and-maintenance/System-Metrics-Privacy-Policy/index.html index e7a4e9e60..76ce43c08 100644 --- a/latest/operation-and-maintenance/System-Metrics-Privacy-Policy/index.html +++ b/latest/operation-and-maintenance/System-Metrics-Privacy-Policy/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/operation-and-maintenance/gdpr-considerations/index.html b/latest/operation-and-maintenance/gdpr-considerations/index.html index d6a2a3116..7bfac3756 100644 --- a/latest/operation-and-maintenance/gdpr-considerations/index.html +++ b/latest/operation-and-maintenance/gdpr-considerations/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/operation-and-maintenance/known-issues/index.html b/latest/operation-and-maintenance/known-issues/index.html index 27e296b8f..ffcb80e61 100644 --- a/latest/operation-and-maintenance/known-issues/index.html +++ b/latest/operation-and-maintenance/known-issues/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/operation-and-maintenance/tls-distribution/index.html b/latest/operation-and-maintenance/tls-distribution/index.html index 2878bc9d3..e016e133f 100644 --- a/latest/operation-and-maintenance/tls-distribution/index.html +++ b/latest/operation-and-maintenance/tls-distribution/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/rest-api/Administration-backend/index.html b/latest/rest-api/Administration-backend/index.html index 808bf9065..dac055cab 100644 --- a/latest/rest-api/Administration-backend/index.html +++ b/latest/rest-api/Administration-backend/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/rest-api/Client-frontend/index.html b/latest/rest-api/Client-frontend/index.html index 2d13c44c9..249edc02f 100644 --- a/latest/rest-api/Client-frontend/index.html +++ b/latest/rest-api/Client-frontend/index.html @@ -22,7 +22,7 @@ - + diff --git a/latest/search/search_index.json b/latest/search/search_index.json index 062eec175..7a3284d42 100644 --- 
a/latest/search/search_index.json +++ b/latest/search/search_index.json @@ -1 +1 @@ -{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"","title":"MongooseIM Documentation","text":"
  • Home: https://github.com/esl/MongooseIM
  • Product page: https://www.erlang-solutions.com/products/mongooseim.html
  • Documentation: https://esl.github.io/MongooseDocs/
  • Try it now: https://trymongoose.im
"},{"location":"#get-to-know-mongooseim","title":"Get to know MongooseIM","text":"

MongooseIM is a robust, scalable and efficient XMPP server at the core of an Instant Messaging platform aimed at large installations.

Designed for enterprise, it is fault-tolerant, can utilise the resources of multiple clustered machines, and easily scales for more capacity by simply adding a box or a VM.

MongooseIM can accept client sessions over vanilla XMPP, GraphQL API, REST API and SSE, as well as Websockets and BOSH (HTTP long-polling).

As a platform, MongooseIM includes several server-side (backend) and client-side (frontend) components. We provide a test suite, metrics, a load testing platform, and a monitoring server. We recommend third-party, open source client libraries for XMPP, GraphQL API and REST API.

MongooseIM is brought to you by Erlang Solutions.

"},{"location":"#mongooseim-platform-components","title":"MongooseIM platform components","text":""},{"location":"#server-side-components","title":"Server-side components","text":"

We offer a set of additional server-side components:

  • WombatOAM is a powerful monitoring platform that comes with a dedicated MongooseIM plugin
  • mongoose_metrics is a internal module that provides various metrics about the server, if you use WombatOAM with MongooseIM plugin then you will see them otherwise you can use e.g. InfluxDB and Grafana to store and visualize them
  • Test suite is written with the help of these useful tools:
    • escalus: an XMPP client for Erlang
    • amoc: a load testing tool
  • MongooseICE: is a STUN and TURN server written for traversing NATs and relaying streams
  • MongoosePush: is a flexible push notification server with APNS and FCM support
"},{"location":"#client-side-components","title":"Client-side components","text":"
  • XMPP client libraries - we recommend the following client libraries:
    • iOS, Objective-C: XMPPFramework
    • Android, Java: Smack
    • Web, JavaScript: Stanza.io, Strophe.js
  • REST API client libraries - we recommend the following client libraries:
    • iOS, Swift: Jayme
    • Android, Java: Retrofit
"},{"location":"#download-packages","title":"Download packages","text":"

For a quick start just download:

  • The pre-built packages that suit your platform (Ubuntu, Debian, CentOS compatible: AlmaLinux and Rocky Linux)
  • The Docker image (source code repository)
  • The Helm chart (source code repository)

See the installation guide for more details.

"},{"location":"#public-testing","title":"Public testing","text":"

Check out our test results:

  • CI testing:
    • GH Actions
    • CircleCI
  • Code coverage:
    • Codecov - reported by CircleCI.
    • Coveralls - reported by GH Actions.
"},{"location":"#versions","title":"Versions","text":"

See the documentation for the latest releases:

  • Master
  • 6.2.1
  • 6.2.0
  • 6.1.0
  • 6.0.0
  • 5.1.0
  • 5.0.0
  • 4.2.0
  • 4.1.0
  • 4.0.1
  • 3.7.1
  • 3.6.2
  • 3.5.0
  • 3.4.1
  • 3.3.0
  • 3.2.0
  • 3.1.1
  • 3.0.1
"},{"location":"#participate","title":"Participate!","text":"

Suggestions, questions, thoughts? Contact us directly:

  • Raise a GitHub issue
  • Email us at mongoose-im@erlang-solutions.com
  • Follow our Twitter account
"},{"location":"Contributions/","title":"Contributions to the Ecosystem","text":"

Our contributions to the ecosystem.

"},{"location":"Contributions/#third-party-opensource-projects","title":"Third-party opensource projects","text":""},{"location":"Contributions/#xmppframework-for-ios","title":"XMPPFramework for iOS","text":"

Available on: robbiehanson/XMPPFramework

  • XEP-0363: HTTP File Upload
  • XEP-0313: Message Archive Management
  • XEP-0030: Service Discovery
  • MUC light
  • Token-based reconnection
  • Revamped README: making people feel like this is a well maintained and up to date framework
  • Created a way to Mock a piece of the framework to improve the way we write tests
"},{"location":"Contributions/#smack-for-android","title":"Smack for Android","text":"

Available on: igniterealtime/Smack

  • XEP-0357: Push Notifications
  • XEP-0191: Blocking Command
  • XEP-0313: Message Archive Management
  • XEP-0308: Last Message Correction
  • MUC light
  • Token-based reconnection
  • Instant Stream Resumption
  • XEP-0231: Bits of Binary
  • XEP-0333: Chat Markers
  • MAM documentation
"},{"location":"Contributions/#movim","title":"Movim","text":"

See movim/movim on GitHub for more details.

  • Docker image for Movim
"},{"location":"Contributions/#software-by-erlang-solutions","title":"Software by Erlang Solutions","text":""},{"location":"Contributions/#escalus","title":"escalus","text":"

See esl/escalus on GitHub for more details.

An XMPP client library in Erlang for conveniently testing XMPP servers

Apache license 2.0

"},{"location":"Contributions/#amoc","title":"amoc","text":"

See esl/amoc on GitHub for more details.

amoc is a simple tool for running massively parallel XMPP tests

Apache license 2.0

Info

amoc stands for \"A Murder of Crows\"

"},{"location":"Contributions/#amoc-arsenal-xmpp","title":"amoc-arsenal-xmpp","text":"

See esl/amoc-arsenal-xmpp on GitHub for more details.

A collection of scenarios for amoc, which we use to test MongooseIM. They can however be used to load test any XMPP server.

Apache license 2.0

"},{"location":"Contributions/#exml","title":"exml","text":"

See esl/exml on GitHub for more details.

XML parsing library in Erlang

Apache license 2.0

"},{"location":"Contributions/#mongooseice-ice-stunturn-server","title":"MongooseICE: ICE (STUN/TURN) server","text":"

See MongooseICE on GitHub for more details.

"},{"location":"Contributions/#mongoosepush-push-notifications-server-apnsfcm","title":"MongoosePush: Push notifications server (APNS/FCM)","text":"

See MongoosePush on GitHub for more details.

"},{"location":"Contributions/#open-standards","title":"Open standards","text":""},{"location":"Contributions/#muc-light","title":"MUC light","text":"

MUC stands for Multi-User Chat. MUC light is a presenceless and subscription-based group chat, relying on a simplified version of MUC.

"},{"location":"Contributions/#token-based-reconnection","title":"Token-based reconnection","text":"

Token-based reconnection (TBR) Reconnection mechanism, for temporary disconnections, using tokens instead of passwords

"},{"location":"History/","title":"MongooseIM history","text":""},{"location":"History/#2023-2024-c2s-and-cets","title":"2023-2024: C2S and CETS","text":"

Created an alternative to Mnesia RAM-only tables - CETS. It allows running MongooseIM completely without Mnesia, in an RDBMS+CETS setup.

Moved the C2S implementation to state machine. Added Docker image for arm64.

Enhanced CETS, configurable pools, and traffic shaping updates.

Releases:

  • MongooseIM 6.2.1 in April 2024.
  • MongooseIM 6.2.0 in December 2023.
  • MongooseIM 6.1.0 in May 2023.
"},{"location":"History/#2022-graphql","title":"2022: GraphQL","text":"

New GraphQL API allows to access MongooseIM using HTTP protocol to extract data and make changes in a flexible way. The command-line interface (CLI) has been reworked to match the GraphQL functionality. The configuration for the admin and the client API has been simplified.

Release:

  • MongooseIM 6.0.0 in December 2022.
"},{"location":"History/#2020-2021-friendly-cloud-native-and-dynamic","title":"2020-2021: Friendly, cloud-native and dynamic","text":"

With the new configuration format, improved logging, and many more changes, MongooseIM has become more friendly for DevOps than ever before. This goes hand in hand with the prioritisation of solutions that enable MongooseIM to be easily deployed to the cloud.

Whether in the cloud or on-premise, it is now possible to have a multi-tenant setup, powered by the new dynamic XMPP domains feature. It means thousands of domains can be simply set up, managed, and removed dynamically, without a noticeable performance overhead.

Releases:

  • MongooseIM 5.1.0 in June 2022.
  • MongooseIM 5.0.0 in October 2021.
  • MongooseIM 4.2.0 in April 2021.
  • MongooseIM 4.1.0 in February 2021.
  • MongooseIM 4.0.0 in September 2020.
  • MongooseIM 3.7.0 in May 2020.
  • MongooseIM 3.6.0 in January 2020.
"},{"location":"History/#2018-2019-global-distribution-ready","title":"2018-2019: Global distribution ready","text":"
  • Focus on global scale architecture.
  • Chat bot integrations.
  • Optimizations for IoT clients.
  • GDPR compliance.
  • New XML parser exml.

Releases:

  • MongooseIM 3.5.0 in October 2019.
  • MongooseIM 3.4.0 in June 2019.
  • MongooseIM 3.3.0 in March 2019.
  • MongooseIM 3.2.0 in November 2018.
  • MongooseIM 3.1.1 in July 2018.
  • MongooseIM 3.0.1 in May 2018.
  • MongooseIM 2.2.2 in April 2018.
  • MongooseIM 2.1.1 in January 2018.
"},{"location":"History/#2017-platform-expansion-and-strengthening","title":"2017: Platform expansion and strengthening","text":"

MongooseIM 2.1.0 in October 2017.

New components were added to the MongooseIM platform:

  • MongoosePush, push notifications server
  • MongooseICE, ICE server to help with voice calls functionality
  • Mangosta iOS, demo XMPP client application for iOS
  • Mangosta Android, demo XMPP client application for Android
"},{"location":"History/#2016-pivot-to-fullstack-messaging-platform","title":"2016: Pivot to fullstack messaging platform","text":"

MongooseIM Platform was created, that included a list of components:

  • MongooseIM XMPP server 2.0.0, featuring a unique REST API for client developers and MUC light
  • WombatOAM, for monitoring and operations
  • escalus, an Erlang XMPP client for test automation
  • amoc, for load generation
  • Smack for Android in Java (third party)
  • XMPPFramework for iOS in Objective-C (third party)
  • Retrofit by Square for Android in Java (third party)
  • Jayme by Inaka for iOS in Swift
"},{"location":"History/#2012-2015-fully-independent-project-growing-fast","title":"2012-2015: Fully independent project growing fast","text":"
  • Full OTP and rebar compliance.
  • Removal of obsolete and/or rarely used modules.
  • Reduced runtime memory consumption and increased functional test coverage.
  • Added Message Archive Management support (XEP-0313).

Releases:

  • MongooseIM 1.6.x in October 2015.
  • MongooseIM 1.5.x in December 2014.
  • MongooseIM 1.4.x in May 2014.
  • MongooseIM 1.3.x in January 2014.
  • MongooseIM 1.2.x in May 2013.
  • MongooseIM 1.1.x in December 2012.
  • MongooseIM 1.0.0 in July 2012.
"},{"location":"History/#2011-fork-of-ejabberd","title":"2011: Fork of ejabberd","text":"

This project began its life as a fork of ejabberd v.2.1.8.

Version 0.1.0 included:

  • Replaced strings with binaries to significantly reduce memory consumption.
  • Refactored directory structure of the project to be OTP compliant.
  • Replaced autotools with the rebar build tool.
  • Removed obsolete and/or rarely used modules to reduce maintenance burden.
  • Added functional tests based on RFCs and XEPs.
"},{"location":"authentication-methods/anonymous/","title":"Anonymous","text":""},{"location":"authentication-methods/anonymous/#overview","title":"Overview","text":"

This authentication method allows the users to connect anonymously.

"},{"location":"authentication-methods/anonymous/#configuration-options","title":"Configuration options","text":""},{"location":"authentication-methods/anonymous/#authanonymousallow_multiple_connections","title":"auth.anonymous.allow_multiple_connections","text":"
  • Syntax: boolean
  • Default: false
  • Example: allow_multiple_connections = true

When set to true, allows multiple connections from the same JID using the anonymous authentication method.

"},{"location":"authentication-methods/anonymous/#authanonymousprotocol","title":"auth.anonymous.protocol","text":"
  • Syntax: string, one of \"sasl_anon\", \"login_anon\", \"both\"
  • Default: sasl_anon
  • Example: protocol = \"both\"

Specifies the SASL mechanisms supported by the anonymous authentication method:

  • sasl_anon - support only the ANONYMOUS mechanism,
  • login_anon - support the non-anonymous mechanisms (PLAIN, DIGEST-MD5, SCRAM-*),
  • both - support both types of mechanisms.
"},{"location":"authentication-methods/anonymous/#authanonymousbackend","title":"auth.anonymous.backend","text":"
  • Syntax: string, one of mnesia, cets
  • Default: mnesia
  • Example: backend = cets

Sets the backend where anonymous sessions will be stored in-memory. See internal databases

"},{"location":"authentication-methods/anonymous/#example","title":"Example","text":"
[auth.anonymous]\n  allow_multiple_connections = true\n  protocol = \"both\"\n
"},{"location":"authentication-methods/dummy/","title":"Dummy","text":""},{"location":"authentication-methods/dummy/#overview","title":"Overview","text":"

The purpose of this method is to make it possible to authenticate a user without the need for real authentication. In other words, using this module allows to connect any user to the server without providing any password, certificate, etc.

This kind of authorization sometimes really comes in handy, especially during development and testing.

The backend just accepts every authentication attempt and introduces a random delay (50-500ms) to an authorization response. The delay works like

    timer:sleep(Base + rand:uniform(Variance)),\n
where Base is base_time and Variance is variance, as configured below.

"},{"location":"authentication-methods/dummy/#configuration","title":"Configuration","text":""},{"location":"authentication-methods/dummy/#authdummybase_time","title":"auth.dummy.base_time","text":"
  • Syntax: non-negative integer
  • Default: 50
  • Example: base_time = 5
"},{"location":"authentication-methods/dummy/#authdummyvariance","title":"auth.dummy.variance","text":"
  • Syntax: positive integer
  • Default: 450
  • Example: variance = 10
"},{"location":"authentication-methods/dummy/#example","title":"Example","text":"
[auth.dummy]\n  base_time = 5\n  variance = 10\n
"},{"location":"authentication-methods/external/","title":"External","text":""},{"location":"authentication-methods/external/#overview","title":"Overview","text":"

This authentication method delegates the authentication to an external script.

It uses the SASL PLAIN mechanism.

"},{"location":"authentication-methods/external/#script-api-specification","title":"Script API specification","text":"

All \"commands\" sent from Erlang VM to the script are prefixed with a 2-byte unsigned integer (command length), MSB first. The script is expected to return responses in the same format.

Currently only 2 response packets are supported:

  • 0x0000 = false (for failure).
  • 0x0001 = true (for success).

The following list describes packets that the script should support.

  • auth:<username>:<domain>:<password> - Check password.
  • setpass:<username>:<domain>:<password> - Set password.
  • tryregister:<username>:<domain>:<password> - Register a user.
  • removeuser:<username>:<domain> - Remove a user.
  • isuser:<username>:<domain> - Check if a user exists.
"},{"location":"authentication-methods/external/#configuration-options","title":"Configuration options","text":""},{"location":"authentication-methods/external/#authexternalprogram","title":"auth.external.program","text":"
  • Syntax: string
  • Default: no default, this option is mandatory for the external authentication method
  • Example: program = \"/usr/bin/auth-script.sh\"

Path to the external authentication program.

"},{"location":"authentication-methods/external/#authexternalinstances","title":"auth.external.instances","text":"
  • Syntax: positive integer
  • Default: 1
  • Example: instances = 2

Specifies the number of workers serving external authentication requests.

"},{"location":"authentication-methods/external/#example","title":"Example","text":"
[auth.external]\n  program = \"/home/user/authenticator\"\n  instances = 5\n
"},{"location":"authentication-methods/http/","title":"HTTP","text":""},{"location":"authentication-methods/http/#overview","title":"Overview","text":"

The purpose of this method is to connect to an external REST API and delegate the authentication operations to it. The component must implement the API described below.

This method can be especially useful when the user database is shared with other services. It fits perfectly when the client application uses a custom authentication token and MongooseIM has to validate it externally.

"},{"location":"authentication-methods/http/#configuration-options","title":"Configuration options","text":"

The auth method uses an outgoing HTTP connection pool called auth, which has to be defined in the outgoing_pools section.

For additional configuration, the following options can be provided in the auth section:

"},{"location":"authentication-methods/http/#authhttpbasic_auth","title":"auth.http.basic_auth","text":"
  • Syntax: string
  • Default: not set
  • Example: basic_auth = \"admin:secret\"

Optional HTTP Basic Authentication in format \"username:password\" - used to authenticate MongooseIM in the HTTP service.

"},{"location":"authentication-methods/http/#example","title":"Example","text":"

Authentication:

[auth.http]\n  basic_auth = \"mongooseim:DzviNQw3qyGJDrJDu+ClyA\"\n

Outgoing pools:

[outgoing_pools.http.auth]\n  connection.host = \"https://auth-service:8000\"\n
"},{"location":"authentication-methods/http/#scram-support","title":"SCRAM support","text":"

The http method can use the SASL SCRAM-* mechanisms. When SCRAM is enabled, the passwords sent to the auth service are serialised and the same serialised format is expected when fetching a password from the component.

It is transparent when MongooseIM is responsible for all DB operations such as password setting, account creation etc.

The service CAN perform the (de)serialization of SCRAM-encoded passwords. You can find more details on the SCRAM serialization page.

"},{"location":"authentication-methods/http/#authentication-service-api","title":"Authentication service API","text":""},{"location":"authentication-methods/http/#url-format","title":"URL format","text":"

All GET requests include the following URL-encoded query string: ?user=<username>&server=<domain>&pass=<password>.

All POST requests have the following URL-encoded string in the request body: user=<username>&server=<domain>&pass=<password>.

If a certain method does not need a password, the value of pass is undefined, so it shouldn't be used.

"},{"location":"authentication-methods/http/#return-codes","title":"Return codes","text":"

For the best integration, the return code range should not exceed the list below:

  • 500 - internal server error
  • 409 - conflict
  • 404 - not found
  • 403 - not allowed
  • 401 - not authorised
  • 400 - other error, should be sent in response body
  • 204 - success, no return data
  • 201 - created
  • 200 - success, return value in response body

Whenever the specification says \"anything else\", service should use one of the codes from the list above.

Some requests consider multiple return codes a \"success\". It is up to the server-side developer to pick one of the codes.

"},{"location":"authentication-methods/http/#http-header-content-length","title":"HTTP header Content-Length","text":"

IMPORTANT: The authentication server MUST include a Content-Length HTTP header in the response. A body can be missing in the first data chunk read from a socket, leading to strange authentication errors.

"},{"location":"authentication-methods/http/#method-register","title":"Method register","text":"
  • Description: Creates a user account.
  • HTTP method: POST
  • Type: mandatory when mod_register is enabled
  • Return values:
    • 201 - success
    • 409 - user already exists
    • anything else - will be treated as failure
"},{"location":"authentication-methods/http/#method-check_password","title":"Method check_password","text":"
  • Description: Must respond if the password is valid for the user.
  • HTTP method: GET
  • Type: mandatory when SCRAM is not used
  • Return values:
    • 200, true or false in the body
    • anything else - will be treated as false
"},{"location":"authentication-methods/http/#method-get_password","title":"Method get_password","text":"
  • Description: Must return the user's password in plaintext or in the SCRAM serialised form.
  • HTTP method: GET
  • Type: mandatory when SCRAM or DIGEST SASL mechanism is used
  • Return values:
    • 200, password in the body
    • anything else - get_password will fail
"},{"location":"authentication-methods/http/#method-get_certs","title":"Method get_certs","text":"
  • Description: Must return all the valid certificates of a user in the PEM format.
  • HTTP method: GET
  • Type: mandatory when EXTERNAL SASL mechanism is used
  • Return values:
    • 200, all the user's certificates listed one after another (as in a PEM file)
    • anything else - get_certs will fail
"},{"location":"authentication-methods/http/#method-user_exists","title":"Method user_exists","text":"
  • Description: Must return the information whether the user exists in DB.
  • HTTP method: GET
  • Type: mandatory
  • Return values:
    • 200, true or false in body
    • anything else - will be treated as false
"},{"location":"authentication-methods/http/#method-set_password","title":"Method set_password","text":"
  • Description: Must set user's password in the internal database to a provided value. The value should not be transformed (except for URL-decoding) before writing into the DB.
  • HTTP method: POST
  • Type: mandatory when mod_register is enabled
  • Return values:
    • 200 or 201 or 204 - success
    • anything else - will be treated as false
"},{"location":"authentication-methods/http/#method-remove_user","title":"Method remove_user","text":"
  • Description: Removes a user account.
  • HTTP method: POST
  • Type: mandatory when mod_register is enabled
  • Return values:
    • 200 or 201 or 204 - success
    • 404 - user does not exist
    • 403 - not allowed for some reason
    • 40X - will be treated as bad request
"},{"location":"authentication-methods/http/#authentication-service-api-recipes","title":"Authentication service API recipes","text":"

Below you can find some examples of the auth service APIs and MongooseIM-side configuration along with use cases.

"},{"location":"authentication-methods/http/#system-using-a-common-custom-auth-token","title":"System using a common, custom auth token","text":"

An Auth token is provided as a password.

  • Service implements: check_password, user_exists
  • MongooseIM config: password.format: plain, mod_register disabled
  • Client side: Must NOT use the DIGEST-MD5 mechanism; use PLAIN instead
"},{"location":"authentication-methods/http/#central-database-of-plaintext-passwords","title":"Central database of plaintext passwords","text":"
  • Service implements: check_password, get_password, user_exists
  • MongooseIM config: password.format: plain, mod_register disabled
  • Client side: May use any available SASL mechanism
"},{"location":"authentication-methods/http/#central-database-able-to-process-scram","title":"Central database able to process SCRAM","text":"
  • Service implements: get_password, user_exists
  • MongooseIM config: password.format: scram, mod_register disabled
  • Client side: May use any available SASL mechanism
"},{"location":"authentication-methods/http/#godlike-mongooseim","title":"Godlike MongooseIM","text":"
  • Service implements: all methods
  • MongooseIM config: password.format: scram (recommended) or plain, mod_register enabled
  • Client side: May use any available SASL mechanism
"},{"location":"authentication-methods/jwt/","title":"JWT","text":""},{"location":"authentication-methods/jwt/#overview","title":"Overview","text":"

This authentication method can verify JSON Web Tokens provided by the clients. A wide range of signature algorithms is supported, including those using public key cryptography.

The module checks the signature and validity of the following parameters:

  • exp - an expired token is rejected,
  • iat - a token must be issued in the past,
  • nbf - a token might not be valid yet.

It requires the SASL PLAIN mechanism listed in sasl_mechanisms.

"},{"location":"authentication-methods/jwt/#configuration-options","title":"Configuration options","text":""},{"location":"authentication-methods/jwt/#authjwtsecret","title":"auth.jwt.secret","text":"
  • Syntax: TOML table with exactly one of the possible items listed below:
    • file - string, path to the file with the JWT secret,
    • env - string, environment variable name with the JWT secret,
    • value - string, the JWT secret value.
  • Default: no default, this option is mandatory
  • Example: secret.env = \"JWT_SECRET\"

This is the JWT secret used for the authentication. You can store it in a file, as an environment variable or specify it directly.

"},{"location":"authentication-methods/jwt/#authjwtalgorithm","title":"auth.jwt.algorithm","text":"
  • Syntax: string, one of: \"HS256\", \"RS256\", \"ES256\", \"HS386\", \"RS386\", \"ES386\", \"HS512\", \"RS512\", \"ES512\"
  • Default: no default, this option is mandatory
  • Example: algorithm = \"HS512\"

Name of the algorithm used to sign the JWT.

"},{"location":"authentication-methods/jwt/#authjwtusername_key","title":"auth.jwt.username_key","text":"
  • Syntax: string
  • Default: no default, this option is mandatory
  • Example: username_key = \"user_name\"

Name of the JWT key that contains the user name to verify.

"},{"location":"authentication-methods/jwt/#example","title":"Example","text":"
[auth.jwt]\n  secret.value = \"top-secret123\"\n  algorithm = \"HS256\"\n  username_key = \"user\"\n
"},{"location":"authentication-methods/ldap/","title":"LDAP","text":""},{"location":"authentication-methods/ldap/#overview","title":"Overview","text":"

This authentication method provides a read-only abstraction over an LDAP directory.

The following SASL mechanisms are supported:

"},{"location":"authentication-methods/ldap/#sasl-external","title":"SASL EXTERNAL","text":"

User credentials are verified by performing an LDAP search with the user name provided by the client. This can be used to verify that the user is allowed to log in after the provided certificate has been verified.

This method requires one connection pool with the default tag (unless you change it with the pool_tag option). You need to provide the root DN and password unless your LDAP server allows anonymous searches.

Example:

[outgoing_pools.ldap.default]\n  workers = 5\n  connection.servers = [\"ldap-server.example.com\"]\n  connection.rootdn = \"cn=admin,dc=example,dc=com\"\n  connection.password = \"ldap-admin-password\"\n

For more details see outgoing connections.

"},{"location":"authentication-methods/ldap/#sasl-plain","title":"SASL PLAIN","text":"

User credentials are verified by performing an LDAP search followed by a bind with the user name and password provided by the client.

To use SASL PLAIN, you need to configure two connection pools:

  • one with the default tag (unless you change it with the pool_tag option) for the search operations (like for SASL EXTERNAL),
  • one with the bind tag (unless you change it with the bind_pool_tag option) for the bind operations - for this one it is not necessary to provide the root DN and password as the bind operations will be performed with users' credentials. This pool has to be used exclusively for the bind operations as the authentication state of the connection changes with each request.

Example:

[outgoing_pools.ldap.default]\n  workers = 5\n  connection.servers = [\"ldap-server.example.com\"]\n  connection.rootdn = \"cn=admin,dc=example,dc=com\"\n  connection.password = \"ldap-admin-password\"\n\n[outgoing_pools.ldap.bind]\n  connection.servers = [\"ldap-server.example.com\"]\n

For more details see outgoing connections.

"},{"location":"authentication-methods/ldap/#configuration-options","title":"Configuration options","text":""},{"location":"authentication-methods/ldap/#authldappool_tag","title":"auth.ldap.pool_tag","text":"
  • Syntax: string
  • Default: \"default\"
  • Example: pool_tag = \"my_pool\"

Specifies the tag for the primary outgoing connection pool for LDAP authentication.

"},{"location":"authentication-methods/ldap/#authldapbind_pool_tag","title":"auth.ldap.bind_pool_tag","text":"
  • Syntax: string
  • Default: \"bind\"
  • Example: bind_pool_tag = \"my_bind_pool\"

Specifies the tag for the secondary outgoing connection pool for LDAP authentication, used for operations requiring the bind operations, such as checking passwords.

"},{"location":"authentication-methods/ldap/#authldapbase","title":"auth.ldap.base","text":"
  • Syntax: string
  • Default: no default, this option is mandatory
  • Example: base = \"ou=Users,dc=example,dc=com\"

LDAP base directory which stores user accounts.

"},{"location":"authentication-methods/ldap/#authldapuids","title":"auth.ldap.uids","text":"
  • Syntax: array of TOML tables with the following content:
    • attr - string, mandatory, name of the attribute
    • format - pattern, default: \"%u\", requires attr
  • Default: [{attr = \"uid\"}]
  • Example: uids = [{attr = \"uid\", format = \"%u@example.org\"}, {attr = \"another_uid\"}]

List of LDAP attributes that contain the user name (user's part of the JID), used to search for user accounts. They are used as alternatives - it is enough if one of them contains the name. By default the whole value of the attribute is expected to be the user name. If this is not the case, use the format option. It must contain one and only one pattern variable %u which will be replaced by the user name.

"},{"location":"authentication-methods/ldap/#authldapfilter","title":"auth.ldap.filter","text":"
  • Syntax: string
  • Default: not set
  • Example: filter = \"(&(objectClass=shadowAccount)(memberOf=Jabber Users))\"

An additional LDAP filter used to narrow down the search for user accounts. Do not forget to close the brackets and do not use superfluous whitespaces as this expression is processed before sending to LDAP - the match for user name (see ldap.uids) is added automatically.

"},{"location":"authentication-methods/ldap/#authldapdn_filter","title":"auth.ldap.dn_filter","text":"
  • Syntax: TOML table with the following content:
    • filter - string (LDAP filter), mandatory
    • attributes - array of strings (attribute names)
  • Default: not set
  • Example: dn_filter = {filter = \"(&(name=%s)(owner=%D)(user=%u@%d))\", attributes = [\"sn\"]}

This filter is applied to the results returned by the main filter. It performs an additional LDAP lookup to provide the complete result. This is useful when you are unable to define all filter rules in ldap.filter. You can define %u, %d, %s and %D pattern variables in the filter:

  • %u is replaced by the user\u2019s part of a JID,
  • %d is replaced by the corresponding domain (virtual host),
  • %s variables are consecutively replaced by values of the attributes listed as attributes
  • %D is replaced by the Distinguished Name.

Since this filter makes additional LDAP lookups, use it only as the last resort; try to define all filter rules in ldap.filter if possible.

"},{"location":"authentication-methods/ldap/#authldaplocal_filter","title":"auth.ldap.local_filter","text":"
  • Syntax: TOML table with the following content:
    • operation - string, mandatory, \"equal\" or \"notequal\"
    • attribute - string, mandatory, LDAP attribute
    • values - array of strings (attribute values)
  • Default: not set
  • Example: local_filter = {operation = \"equal\", attribute = \"accountStatus\", values = [\"enabled\"]}

If you can\u2019t use the ldap.filter due to performance reasons (the LDAP server has many users registered), you can use this local filter. The local filter checks an attribute in MongooseIM, not in LDAP, so this limits the load on the LDAP directory.

The example above shows a filter which matches accounts with the \"enabled\" status. Another example is shown below - it matches any account that is neither \"disabled\" nor \"blacklisted\". It also shows the usage of TOML dotted keys, which is recommended when the inline table grows too big.

   local_filter.operation = \"notequal\"\n   local_filter.attribute = \"accountStatus\"\n   local_filter.values = [\"disabled\", \"blacklisted\"]\n
"},{"location":"authentication-methods/ldap/#authldapderef","title":"auth.ldap.deref","text":"
  • Syntax: string, one of: \"never\", \"always\", \"finding\", \"searching\"
  • Default: \"never\"
  • Example: deref = \"always\"

Specifies whether or not to dereference aliases: finding means to dereference only when finding the base and searching - only when performing the LDAP search. See the documentation on LDAP search operation for more information.

"},{"location":"authentication-methods/ldap/#example","title":"Example","text":"
[auth.ldap]\n  base = \"ou=Users,dc=example,dc=com\"\n  filter = \"(objectClass=inetOrgPerson)\"\n
"},{"location":"authentication-methods/pki/","title":"PKI","text":""},{"location":"authentication-methods/pki/#overview","title":"Overview","text":"

This is a simple authentication method, meant to be used with the SASL EXTERNAL mechanism. It simply accepts all usernames as long as they are validated by the SASL logic.

Warning

Some of its callbacks return hardcoded values, as it's impossible for this backend to properly acquire certain pieces of information. These include:

Function Hardcoded value Explanation does_user_exist true PKI responds with true to modules checking if user's interlocutor actually exists so e.g. messages to nonexistent users will always be stored by mod_mam. This is not necessarily a security threat but something to be aware of. dirty_get_registered_users, get_vh_registered_users, get_vh_registered_users_number [] Any metrics or statistics (e.g. available via mongooseimctl) related to accounts list or numbers, won't display proper values, as this backend cannot possibly \"know\" how many users there are."},{"location":"authentication-methods/pki/#configuration-options","title":"Configuration options","text":"

None.

"},{"location":"authentication-methods/pki/#example","title":"Example","text":"
[auth.pki]\n
"},{"location":"authentication-methods/rdbms/","title":"RDBMS","text":""},{"location":"authentication-methods/rdbms/#overview","title":"Overview","text":"

This authentication method stores user accounts in a relational database, e.g. MySQL or PostgreSQL.

"},{"location":"authentication-methods/rdbms/#configuration-options","title":"Configuration options","text":"

The rdbms method uses an outgoing connection pool of type rdbms with the default tag - it has to be defined in the outgoing_pools section.

"},{"location":"authentication-methods/rdbms/#authrdbmsusers_number_estimate","title":"auth.rdbms.users_number_estimate","text":"
  • Syntax: boolean
  • Default: false
  • Example: users_number_estimate = true

By default querying MongooseIM for the number of registered users uses the SELECT COUNT query, which might be slow. Enabling this option makes MongooseIM use an alternative query that might be not as accurate, but is always fast.

Note

This option is effective only for MySQL and PostgreSQL.

"},{"location":"authentication-methods/rdbms/#example","title":"Example","text":"

Authentication:

[auth.rdbms]\n  users_number_estimate = true\n

Outgoing pools:

[outgoing_pools.rdbms.default.connection]\n  driver = \"pgsql\"\n  host = \"localhost\"\n  database = \"mongooseim\"\n  username = \"mongooseim\"\n  password = \"mongooseim_secret\"\n
"},{"location":"configuration/Erlang-cookie-security/","title":"Erlang Cookie Security","text":"

In order for MongooseIM nodes to communicate with each other, they have to share a common secret - i.e. a cookie - which is a feature of the underlying Erlang VM. The cookie itself is a UTF-8 string that is up to 255 characters in size. Thanks to the cookie, MongooseIM nodes can determine if they are allowed to communicate with each other and with no cookie no communication would flow between the nodes - a feature especially useful when you are running more than one application on a single machine.

For ease of deployment and staging, each MongooseIM node is configured with a predefined erlang cookie. However, one should remember that for production environments this cookie should be reconfigured to a new secret cookie, as this will secure your system from intrusion. You can change the cookie by changing the parameters of the -setcookie parameter in the vm.args file.

Nonetheless, one should remember that communication between Erlang nodes is unencrypted by default, hence, the cookie is vulnerable to sniffing. If one has access to a MongooseIM cookie and figures out the hostname of a node, one can execute shell commands remotely on that node. Therefore, one should either provide privacy at the network layer (strongly recommended) or disable port 4369 for ultimate security.

"},{"location":"configuration/Modules/","title":"Options: Extension Modules","text":"

MongooseIM provides a wide range of pluggable and configurable modules, that implement various features including XEPs. For instance mod_muc enables Multi-User Chat (group chat), mod_mam gives us Message Archive Management, and mod_stream_management is for stanza acknowledgement and stream resumption. This modular architecture provides great flexibility for everyday operations and feature development.

A module configuration generally looks like this:

[modules.mod_muc]\n  host = \"muc.@HOST@\"\n  access = \"muc\"\n  access_create = \"muc_create\"\n

"},{"location":"configuration/Modules/#iq-processing-policies","title":"IQ processing policies","text":"

Some of the modules feature an iqdisc parameter. It defines the method for handling incoming IQ stanzas.

The server may use one of the following strategies to handle incoming IQ stanzas:

"},{"location":"configuration/Modules/#modulesiqdisctype","title":"modules.*.iqdisc.type","text":"
  • Syntax: string, one of \"one_queue\", \"no_queue\", \"queues\", or \"parallel\"
  • Example: iqdisc.type = \"one_queue\"

Note

In the \"queues\" case alone, the following key becomes mandatory:

"},{"location":"configuration/Modules/#modulesiqdiscworkers","title":"modules.*.iqdisc.workers","text":"
  • Syntax: positive integer
  • Example: iqdisc.workers = 50

Their semantics works as follows:

  • no_queue registers a new IQ handler, which will be called in the context of the process serving the connection on which the IQ arrives.
  • one_queue spawns a new process by which the incoming IQ stanzas will be handled.
  • queues spawns N worker processes, as provided by the iqdisc.workers key. Every incoming stanza will be then handled by one of those processes.
  • parallel registers the handler without spawning any process: a new process will be spawned in place, for each incoming stanza.
"},{"location":"configuration/Modules/#modules-list","title":"Modules list","text":""},{"location":"configuration/Modules/#mod_adhoc","title":"mod_adhoc","text":"

Implements XEP-0050: Ad-Hoc Commands for advertising and executing application-specific commands, such as those related to a configuration workflow, using XEP-0004: Data Forms in order to structure the information exchange. This is extremely useful for use cases such as remote administration, user engagement via polls, and ChatBots.

"},{"location":"configuration/Modules/#mod_amp","title":"mod_amp","text":"

Implements a subset of XEP-0079: Advanced Message Processing functionality, that enables entities to request, and servers to perform advanced processing of XMPP message stanzas, including reliable data transport, time-sensitive delivery, and expiration of transient messages.

"},{"location":"configuration/Modules/#mod_auth_token","title":"mod_auth_token","text":"

A module used by SASL X-OAUTH mechanism. It provides an API to manage custom OAuth tokens. It requires mod_keystore as an actual key database.

"},{"location":"configuration/Modules/#mod_blocking","title":"mod_blocking","text":"

Implements XEP-0191: Blocking Command, a simplified interface to privacy lists.

"},{"location":"configuration/Modules/#mod_bind2","title":"mod_bind2","text":"

Implements XEP-0386: Bind 2.

"},{"location":"configuration/Modules/#mod_bosh","title":"mod_bosh","text":"

Allows users to connect to MongooseIM using BOSH (Bidirectional-streams Over Synchronous HTTP), the HTTP long-polling technique described in XEP-0124: Bidirectional-streams Over Synchronous HTTP (BOSH) and XEP-0206: XMPP Over BOSH.

"},{"location":"configuration/Modules/#mod_caps","title":"mod_caps","text":"

Implements XEP-0115: Entity Capabilities. It queries clients for their supported functionalities and caches them in Mnesia. This module tightly cooperates with mod_pubsub in order to deliver PEP events to user's subscribers.

"},{"location":"configuration/Modules/#mod_carboncopy","title":"mod_carboncopy","text":"

Implements XEP-0280: Message Carbons in order to keep all IM clients for a user engaged in a real-time conversation by carbon-copying all inbound and outbound messages to all interested resources (Full JIDs).

"},{"location":"configuration/Modules/#mod_csi","title":"mod_csi","text":"

Enables the XEP-0352: Client State Indication functionality.

"},{"location":"configuration/Modules/#mod_disco","title":"mod_disco","text":"

Implements XEP-0030: Service Discovery for discovering information (capabilities, protocols, features) about other XMPP entities.

"},{"location":"configuration/Modules/#mod_event_pusher","title":"mod_event_pusher","text":"

A framework module to build other notification-based modules on.

"},{"location":"configuration/Modules/#mod_event_pusher_sns","title":"mod_event_pusher_sns","text":"

Allows sending online/offline notifications, chat and groupchat messages as events to Amazon Simple Notification Service.

"},{"location":"configuration/Modules/#mod_event_pusher_rabbit","title":"mod_event_pusher_rabbit","text":"

Allows sending presence changes (to available/unavailable), chat and groupchat messages as events to a RabbitMQ server.

"},{"location":"configuration/Modules/#mod_event_pusher_push","title":"mod_event_pusher_push","text":"

Implements XEP-0357: Push Notifications to provide push notifications to clients that are temporarily unavailable.

"},{"location":"configuration/Modules/#mod_event_pusher_http","title":"mod_event_pusher_http","text":"

Forward events to an external HTTP service. This applies to situations such as sending messages or presences to mobile/SMS/email push service, big data, or an analytics service.

"},{"location":"configuration/Modules/#mod_extdisco","title":"mod_extdisco","text":"

Implements XEP-0215: External Service Discovery for discovering information about services external to the XMPP network. The main use-case is to help discover STUN/TURN servers to allow for negotiating media exchanges.

"},{"location":"configuration/Modules/#mod_http_upload","title":"mod_http_upload","text":"

Implements XEP-0363: HTTP File Upload for coordinating with an XMPP server to upload files via HTTP and receive URLs that can be shared in messages.

"},{"location":"configuration/Modules/#mod_inbox","title":"mod_inbox","text":"

Implements a custom inbox XEP.

"},{"location":"configuration/Modules/#mod_global_distrib","title":"mod_global_distrib","text":"

Enables sharing a single XMPP domain between distinct datacenters (experimental).

"},{"location":"configuration/Modules/#mod_jingle_sip","title":"mod_jingle_sip","text":"

Enables Jingle to SIP and SIP to Jingle translator.

"},{"location":"configuration/Modules/#mod_keystore","title":"mod_keystore","text":"

Serves as a storage for crypto keys for mod_auth_token.

"},{"location":"configuration/Modules/#mod_last","title":"mod_last","text":"

Implements XEP-0012: Last Activity for communicating information about the last activity associated with an XMPP entity (most recent presence information from an offline contact).

"},{"location":"configuration/Modules/#mod_mam","title":"mod_mam","text":"

Implements XEP-0313: Message Archive Management, that defines a protocol to query and control an archive of messages stored on a server.

"},{"location":"configuration/Modules/#mod_muc","title":"mod_muc","text":"

Implements XEP-0045: Multi-User Chat, for a featureful multi-user text chat (group chat), whereby multiple XMPP users can exchange messages in the context of a chat room. It is tightly coupled with user presence in chat rooms.

"},{"location":"configuration/Modules/#mod_muc_log","title":"mod_muc_log","text":"

Implements a logging subsystem for mod_muc.

"},{"location":"configuration/Modules/#mod_muc_light","title":"mod_muc_light","text":"

Implements XEP Multi-User Chat Light.

"},{"location":"configuration/Modules/#mod_offline","title":"mod_offline","text":"

Provides an offline messages storage that is compliant with XEP-0160: Best Practices for Handling Offline Messages.

"},{"location":"configuration/Modules/#mod_offline_stub","title":"mod_offline_stub","text":"

Prevents <service-unavailable/> error when the message recipient is offline.

"},{"location":"configuration/Modules/#mod_ping","title":"mod_ping","text":"

Implements XEP-0199: XMPP Ping, enabling periodic XMPP pings sent to clients and responding to pings sent by clients.

"},{"location":"configuration/Modules/#mod_privacy","title":"mod_privacy","text":"

This module implements XEP-0016: Privacy Lists, for enabling or disabling communication with other entities on a network.

"},{"location":"configuration/Modules/#mod_private","title":"mod_private","text":"

Implements XEP-0049: Private XML Storage to store and query private user data in XML format.

"},{"location":"configuration/Modules/#mod_pubsub","title":"mod_pubsub","text":"

This extension implements XEP-0060: Publish-Subscribe. It is a pluggable implementation using behaviours provided by node_*.erl and nodetree_*.erl modules.

"},{"location":"configuration/Modules/#mod_push_service_mongoosepush","title":"mod_push_service_mongoosepush","text":"

Handles push notifications generated by mod_pubsub's node_push and passes them to MongoosePush service.

"},{"location":"configuration/Modules/#mod_register","title":"mod_register","text":"

Implements XEP-0077: In-Band Registration, that enables creating an account and changing the password once connected. This does not provide a solution to the forgotten password use case via SMS or email.

"},{"location":"configuration/Modules/#mod_roster","title":"mod_roster","text":"

Roster support, specified in RFC 6121. Includes support for XEP-0237: Roster Versioning.

"},{"location":"configuration/Modules/#mod_sasl2","title":"mod_sasl2","text":"

Implements XEP-0388: Extensible SASL Profile.

"},{"location":"configuration/Modules/#mod_shared_roster_ldap","title":"mod_shared_roster_ldap","text":"

This module, when enabled, will inject roster entries fetched from LDAP.

"},{"location":"configuration/Modules/#mod_sic","title":"mod_sic","text":"

Implements XEP-0279: Server IP Check that enables a client to discover its external IP address.

"},{"location":"configuration/Modules/#mod_stream_management","title":"mod_stream_management","text":"

Enables XEP-0198: Stream Management functionality that defines the active management of an XML stream between two XMPP entities, including features for stanza acknowledgements and stream resumption.

"},{"location":"configuration/Modules/#mod_time","title":"mod_time","text":"

XEP-0202: Entity Time implementation. With this extension, clients can get the current server time.

"},{"location":"configuration/Modules/#mod_vcard","title":"mod_vcard","text":"

Provides support for vCards, as specified in XEP-0054: vcard-temp and XEP-0055: Jabber Search.

"},{"location":"configuration/Modules/#mod_version","title":"mod_version","text":"

This module provides the functionality specified in XEP-0092: Software Version.

"},{"location":"configuration/Modules/#modules-incompatible-with-dynamic-domains","title":"Modules incompatible with dynamic domains","text":"

There are some modules that don't support dynamic domains for now. These must not be enabled when using host types in modules or host_config.modules sections:

  • mod_event_pusher
  • mod_global_distrib
  • mod_jingle_sip
  • mod_pubsub
  • mod_push_service_mongoosepush
  • mod_shared_roster_ldap

Please note that s2s and the XMPP components (XEP-0114) mechanism, as configured in the listen.service section, do not support dynamic domains either.

"},{"location":"configuration/Services/","title":"Options: Services","text":"

Some functionalities in MongooseIM are provided by \"services\". A service is similar to a module, but while a module is started for every host type and may have global or specific configuration, a service is started only once with global configuration. Currently, three modules are categorised as \"service providers\". Eventually the modules which are not specific for a host type will be refactored to be services.

  • Syntax: Each service is specified in its own services.* section.
  • Default: None - each service needs to be enabled explicitly. Typical services are already specified in the example configuration file.
  • Example: A configuration of the service_domain_db service.
[services.service_domain_db]\n  event_cleaning_interval = 1000\n  event_max_age = 5000\n
"},{"location":"configuration/Services/#service_mongoose_system_metrics","title":"service_mongoose_system_metrics","text":"

MongooseIM system metrics are being gathered to analyse the trends and needs of our users, improve MongooseIM, and get to know where to focus our efforts. See System Metrics Privacy Policy for more details.

"},{"location":"configuration/Services/#servicesservice_mongoose_system_metricsreport","title":"services.service_mongoose_system_metrics.report","text":"
  • Syntax: boolean
  • Default: not specified
  • Example: report = true

An explicit acknowledgement that the metrics are gathered and reported. When this option is not specified, the reports are gathered, and a notification appears in logs on startup. Enabling this option silences the notification reminder that metrics are gathered. When this option is set to false, System Metrics Service is not started and metrics are not collected.

"},{"location":"configuration/Services/#servicesservice_mongoose_system_metricsinitial_report","title":"services.service_mongoose_system_metrics.initial_report","text":"
  • Syntax: non-negative integer
  • Default: 300_000 (milliseconds - 5 minutes).
  • Example: initial_report = 300_000

Time delay counted when the service is started after which the first metrics report is created and sent.

"},{"location":"configuration/Services/#servicesservice_mongoose_system_metricsperiodic_report","title":"services.service_mongoose_system_metrics.periodic_report","text":"
  • Syntax: non-negative integer
  • Default: 108_000_000 (milliseconds - 3 hours)
  • Example: periodic_report = 108_000_000

Time delay for a periodic update report to be created and sent.

"},{"location":"configuration/Services/#servicesservice_mongoose_system_metricstracking_idid","title":"services.service_mongoose_system_metrics.tracking_id.id:","text":"
  • Syntax: string
  • Default: no default.
  • Example: tracking_id.id = \"G-123456789\"

Tracking ID to forward the reported metrics so that they can be viewed in the Google Analytics dashboard.

"},{"location":"configuration/Services/#servicesservice_mongoose_system_metricstracking_idsecret","title":"services.service_mongoose_system_metrics.tracking_id.secret:","text":"
  • Syntax: string
  • Default: no default.
  • Example: tracking_id.secret = \"Secret\"

Removing the services.service_mongoose_system_metrics entry will result in the service not being started. Metrics will not be collected and shared. It will generate a notification that the feature is not being used. The notification can be silenced by setting the no_report option explicitly.

"},{"location":"configuration/Services/#service_domain_db","title":"service_domain_db","text":"

This service is needed to use the dynamic domains API. It is used to synchronise dynamic domains between nodes after starting.

"},{"location":"configuration/Services/#servicesservice_domain_dbdb_pool","title":"services.service_domain_db.db_pool","text":"
  • Syntax: string
  • Default: global
  • Example: db_pool = \"my_host_type\"

By default, this service uses the RDBMS connection pool configured with the scope \"global\". You can put a specific host type there to use the default pool with the host_type scope for that particular host type. See the outgoing connections docs for more information about pool scopes.

"},{"location":"configuration/Services/#servicesservice_domain_dbevent_cleaning_interval","title":"services.service_domain_db.event_cleaning_interval","text":"
  • Syntax: positive integer
  • Default: 1800 (seconds - 30 minutes)
  • Example: event_cleaning_interval = 1800

The number of seconds between cleaning attempts of the domain_events table.

"},{"location":"configuration/Services/#servicesservice_domain_dbevent_max_age","title":"services.service_domain_db.event_max_age","text":"
  • Syntax: positive integer
  • Default: 7200 (seconds - 2 hours)
  • Example: event_max_age = 7200

The number of seconds after which an event must be deleted from the domain_events table.

"},{"location":"configuration/Services/#example-configuration","title":"Example configuration","text":"
[services.service_mongoose_system_metrics]\n  report = true\n  initial_report = 300_000\n  periodic_report = 108_000_000\n  tracking_id.id = \"G-123456789\"\n  tracking_id.secret = \"Secret\"\n\n[services.service_domain_db]\n  db_pool = \"global\"\n  event_cleaning_interval = 1800\n  event_max_age = 7200\n
"},{"location":"configuration/TLS-hardening/","title":"TLS Hardening","text":""},{"location":"configuration/TLS-hardening/#otp-tls-vs-fast-tls","title":"OTP TLS vs. Fast TLS","text":"

Before we explain the TLS hardening in MongooseIM, we need to describe the TLS libraries used in the project. These are \"OTP TLS\" and \"Fast TLS\".

The former is provided by (as the name suggests) OTP as the ssl application. Large part of the logic is implemented in Erlang but it calls OpenSSL API for some operations anyway.

The latter is a community-maintained driver, which is implemented as NIFs (native C code). It uses OpenSSL API for all operations.

Most MongooseIM components use the TLS library provided by OTP. However, some of them choose to integrate with fast_tls library instead. The former one is used primarily by MIM dependencies, while the latter is used only by MIM modules.

None of them is strictly better than the other. Below you may find a summary of the differences between them.

  • fast_tls is faster
  • There are options that OTP TLS (a.k.a just_tls in the C2S listener configuration) supports exclusively:
    • Immediate connection drop when the client certificate is invalid
    • Certificate Revocation Lists
    • More flexible certificate verification options
  • Allowed protocol versions may be configured:
    • Globally for OTP TLS via an environment variable
    • Per socket in Fast TLS via OpenSSL cipher string
"},{"location":"configuration/TLS-hardening/#deprecations","title":"Deprecations","text":"

MongooseIM is configured to allow only TLS 1.2 or higher, due to known vulnerabilities in TLS 1.0 and 1.1. It is still possible to enable earlier versions, however it is strongly discouraged.

"},{"location":"configuration/TLS-hardening/#otp-tls-hardening","title":"OTP TLS hardening","text":"

Protocol list for OTP TLS is set via the protocol_version environment variable. It's an Erlang runtime variable, so it is not configured in the OS but rather in the app.config file. It may be found in the etc/ folder inside the MongooseIM release and in [repository root]/rel/files/.

In order to change the list, please find the following lines:

{protocol_version, ['tlsv1.2',\n                    'tlsv1.3'\n          ]}\n

The remaining valid values are: 'tlsv1.1', tlsv1, sslv3.

This setting affects the following MongooseIM components:

  • Raw XMPP over TCP connections, if a C2S listener is configured to use just_tls
  • All outgoing connections (databases, AMQP, SIP etc.)
  • HTTP endpoints
"},{"location":"configuration/TLS-hardening/#fast-tls-hardening","title":"Fast TLS hardening","text":"

Fast TLS expects an OpenSSL cipher string as one of optional connection parameters. This string is configured individually for every module that uses it. By default, MongooseIM sets this option to TLSv1.2:TLSv1.3 for each component.

The list below enumerates all components that use Fast TLS and describes how to change this string.

  • listen.c2s - main user session abstraction + XMPP over TCP listener
    • Please consult the respective section in Listener modules.
  • listen.s2s - incoming S2S connections (XMPP Federation)
    • Please consult the respective section in Listener modules.
  • s2s - outgoing S2S connections (XMPP Federation)
    • Please check the documentation for s2s_ciphers option.
  • mod_global_distrib - Global Distribution module
    • Please add connections.tls.ciphers = \"string\" to modules.mod_global_distrib module, as described in the documentation.
"},{"location":"configuration/access/","title":"Options: Access","text":"

The access section is used to define access rules which return specific values for specific access classes.

  • Syntax: each access rule is a key-value pair, where:
    • Key is the name of the rule,
    • Value is a TOML array of rule clauses - TOML tables, whose format is described below.
  • Default: no default - each access rule needs to be specified explicitly.
  • Example: see the examples below.
"},{"location":"configuration/access/#access-rule-clauses","title":"Access rule clauses","text":"

Whenever a rule is checked to obtain the resulting value for a user, the clauses are traversed one by one until a matching one is found or the list is exhausted (in which case the special value deny is returned).

Each clause has to contain the following keys:

"},{"location":"configuration/access/#accessacl","title":"access.*.acl","text":"
  • Syntax: string
  • Example: acl = \"local\"

The access class defined in the acl section. The user is matched against it. The special name all is a catch-all value that matches any user. If the class does not exist, the clause does not match (there is no error).

"},{"location":"configuration/access/#accessvalue","title":"access.*.value","text":"
  • Syntax: string or integer
  • Example: value = \"allow\"

For rules determining access, the value will be \"allow\" or \"deny\". For other rules it can be an integer or a string.

"},{"location":"configuration/access/#rule-examples","title":"Rule examples","text":"

The following access rules are already defined in the example configuration file.

"},{"location":"configuration/access/#c2s-access","title":"C2S Access","text":"

The c2s rule is used to allow/deny the users to establish C2S connections:

  c2s = [\n    {acl = \"blocked\", value = \"deny\"},\n    {acl = \"all\", value = \"allow\"}\n  ]\n

It has the following logic:

  • if the access class is blocked, the returned value is \"deny\",
  • otherwise, the returned value is \"allow\".

The blocked access class can be defined in the acl section and match blacklisted users.

For this rule to take effect, it needs to be referenced in the options of a C2S listener.

"},{"location":"configuration/access/#c2s-shaper","title":"C2S Shaper","text":"

The c2s_shaper rule is used to determine the shaper used to limit the incoming traffic on C2S connections:

  c2s_shaper = [\n    {acl = \"admin\", value = \"none\"},\n    {acl = \"all\", value = \"normal\"}\n  ]\n

It has the following logic:

  • if the access class is admin, the returned value is \"none\",
  • otherwise, the returned value is \"normal\".

The admin access class can be defined in the acl to specify admin users who will bypass the normal shaper.

For this rule to take effect, it needs to be referenced in the options of a C2S listener.

"},{"location":"configuration/access/#s2s-shaper","title":"S2S Shaper","text":"

The s2s_shaper rule is used to determine the shaper used to limit the incoming traffic on S2S connections:

  s2s_shaper = [\n    {acl = \"all\", value = \"fast\"}\n  ]\n

It assigns the fast shaper to all S2S connections.

For this rule to take effect, it needs to be referenced in the options of an S2S listener.

"},{"location":"configuration/access/#muc","title":"MUC","text":"

The following rules manage the permissions of MUC operations:

  muc_admin = [\n    {acl = \"admin\", value = \"allow\"}\n  ]\n\n  muc_create = [\n    {acl = \"local\", value = \"allow\"}\n  ]\n\n  muc = [\n    {acl = \"all\", value = \"allow\"}\n  ]\n

They are referenced in the options of the mod_muc module.

"},{"location":"configuration/access/#registration","title":"Registration","text":"

This rule manages the permissions to create new users with mod_register.

  register = [\n    {acl = \"all\", value = \"allow\"}\n  ]\n

It needs to be referenced in the options of the mod_register module.

"},{"location":"configuration/access/#mam-permissions","title":"MAM permissions","text":"

These rules set the permissions for MAM operations triggered by IQ stanzas and handled by the mod_mam module.

  mam_set_prefs = [\n    {acl = \"all\", value = \"default\"}\n  ]\n\n  mam_get_prefs = [\n    {acl = \"all\", value = \"default\"}\n  ]\n\n  mam_lookup_messages = [\n    {acl = \"all\", value = \"default\"}\n  ]\n

They can return \"allow\", \"deny\" or \"default\". The last value uses the default setting for the operation, which is to allow the operation when the sender and recipient JID's are the same.

MAM for MUC permissions has muc_ prefix:

  muc_mam_set_prefs = [\n    {acl = \"all\", value = \"default\"}\n  ]\n\n  muc_mam_get_prefs = [\n    {acl = \"all\", value = \"default\"}\n  ]\n\n  muc_mam_lookup_messages = [\n    {acl = \"all\", value = \"default\"}\n  ]\n
"},{"location":"configuration/access/#mam-shapers","title":"MAM shapers","text":"

These rules limit the rate of MAM operations triggered by IQ stanzas.

  mam_set_prefs_shaper = [\n    {acl = \"all\", value = \"mam_shaper\"}\n  ]\n\n  mam_get_prefs_shaper = [\n    {acl = \"all\", value = \"mam_shaper\"}\n  ]\n\n  mam_lookup_messages_shaper = [\n    {acl = \"all\", value = \"mam_shaper\"}\n  ]\n\n  mam_set_prefs_global_shaper = [\n    {acl = \"all\", value = \"mam_global_shaper\"}\n  ]\n\n  mam_get_prefs_global_shaper = [\n    {acl = \"all\", value = \"mam_global_shaper\"}\n  ]\n\n  mam_lookup_messages_global_shaper = [\n    {acl = \"all\", value = \"mam_global_shaper\"}\n  ]\n

For each operation there are two rules:

  • *_shaper - limits the number of operations per user connection per second,
  • *_global_shaper - limits the number of operations per server node per second.

The values returned by the rules (mam_shaper, mam_global_shaper) are shaper names, which need to be defined in the shaper section.

MAM for MUC shapers has muc_ prefix.

"},{"location":"configuration/access/#maximum-number-of-sessions","title":"Maximum number of sessions","text":"

The max_user_sessions rule is used to determine the maximum number of sessions a user can open.

  max_user_sessions = [\n    {acl = \"all\", value = 10}\n  ]\n

By default, all users can open at most 10 concurrent sessions.

"},{"location":"configuration/access/#maximum-number-of-offline-messages","title":"Maximum number of offline messages","text":"

The max_user_offline_messages rule is used to determine the maximum number of messages that is stored for a user by the mod_offline module.

  max_user_offline_messages = [\n    {acl = \"admin\", value = 5000},\n    {acl = \"all\", value = 100}\n  ]\n

It has the following logic:

  • if the access class is admin, the returned value is 5000,
  • otherwise, the returned value is 100.

This means that the admin users can have 5000 messages stored offline, while the others can have at most 100. The admin access class can be defined in the acl section.

"},{"location":"configuration/access/#for-developers","title":"For developers","text":"

To access the rule functionality, one has to use the acl:match_rule/3 function.

Given the following rule:

  register = [\n    {acl = \"all\", value = \"deny\"}\n  ]\n

One can call:

acl:match_rule(<<\"localhost\">>, register, jid:make(<<\"p\">>, <<\"localhost\">>, <<>>)).

Which in our case will return deny. If the rule is not host specific, one can use global instead of <<\"localhost\">>.

"},{"location":"configuration/acl/","title":"Options: Acl","text":"

The acl section is used to define access classes to which the connecting users are assigned. These classes are used in access rules.

  • Syntax: each access class is a key-value pair, where:
    • Key is the name of the access class,
    • Value is a TOML array of patterns - TOML tables, whose format is described below.
  • Default: no default - each access class needs to be specified explicitly.
  • Example: the local access class is used for the regular users connecting to the C2S listener. The pattern {} matches all users from the current server, because it is equivalent to {match = \"current_domain\"} (see below).
  local = [{}]\n

When there are multiple patterns listed, the resulting pattern will be the union of all of them.

"},{"location":"configuration/acl/#patterns","title":"Patterns","text":"

Each pattern consists of one or more conditions, specified with the options listed below. All defined conditions need to be satisfied for the pattern to be matched successfully.

"},{"location":"configuration/acl/#aclmatch","title":"acl.*.match","text":"
  • Syntax: string, one of: \"all\", \"current_domain\", \"any_hosted_domain\", \"none\"
  • Default: \"current_domain\"
  • Example: match = \"all\"

By default only users from the current domain (the one of the server) are matched. Setting it to \"any_hosted_domain\" results in matching users from all domains hosted by this server. You can also set this option to \"all\", extending the pattern to users from external domains. This option makes a difference for some access rules, e.g. MAM, MUC and registration ones. Setting the option to \"none\" makes the pattern never match.

  everyone = [\n    {match = \"all\"}\n  ]\n
"},{"location":"configuration/acl/#acluser","title":"acl.*.user","text":"
  • Syntax: string
  • Example: user = \"admin\"

Matches all JIDs with the specified user name. The following class includes alice@localhost, but not bob@localhost:

  admin = [\n    {user = \"alice\"},\n    {user = \"charlie\"}\n  ]\n
"},{"location":"configuration/acl/#aclserver","title":"acl.*.server","text":"
  • Syntax: string
  • Example: server = \"localhost\"

Matches all JIDs with the specified domain name. The following class includes alice@localhost, but not alice@xmpp.org:

  localhost_users = [\n    {server = \"localhost\"}\n  ]\n

This option can be combined with user - only alice@localhost belongs to the following class:

  admin = [\n    {user = \"alice\", server = \"localhost\"}\n  ]\n
"},{"location":"configuration/acl/#aclresource","title":"acl.*.resource","text":"
  • Syntax: string
  • Example: resource = \"mobile\"

Matches all JIDs with the specified resource name. The following class includes alice@localhost/mobile, but not alice@localhost/home:

  mobile_users = [\n    {resource = \"mobile\"}\n  ]\n
This option can be combined with user and server - only alice@localhost/mobile belongs to the following class:

  admin = [\n    {user = \"alice\", server = \"localhost\", resource = \"mobile\"}\n  ]\n
"},{"location":"configuration/acl/#acluser_regexp","title":"acl.*.user_regexp","text":"
  • Syntax: string, regular expression
  • Example: user_regexp = \"^user.*\"

Matches all JIDs with the user name matching the regular expression. The following class includes alice@localhost and albert@jabber.org, but not bob@localhost:

  ae = [\n    {user_regexp = \"^a.*e\"}\n  ]\n

This option can be combined with server - here albert@jabber.org is excluded:

  localhost_ae = [\n    {user_regexp = \"^a.*e\", server = \"localhost\"}\n  ]\n
"},{"location":"configuration/acl/#aclserver_regexp","title":"acl.*.server_regexp","text":"
  • Syntax: string, regular expression
  • Example: server = \"localhost\"

Matches all JIDs with the domain name matching the regular expression. The following class includes alice@host1, but not alice@xmpp.org:

  host_users = [\n    {server_regexp = \"host\"}\n  ]\n

This option can be combined with user_regexp, e.g. we can require the user name to contain 'a' and the domain name to start with 'a':

  a = [\n    {user_regexp = \"a\", server_regexp = \"^a\"}\n  ]\n
"},{"location":"configuration/acl/#aclresource_regexp","title":"acl.*.resource_regexp","text":"
  • Syntax: string, regular expression
  • Example: resource_regexp = \"^res\"

Matches all JIDs with the resource name matching the regular expression. This class includes bob@xmpp.org/res123, but not bob@xmpp.org/home:

  digital_resources = [\n    {resource_regexp = '^res\\d+$'}\n  ]\n

Note the use of a literal string (single quotes) to prevent \\d from being escaped.

"},{"location":"configuration/acl/#acluser_glob","title":"acl.*.user_glob","text":"
  • Syntax: string, glob pattern
  • Example: user_glob = \"user*\"

Matches all JIDs with the user name matching the pattern. The following class includes alice@localhost and albert@jabber.org, but not bob@localhost:

  ae_users = [\n    {user_glob = \"a*e*\"}\n  ]\n

This option can be combined with server - here albert@jabber.org is excluded:

  localhost_ae_users = [\n    {user_glob = \"a*e*\", server = \"localhost\"}\n  ]\n
"},{"location":"configuration/acl/#aclserver_glob","title":"acl.*.server_glob","text":"
  • Syntax: string, glob pattern
  • Example: server_glob = \"host*\"

Matches all JIDs with the domain name matching the pattern. The following class includes alice@host1, but not alice@xmpp.org:

  localhost_users = [\n    {server_glob = \"host*\"}\n  ]\n

This option can be combined with user_glob, e.g. we can require the user name to contain 'a' and the domain name to start with 'a':

  a = [\n    {user_glob = \"*a*\", server_glob = \"a*\"}\n  ]\n
"},{"location":"configuration/acl/#aclresource_glob","title":"acl.*.resource_glob","text":"
  • Syntax: string, glob pattern
  • Example: resource_glob = \"res*\"

Matches all JIDs with the resource name matching the pattern. This class includes bob@xmpp.org/res123, but not bob@xmpp.org/home:

  limited_resources = [\n    {resource_glob = \"res???\"}\n  ]\n
"},{"location":"configuration/auth/","title":"Options: Auth","text":"

The auth section is used to choose and configure the method which is used by MongooseIM to authenticate connecting users. The following methods are supported:

  • internal - stores the user accounts in an internal Mnesia database,
  • rdbms - stores the user accounts in a SQL database,
  • external - uses an external program to authenticate the user,
  • anonymous - allows anonymous connections,
  • ldap - checks the user credentials in LDAP,
  • jwt - authenticates the users with JSON Web Tokens,
  • http - uses an external HTTP service to authenticate the user,
  • pki - uses the certificate provided by the user to authenticate them,
  • dummy - no authentication, only for development and testing.

To allow the users to connect, you need to choose the authentication method from the list above and enable it by adding a corresponding section. For example, the default configuration file has the [auth.internal] section, which enables the internal method, using the internal Mnesia database to store users and their passwords. However, for production systems other methods like rdbms are recommended, as using an external database offers easier maintenance, flexibility, scalability and configurability in a typical setup. Some methods have more complex setup procedures and have their own specific options - the method names above are links to their descriptions. There are some general authentication options as well, which are described below.

Warning

Make sure that the compatible SASL mechanisms are enabled, see capabilities.

"},{"location":"configuration/auth/#general-options","title":"General Options","text":"

The options listed here affect more than one configuration method.

"},{"location":"configuration/auth/#authmethods","title":"auth.methods","text":"
  • Syntax: array of strings. Allowed values: \"internal\", \"rdbms\", \"external\", \"anonymous\", \"ldap\", \"jwt\", \"http\", \"pki\", \"dummy\"
  • Default: not set
  • Example: methods = [\"internal\", \"anonymous\"]

It is possible to enable more than one method - they are queried one by one in the alphabetical order until one of them succeeds or there are no more methods. You can change the default order by using this option. Make sure that all methods from the list have their corresponding sections included in the auth section, e.g.

[auth]\n  methods = [\"internal\", \"dummy\"]\n\n  [auth.internal]\n\n  [auth.dummy]\n    variance = 1000\n
"},{"location":"configuration/auth/#authsasl_mechanisms","title":"auth.sasl_mechanisms","text":"
  • Syntax: array of strings. Allowed values: \"scram_sha512_plus\", \"scram_sha512\", \"scram_sha384_plus\", \"scram_sha384\", \"scram_sha256_plus\", \"scram_sha256\", \"scram_sha224_plus\", \"scram_sha224\", \"scram_sha1_plus\", \"scram_sha1\", \"plain\", \"anonymous\", \"oauth\", \"external\", \"digest\"
  • Default: [\"scram_sha512_plus\", \"scram_sha512\", \"scram_sha384_plus\", \"scram_sha384\", \"scram_sha256_plus\", \"scram_sha256\", \"scram_sha224_plus\", \"scram_sha224\", \"scram_sha1_plus\", \"scram_sha1\", \"plain\", \"anonymous\", \"oauth\"]
  • Example: sasl_mechanisms = [\"external\", \"plain\"]

Specifies the list of allowed SASL mechanisms, which are announced during stream negotiation and eventually enforced (users can't pick a mechanism not listed here).

Notes

  • This list is still filtered by capabilities. For example, if you use the internal method, only the PLAIN, DIGEST-MD5 and SCRAM-SHA-* mechanisms from the list will be supported. If there are no compatible mechanisms on the list, the users will not be able to authenticate.
  • Configuring the sasl_mechanisms replaces the default list entirely.
  • The order in which the mechanisms are listed in the config will be taken as the order in which they are advertised.
  • All SCRAM-SHA-* mechanisms (specified as scram_sha*) have their counterparts which support channel binding and are advertised as separate authentication mechanisms suffixed by -PLUS (specified as scram_sha*_plus).
  • The DIGEST-MD5 mechanism (specified as digest) is deprecated and will be removed in the next release.
"},{"location":"configuration/auth/#authentication-method-capabilities","title":"Authentication method capabilities","text":"

The table below shows the supported SASL mechanisms (columns) for each authentication method (row).

plain digest scram_sha* anonymous external internal x x x rdbms x x x external x anonymous x x x x ldap x x jwt x http x x x pki x dummy x"},{"location":"configuration/auth/#authsasl_external","title":"auth.sasl_external","text":"
  • Syntax: list of strings, allowed values: \"standard\", \"common_name\", \"auth_id\"
  • Default: [\"standard\"]
  • Example: sasl_external = [\"standard\", \"common_name\"]

There are three possible ways of using the SASL EXTERNAL mechanism:

  • standard - do not accept a certificate with no xmpp_addrs field (default),
  • common_name - use the common_name field if it is provided in the certificate,
  • auth_id - accept a certificate without xmpp_addrs and use the user identity from the authentication request.

This option allows you to list the enabled ones in the order of preference (they are tried until one succeeds or the list is exhausted).

"},{"location":"configuration/auth/#authmax_users_per_domain","title":"auth.max_users_per_domain","text":"
  • Syntax: positive integer or string \"infinity\", representing maximum amount of users that can be registered in a domain
  • Default: \"infinity\"
  • Example: max_users_per_domain = 10000

Limits the number of users that can be registered for each domain. If the option is configured to the value \"infinity\", no limit is present.

Warning

The limit only works for the following authentication methods: internal, rdbms and ldap.

"},{"location":"configuration/auth/#password-related-options","title":"Password-related options","text":"

These options are common to the http, rdbms and internal methods.

"},{"location":"configuration/auth/#authpasswordformat","title":"auth.password.format","text":"
  • Syntax: string, one of: \"plain\", \"scram\"
  • Default: \"scram\"
  • Example: password.format = \"plain\"

Decide whether user passwords will be kept plain or hashed in the database. Currently, popular XMPP clients support the SCRAM method and it is strongly recommended to use the hashed version. The older XMPP clients can still use the PLAIN mechanism even if the format is set to scram.

Note

The DIGEST-MD5 mechanism is not available with the scram password format.

"},{"location":"configuration/auth/#scram-options","title":"SCRAM options","text":"

For these options to take effect, password.format should be set to scram.

"},{"location":"configuration/auth/#authpasswordhash","title":"auth.password.hash","text":"
  • Syntax: list of strings, allowed values: \"sha\", \"sha224\", \"sha256\", \"sha384\", \"sha512\"
  • Default: not set - all hash functions supported
  • Example: password.hash = [\"sha384\", \"sha512\"]

MongooseIM supports SHA-1, SHA-224, SHA-256, SHA-384 and SHA-512 for SCRAM hashing. You can use this option to limit the supported hash functions by listing them explicitly. The value \"sha\" stands for the SHA-1 algorithm.

Warning

This option limits the supported SCRAM-SHA-* SASL mechanisms to the ones compatible with the specified hash functions.

"},{"location":"configuration/auth/#authpasswordscram_iterations","title":"auth.password.scram_iterations","text":"
  • Syntax: positive integer
  • Default: 10000, as recommended in this XEP and this NIST Guidelines
  • Example: password.scram_iterations = 20_000

Hash function round count. This is a tradeoff between latency and security. The higher the value, the more difficult breaking the hashes is: increasing the count increases the work it requires to compute a full derivation, which effectively slows down brute-force attacks. But it adds load on both client and server, so this parameter should be tuned as high as the business-rules allow. Note that increasing the security of a password has a higher impact over the security of the algorithm, without impacting its load. See more information in this NIST guide, Appendix A.2.2

"},{"location":"configuration/auth/#examples","title":"Examples","text":"

Internal authentication method without any general options - you can skip the auth section in this case:

[auth.internal]\n

Internal authentication method with some general options:

[auth]\n  password.hash = [\"sha512\"]\n  password.scram_iterations = 20000\n\n  [auth.internal]\n

For more specific examples, see the links below.

"},{"location":"configuration/auth/#method-specific-options","title":"Method-specific options","text":"

See the links below for options related to the particular methods:

  • RDBMS method options
  • Anonymous method options
  • External method options
  • LDAP method options
  • JWT method options
  • HTTP method options
"},{"location":"configuration/configuration-files/","title":"Configuration Files","text":"

The following files are used to configure MongooseIM:

  • mongooseim.toml for MongooseIM settings,

  • vm.args to affect the Erlang VM behaviour (performance tuning, node name),

  • app.config to change low-level logging parameters and settings of other Erlang applications.

"},{"location":"configuration/configuration-files/#mongooseimtoml","title":"mongooseim.toml","text":"

This TOML file contains the configuration options for the MongooseIM server. It is located at [MongooseIM repo root]/rel/files/ if you are building from source or [MongooseIM install root]/etc/ if you are using a pre-built version.

The file is divided into the following sections:

  • general - Served XMPP domains, log level, server language and some other miscellaneous settings.
  • listen - Configured listeners, receiving incoming XMPP and HTTP connections.
  • auth - Supported client authentication methods and their options.
  • internal_databases - Options for Mnesia and CETS. They are primarily used for clustering.
  • outgoing_pools - Outgoing connections to external services, including databases, message queues and HTTP services.
  • services - Internal services like an administration API and system metrics.
  • modules - XMPP extension modules, which extend the basic functionality provided by XMPP.
  • shaper - Traffic shapers that limit the incoming XMPP traffic, providing a safety valve to protect the server.
  • acl - Access classes to which connecting users are assigned.
  • access - Access rules, specifying the privileges of the defined access classes.
  • s2s - Server-to-server connection options, used for XMPP federation.
  • host_config - Configuration options for different XMPP domains or host types (groups of domains).

The section names above are links to the detailed documentation of each section.

Warning

It is recommended to use the same configuration file for all nodes in the cluster, but there is no protection against using different option values for each node, because it can happen in two cases:

  • During a rolling upgrade procedure, when nodes are restarted one by one with new configuration.
  • When you need different network-specific parameters (e.g. listening IP addresses) for each node.
"},{"location":"configuration/configuration-files/#vmargs","title":"vm.args","text":"

This file contains parameters passed directly to the Erlang VM. To configure it, go to [MongooseIM root]/rel/files/.

Let's explore the default options.

"},{"location":"configuration/configuration-files/#options","title":"Options","text":"
  • -sname - Erlang node name. Can be changed to name, if necessary
  • -setcookie - Erlang cookie. All nodes in a cluster must use the same cookie value.
  • +K - Enables kernel polling. It improves the stability when a large number of sockets is opened, but some systems might benefit from disabling it. Might be a subject of individual load testing.
  • +A 5 - Sets the asynchronous threads number. Async threads improve I/O operations efficiency by relieving scheduler threads of IO waits.
  • +P 10000000 - Process count limit. This is a maximum allowed number of processes running per node. In general, it should exceed the tripled estimated online user count.
  • -env ERL_MAX_PORTS 250000 - Open port count. This is a maximum allowed number of ports opened per node. In general, it should exceed the tripled estimated online user count. Keep in mind that increasing this number also increases the memory usage by a constant amount, so finding the right balance for it is important for every project.
  • -env ERL_FULLSWEEP_AFTER 2 - affects garbage collection. Reduces memory consumption (forces often full g.c.) at the expense of CPU usage.
  • -sasl sasl_error_logger false - MongooseIM's solution for logging is Lager, so SASL error logger is disabled.
"},{"location":"configuration/configuration-files/#appconfig","title":"app.config","text":"

A file with Erlang application configuration. To configure it, go to [MongooseIM root]/rel/files/. By default only the following applications can be found there:

  • logger - check Logger's documentation for more information.
  • ssl
    • session_lifetime (default specified in the file: 600 seconds) - This parameter says for how long should the ssl session remain in the cache for further re-use, should ssl session resumption happen.
"},{"location":"configuration/configuration-files/#configuring-tls-certificates-keys","title":"Configuring TLS: Certificates & Keys","text":"

TLS is configured in one of two ways: some modules need a private key and certificate (chain) in separate files, while others need both in a single file. This is because recent additions use OTP's ssl library, while older modules use p1_tls, respectively.

  • Client-to-server connections need both in the same .pem file
  • Server-to-server connections need both in the same .pem file
  • BOSH, WebSockets and REST APIs need them in separate files

In order to create private key & certificate bundle, you may simply concatenate them.

More information about configuring TLS for these endpoints is available in the listen section configuration page.

"},{"location":"configuration/database-backends-configuration/","title":"Database Backends","text":"

MongooseIM can work with several databases, both RDBMS (SQL) and NoSQL ones. Some of them require extra work before they can be used. For example the SQL databases require defining a schema. MongooseIM is tested with CI, so the CI scripts can be used as a reference.

"},{"location":"configuration/database-backends-configuration/#a-brief-overview","title":"A Brief Overview","text":"

Data in MongooseIM is either transient or persistent:

  • transient: volatile data changing often, such as session data, stream management data, and other in-memory data. These don't need any backup, since after a potential failure, they will naturally rebuild as clients reconnect.
  • persistent: long-lived data, such as roster items, credentials, and chat archives. These absolutely need regular and tested backups.
"},{"location":"configuration/database-backends-configuration/#choosing-a-database-for-mongooseim","title":"Choosing a database for MongooseIM","text":"

Here is some general advice on the use of databases. Subsequent sections go into more depth on each database: what they are suitable for and how to set them up.

Transient data:

  • CETS - a library to synchronise ETS tables between nodes. A new choice to share live data across the MongooseIM cluster. We recommend to use this backend for transient data. This backend requires an RDBMS database configured because we use an external database to discover nodes in the cluster. For a CETS config example, see tutorials.

  • Mnesia - a built-in Erlang Database. Mnesia is fine for a cluster of fixed size with reliable networking between nodes and with nodes rarely restarted. There are some issues when nodes are restarting or new ones joining the cluster. For this case, we recommend to use CETS instead. Mnesia is still the default backend for some modules for compatibility reasons with older config files.

  • Redis - A fantastic choice for storing live data. It's highly scalable and it can be easily shared by multiple MongooseIM nodes. Additionally, Redis' great performance makes it an excellent choice for storing user session data. We recommend caution, since it has not yet been widely tested in production.

Persistent Data:

  • RDBMS - MongooseIM has a strong backend support for relational databases. Reliable and battle proven, they are a great choice for regular MongooseIM use cases and features like privacy lists, vcards, roster, private storage, last activity and message archive. Never lose your data. Use MySQL, MariaDB, PostgreSQL, or MS SQL Server.

  • Cassandra - Only for MAM (Message Archive Management).

  • ElasticSearch - Only for MAM (Message Archive Management).

  • Mnesia - some backends support Mnesia to store data, but it is not recommended. It is still the default option, when not specifying a backend for many modules, so be careful.

    Warning

    We strongly recommend keeping persistent data in an external DB (RDBMS) for production. Mnesia is not suitable for the volumes of persistent data which some modules may require. Sooner or later a migration will be needed which may be painful. It is possible to store all data in Mnesia, but only for testing purposes, not for any serious deployments.

User Data:

  • LDAP - Used for: users, shared rosters, vCards
"},{"location":"configuration/database-backends-configuration/#rdbms","title":"RDBMS","text":""},{"location":"configuration/database-backends-configuration/#mysql","title":"MySQL","text":"

Can be used for:

  • users (credentials)
  • vcards
  • roster
  • private storage
  • privacy/block lists
  • last activity
  • mam (message archive management)
  • muc_light rooms

Setup

The schema files can be found in the priv directory. The default schema is defined in the mysql.sql file.

You can use the following command to apply it on localhost:

mysql -h localhost -u user -p -e 'create database mongooseim'\nmysql -h localhost -u user -p mongooseim < mysql.sql\n

You should also configure the MySQL database in the mongooseim.toml file. Please refer to the RDBMS options for more information.

Version notice

The required minimum version of MySQL is 8.0 because MongooseIM uses the JSON data type and the INSERT INTO ... AS ... query syntax.

"},{"location":"configuration/database-backends-configuration/#postgresql","title":"PostgreSQL","text":"

Can be used for:

  • users (credentials)
  • vcards
  • roster
  • private storage
  • privacy/block lists
  • last activity
  • mam (message archive management)
  • muc_light rooms

Setup

The schema files can be found in the priv directory. The default schema is defined in the pg.sql file.

You can use the following command to apply it on localhost:

psql -h localhost -U user -c \"CREATE DATABASE mongooseim;\"\npsql -h localhost -U user -q -d mongooseim -f pg.sql\n
You should also configure the Postgres database in the mongooseim.toml file. Please refer to the RDBMS options and general database options for more information.

"},{"location":"configuration/database-backends-configuration/#microsoft-sql-server","title":"Microsoft SQL Server","text":"

Microsoft SQL Server, sometimes called MSSQL, or Azure SQL Database.

Warning

MongooseIM can only connect to MSSQL on Ubuntu Xenial x64.

This can be used for:

  • users (credentials)
  • vcards
  • roster
  • private storage
  • privacy/block lists
  • last activity
  • mam (message archive management)
  • muc_light rooms

Setup

MSSQL can be used from MongooseIM through the ODBC layer with FreeTDS driver, so you need them installed on your system.

# Ubuntu\n$ sudo apt install freetds-dev tdsodbc\n\n# CentOS compatible systems (Rocky, Alma)\n$ sudo yum install freetds\n\n# macOS\n$ brew install freetds\n

Then you need to configure the connection. Add your database (mongooseim here) to the /etc/odbc.ini or $HOME/.odbc.ini file:

[mongoose-mssql]\n; Ubuntu\nDriver      = /usr/lib/x86_64-linux-gnu/odbc/libtdsodbc.so\nSetup       = /usr/lib/x86_64-linux-gnu/odbc/libtdsS.so\n; CentOS compatible\n; Driver      = /usr/lib64/libtdsodbc.so.0\n; Setup       = /usr/lib64/libtdsS.so\n; macOS\n; Driver      = /usr/local/Cellar/freetds/[current version]/lib/libtdsodbc.so\nServer      = 127.0.0.1\nPort        = 1433\nDatabase    = mongooseim\nCharset     = UTF-8\nTDS_Version = 7.2\nclient_charset = UTF-8\n

Please amend the paths above to match your current OS if necessary.

For more details, please refer to the freetds.conf documentation and unixodbc documentation.

MongooseIM is built with ODBC support by default.

Deadlocks notice

If muc_light's backend is set to ODBC and there are many rooms created in parallel in your system, there may be some deadlocks due to the READ_COMMITTED_SNAPSHOT set to OFF by default. In this case we recommend setting this database property to ON, this will enable row level locking which significantly reduces deadlock chances around muc_light operations.

This property can be set by the following ALTER DATABASE query:

ALTER DATABASE $name_of_your_db SET READ_COMMITTED_SNAPSHOT ON\n

The command above may take some time.

Then you need to import the SQL schema from mssql2012.sql. You can use a Microsoft's GUI tool (the provided .sql files should work with it) or isql, but after a slight modification of the dump file:

cat mssql2012.sql | tr -d '\\r' | tr '\\n' ' ' | sed 's/GO/\\n/g' |\nisql mongoose-mssql username password -b\n

The final step is to configure mongooseim.toml appropriately. Set the following option in the general section:

[general]\n  rdbms_server_type = \"mssql\"\n

Configure the outgoing_pools.rdbms section as follows:

[outgoing_pools.rdbms.default]\n  workers = 5\n\n  [outgoing_pools.rdbms.default.connection]\n    driver = \"odbc\"\n    settings = \"DSN=mongoose-mssql;UID=username;PWD=password\"\n
"},{"location":"configuration/database-backends-configuration/#nosql","title":"NoSQL","text":""},{"location":"configuration/database-backends-configuration/#cassandra","title":"Cassandra","text":"

Setup

This will prepare Cassandra for connection from MongooseIM. Make sure Cassandra is running, open a new terminal window and enter the following commands:

$ cqlsh\n$ cqlsh> source '$REPO/priv/casssandra.cql';\n

"},{"location":"configuration/database-backends-configuration/#elasticsearch","title":"ElasticSearch","text":"

Can be used for:

  • MAM (Message Archive Management)

Setup

Please note that MongooseIM has been tested to work properly with ElasticSearch version 5.6.9.

In order to use ElasticSearch as a MAM backend, you'll need to create required indexes and mappings. From the root of MongooseIM's repository run:

curl -X PUT $ELASTICSEARCH_URL/messages -d '@priv/elasticsearch/pm.json'\ncurl -X PUT $ELASTICSEARCH_URL/muc_messages -d '@priv/elasticsearch/muc.json'\n

where $ELASTICSEARCH_URL is a URL pointing to your ElasticSearch node's HTTP API endpoint.

Please refer to the advanced configuration page to check how to configure MongooseIM to connect to ElasticSearch node.

"},{"location":"configuration/database-backends-configuration/#redis","title":"Redis","text":"

Can be used for:

  • users sessions

Setup

Please refer to the Redis options for more information.

"},{"location":"configuration/database-backends-configuration/#ldap","title":"LDAP","text":"

Can be used for:

  • users (credentials)
  • shared roster
  • vcard

Setup

Please refer to the LDAP options for more information.

"},{"location":"configuration/general/","title":"Options: General","text":"

The general section contains basic settings as well as some miscellaneous options. You can start with providing only the basic options, for example configuring the loglevel, a single host (XMPP domain) as the default, and setting the server language:

[general]\n  loglevel = \"warning\"\n  hosts = [\"my-xmpp-domain.com\"]\n  default_server_domain = \"my-xmpp-domain.com\"\n  language = \"en\"\n

All options are described below.

"},{"location":"configuration/general/#general-options","title":"General options","text":"

These are the basic settings that you should configure before running your MongooseIM server.

"},{"location":"configuration/general/#generalloglevel","title":"general.loglevel","text":"
  • Syntax: string, one of \"none\", \"emergency\", \"alert\", \"critical\", \"error\", \"warning\", \"notice\", \"info\", \"debug\", \"all\".
  • Default: \"warning\"
  • Example: loglevel = \"error\"

Verbosity level of the logger. Values recommended for production systems are \"error\" and \"warning\". The \"debug\" level is good for development.

"},{"location":"configuration/general/#generalhosts","title":"general.hosts","text":"
  • Syntax: array of strings representing the domain names.
  • Default: none. If omitted, at least one host type has to be defined in general.host_types.
  • Example: hosts = [\"localhost\", \"domain2\"]

This option specifies the statically defined XMPP domains served by this cluster. In order to configure these hosts independently, use the host_config section.

Note

At least one of general.hosts or general.host_types has to be provided.

Warning

Extension modules and database backends will be started separately for every domain from this list. When increasing the number of domains, please make sure you have enough resources available (e.g. connection limit set in the DBMS).

"},{"location":"configuration/general/#generalhost_types","title":"general.host_types","text":"
  • Syntax: array of strings the names for host types.
  • Default: none. If omitted, at least one host has to be defined in general.hosts.
  • Example: host_types = [\"first type\", \"second type\"]

This is the list of names for the types of hosts that will serve dynamic XMPP domains. Each host type can be seen as a label for a group of independent domains that use the same server configuration. In order to configure these host types independently, use the host_config section. The domains can be added or removed dynamically with the command line interface or using the API.

If you use the host type mechanism, make sure you only configure modules which support dynamic domains in the modules or host_config.modules sections. MongooseIM will not start otherwise. Most of the modules are compatible with host types, but please read the particular extension module's page, or the incompatible modules list to see which do not. Moreover, s2s as well as XMPP components (XEP-0114), as configured in the listen.service section, do not support dynamic domains.

Note

At least one of general.hosts or general.host_types has to be provided.

Warning

Extension modules and database backends will be started separately for every host type from this list. When increasing the number of host types, please make sure you have enough resources available (e.g. connection limit set in the DBMS).

"},{"location":"configuration/general/#generaldefault_server_domain","title":"general.default_server_domain","text":"
  • Syntax: a string
  • Default: none, this option is mandatory.
  • Example: default_server_domain = \"my-xmpp-domain.com\"

This domain is used as a default when one cannot be determined, for example when sending XMPP stream errors to unauthenticated clients.

"},{"location":"configuration/general/#generallanguage","title":"general.language","text":"
  • Syntax: string representing the two-letter language code.
  • Default: \"en\"
  • Example: language = \"pl\"

Default language for messages sent by the server to users. You can get a full list of supported codes by executing cd [MongooseIM root] ; ls priv/*.msg | awk '{split($0,a,\"/\"); split(a[4],b,\".\"); print b[1]}' (en is not listed there)

"},{"location":"configuration/general/#database-settings","title":"Database settings","text":"

RDBMS connection pools are set using outgoing connections configuration. There are some additional options that influence all database connections in the server:

"},{"location":"configuration/general/#generalrdbms_server_type","title":"general.rdbms_server_type","text":"
  • Syntax: string, \"mssql\" or \"pgsql\"
  • Default: not set
  • Example: rdbms_server_type = \"mssql\"

When using MSSQL or PostgreSQL databases, this option allows MongooseIM to optimize some queries for these DBs (e.g. mod_mam_rdbms_user uses different queries for mssql).

"},{"location":"configuration/general/#access-management","title":"Access management","text":"

User access rules are configured mainly in the acl and access sections.

"},{"location":"configuration/general/#security","title":"Security","text":"

Here you can find some additional options related to system security.

"},{"location":"configuration/general/#generalregistration_timeout","title":"general.registration_timeout","text":"
  • Syntax: the string \"infinity\" or a number of seconds (positive integer)
  • Default: 600
  • Example: registration_timeout = \"infinity\"

Limits the registration frequency from a single IP address. The special value infinity means no limit.

"},{"location":"configuration/general/#generalhide_service_name","title":"general.hide_service_name","text":"
  • Syntax: boolean
  • Default: false
  • Example: hide_service_name = true

According to RFC 6120, even when a client sends invalid data after opening a connection, the server must open an XML stream and return a stream error anyway. For extra security, this option may be enabled. It changes MIM behaviour to simply close the connection without any errors returned (effectively hiding the server's identity).

"},{"location":"configuration/general/#user-session-management","title":"User session management","text":"

These options can be used to configure the way MongooseIM manages user sessions.

"},{"location":"configuration/general/#generalsm_backend","title":"general.sm_backend","text":"
  • Syntax: string: \"mnesia\", \"cets\" or \"redis\"
  • Default: \"mnesia\"
  • Example: sm_backend = \"redis\"

Backend for storing user session data. All nodes in a cluster must have access to a complete session database. CETS is a new backend that requires RDBMS to be configured to work properly. Mnesia is a legacy backend, sufficient in most cases; use Redis only in large deployments when you notice issues with the mnesia backend. The Redis backend requires a redis pool with the default tag defined in the outgoing_pools section. See the section about redis connection setup for more information.

Warning

When set to mnesia or cets, the corresponding internal database has to be enabled.

"},{"location":"configuration/general/#generalreplaced_wait_timeout","title":"general.replaced_wait_timeout","text":"
  • Syntax: positive integer, representing time in milliseconds
  • Default: 2000
  • Example: replaced_wait_timeout = 5000

When a user's session is replaced (due to a full JID conflict) by a new one, this parameter specifies the time MongooseIM waits for the old sessions to close. The default value is sufficient in most cases. If you observe replaced_wait_timeout warning in logs, then most probably the old sessions are frozen for some reason and it should be investigated.

"},{"location":"configuration/general/#xmpp-federation-s2s","title":"XMPP federation (S2S)","text":""},{"location":"configuration/general/#generals2s_backend","title":"general.s2s_backend","text":"
  • Syntax: string: \"mnesia\" or \"cets\"
  • Default: \"mnesia\"
  • Example: s2s_backend = \"cets\"

Backend for replicating the list of outgoing Server to Server (S2S) connections across the nodes of the local MongooseIM cluster.

Warning

The corresponding internal database has to be enabled.

"},{"location":"configuration/general/#external-xmpp-components","title":"External XMPP components","text":""},{"location":"configuration/general/#generalcomponent_backend","title":"general.component_backend","text":"
  • Syntax: string: \"mnesia\" or \"cets\"
  • Default: \"mnesia\"
  • Example: component_backend = \"cets\"

Backend for replicating the list of connected external components across the nodes of the local MongooseIM cluster.

Warning

The corresponding internal database has to be enabled.

"},{"location":"configuration/general/#message-routing","title":"Message routing","text":"

The following options influence the way MongooseIM routes incoming messages to their recipients.

"},{"location":"configuration/general/#generalroute_subdomains","title":"general.route_subdomains","text":"
  • Syntax: string, the only accepted value is \"s2s\"
  • Default: not set
  • Example: route_subdomains = \"s2s\"

If a stanza is addressed to a subdomain of the served domain and this option is set to s2s, such a stanza will be transmitted over a server-to-server connection. Without it, MongooseIM will try to route the stanza to one of its internal services.

"},{"location":"configuration/general/#generalrouting_modules","title":"general.routing_modules","text":"
  • Syntax: a list of strings representing the routing module names.
  • Default: [\"mongoose_router_global\", \"mongoose_router_localdomain\", \"mongoose_router_external_localnode\", \"mongoose_router_external\", \"mongoose_router_dynamic_domains\", \"ejabberd_s2s\"]
  • Example: routing_modules = [\"mongoose_router_global\", \"mongoose_router_localdomain\"]

Provides an ordered list of modules used for routing messages. All available modules are enabled by default, and you can change their order or disable some of them by providing your own list. See the Message routing section of the developer's guide for more information.

"},{"location":"configuration/general/#miscellaneous","title":"Miscellaneous","text":"

The options listed below are used to configure more specific settings, that do not need to be changed in usual use cases.

"},{"location":"configuration/general/#generalall_metrics_are_global","title":"general.all_metrics_are_global","text":"
  • Syntax: boolean
  • Default: false
  • Example: all_metrics_are_global = true

When enabled, all per-host metrics are merged into global equivalents. It means it is no longer possible to view individual host1, host2, host3, ... metrics, only sums are available. This option significantly reduces CPU and (especially) memory footprint in setups with exceptionally many domains (thousands, tens of thousands).

"},{"location":"configuration/general/#generalhttp_server_name","title":"general.http_server_name","text":"
  • Syntax: string
  • Default: \"Cowboy\"
  • Example: http_server_name = \"Apache\"

Replaces Cowboy's default name returned in the server HTTP response header. It may be used for extra security, as it makes it harder for the malicious user to learn what HTTP software is running under a specific port. This option applies to all configured HTTP listeners.

"},{"location":"configuration/general/#generalmax_fsm_queue","title":"general.max_fsm_queue","text":"
  • Syntax: positive integer
  • Default: not set
  • Example: max_fsm_queue = 5000

When specified, will terminate certain processes (e.g. client handlers) that have more messages accumulated in the queue than the specified limit, to prevent resource exhaustion. This option is set for C2S, outgoing S2S and component connections and can be overridden for particular s2s or service listeners in their configurations. Use with caution!

"},{"location":"configuration/general/#generaldomain_certfile","title":"general.domain_certfile","text":"
  • Syntax: array of TOML tables with the following mandatory content:
    • domain - string, XMPP domain name. In case of dynamic domains it should be a host type instead.
    • certfile - string, path in the file system
  • Default: not set
  • Example:
  domain_certfile = [\n    {domain = \"localhost1.com\", certfile = \"cert1.pem\"},\n    {domain = \"localhost2.com\", certfile = \"cert2.pem\"}\n  ]\n

This option overrides the configured certificate file for specific local XMPP domains.

Notes

  • This option applies to S2S and C2S connections.
  • Each domain (or host type) needs to be included in the list of hosts or host types.
"},{"location":"configuration/host_config/","title":"Options: Host config","text":"

The host_config section is used to configure options for specific XMPP domains or for host types, which are used to group multiple domains. For each domain or host type requiring such options, a host_config section needs to be created with the following format:

  • Syntax: domain subsection starts with [[host_config]] and contains the options listed below.
  • Default: none - all domain-level options need to be specified explicitly.
  • Example: see the examples for each section below.

Note

Each hosted domain needs to be included in the list of hosts in the general section. Similarly, each host type needs to be included in general.host_types.

"},{"location":"configuration/host_config/#general-options","title":"General options","text":""},{"location":"configuration/host_config/#host_confighost","title":"host_config.host","text":"
  • Syntax: string, domain name
  • Default: no default, either this option or host_config.host_type is mandatory
  • Example: host = \"my-xmpp-server.com\"

This option specifies the XMPP domain that this section refers to.

"},{"location":"configuration/host_config/#host_confighost_type","title":"host_config.host_type","text":"
  • Syntax: string, host type name
  • Default: no default, either this option or host_config.host is mandatory
  • Example: host_type = \"first type\"

This option specifies the host type that this section refers to.

"},{"location":"configuration/host_config/#configuration-sections","title":"Configuration sections","text":"

The following sections are accepted in host_config:

"},{"location":"configuration/host_config/#host_configgeneral","title":"host_config.general","text":"

The options defined here override the ones defined in the top-level general section. The following options are allowed:

  • route_subdomains
  • replaced_wait_timeout
"},{"location":"configuration/host_config/#example","title":"Example","text":"

The replaced_wait_timeout option is set to 2000 only for domain2.com.

[general]\n  hosts = [\"domain1.com\", \"domain2.com\", \"domain3.com\"]\n  loglevel = \"info\"\n  replaced_wait_timeout = 1000\n\n[[host_config]]\n  host = \"domain2.com\"\n\n  [host_config.general]\n    replaced_wait_timeout = 2000\n
"},{"location":"configuration/host_config/#host_configauth","title":"host_config.auth","text":"

This section completely overrides the top-level auth section, all options are allowed.

"},{"location":"configuration/host_config/#example_1","title":"Example","text":"

In the example below the number of scram_iterations is increased for domain2. It is necessary to put methods and password.hash there as well, as otherwise they would not be set for domain2.

[general]\n  hosts = [\"domain1.com\", \"domain2.com\", \"domain3.com\"]\n\n[auth]\n  methods = [\"rdbms\"]\n  password.hash = [\"sha256\"]\n\n[[host_config]]\n  host = \"domain2.com\"\n\n  [host_config.auth]\n    methods = [\"rdbms\"]\n    password.hash = [\"sha256\"]\n    scram_iterations = 40_000\n
"},{"location":"configuration/host_config/#host_configmodules","title":"host_config.modules","text":"

This section completely overrides the top-level modules section. Remember that only the modules supporting dynamic domains are allowed if you are specifying options for a host type. The ones that do not support it can be found in the modules list.

"},{"location":"configuration/host_config/#example_2","title":"Example","text":"

The modules enabled for domain2.com will be mod_disco and mod_stream_management. If we wanted to enable mod_roster, it would need to be repeated in host_config.

[general]\n  hosts = [\"domain1.com\", \"domain2.com\", \"domain3.com\"]\n\n[modules.mod_disco]\n  users_can_see_hidden_services = false\n\n[modules.mod_roster]\n  backend = \"rdbms\"\n\n[[host_config]]\n  host = \"domain2.com\"\n\n  [host_config.modules.mod_disco]\n    users_can_see_hidden_services = false\n\n  [host_config.modules.mod_stream_management]\n
"},{"location":"configuration/host_config/#host_configoutgoing_pools","title":"host_config.outgoing_pools","text":"

This section overrides any pool with the same type and tag that was defined in the top-level outgoing_pools section. If we wanted to enable a default rdbms pool only for \"host-type-basic\" for example, we could do so as follows:

[general]\n  host_types = [\"host-type-basic\", \"host-type-advanced\", \"host-type-privacy\"]\n\n[[host_config]]\n  host_type = \"host-type-basic\"\n\n  [host_config.outgoing_pools.rdbms.default]\n    workers = 5\n    [host_config.outgoing_pools.rdbms.default.connection]\n    ...\n

Configuration for such pools is all the same, except that the scope key is here disallowed.

"},{"location":"configuration/host_config/#host_configacl","title":"host_config.acl","text":"

The access classes defined here are merged with the ones defined in the top-level acl section - when a class is defined in both places, the result is a union of both classes.

"},{"location":"configuration/host_config/#example_3","title":"Example","text":"

The blocked access class is extended for host_config by adding hacker2.

[general]\n  hosts = [\"domain1.com\", \"domain2.com\", \"domain3.com\"]\n\n[acl]\n  blocked = [\n    {user = \"spammer\"},\n    {user = \"hacker1\"}\n  ]\n\n[[host_config]]\n  host = \"domain2.com\"\n\n  [host_config.acl]\n    blocked = [\n      {user = \"hacker2\"}\n    ]\n
"},{"location":"configuration/host_config/#host_configaccess","title":"host_config.access","text":"

The access rules defined here are merged with the ones defined in the top-level access section: When a rule is defined in both places:

  • If the top-level rule ends with a catch-all clause {acl = \"all\", value = \"allow\"}, the resulting domain-specific rule has the clauses from both rules with the domain-specific clauses inserted after the top-level ones, but before the catch-all clause.
  • If the top-level rule does not end with a catch-all clause, the resulting domain-specific rule has the clauses from both rules with the domain-specific clauses inserted after the top-level ones.
"},{"location":"configuration/host_config/#example_4","title":"Example","text":"

The c2s access rule defined at the top level allows anyone to connect. However, the rule for domain2.com is extended to prevent the blocked users from connecting:

[general]\n  hosts = [\"domain1.com\", \"domain2.com\", \"domain3.com\"]\n\n[access]\n  c2s = [\n    {acl = \"admin\", value = \"allow\"},\n    {acl = \"all\", value = \"allow\"}\n  ]\n\n[[host_config]]\n  host = \"domain2.com\"\n\n  [host_config.access]\n    c2s = [\n      {acl = \"blocked\", value = \"deny\"}\n    ]\n\n    register = [\n      {acl = \"all\", value = \"deny\"}\n    ]\n

The resulting rule for domain2.com could be written as:

c2s = [\n  {acl = \"admin\", value = \"allow\"},\n  {acl = \"blocked\", value = \"deny\"},\n  {acl = \"all\", value = \"allow\"}\n]\n

The register rule is defined only for domain2.com.

Note

Some access rules are checked outside of the context of any domain, e.g. the access rule for external components - defining them in host_config would have no effect.

"},{"location":"configuration/host_config/#host_configs2s","title":"host_config.s2s","text":"

This section completely overrides the top-level s2s section, all options are allowed.

"},{"location":"configuration/host_config/#example_5","title":"Example","text":"

The host_policy option is changed for domain2.com:

[general]\n  hosts = [\"domain1.com\", \"domain2.com\", \"domain3.com\"]\n\n[s2s]\n  default_policy = \"deny\"\n\n  host_policy = [\n    {host = \"good-xmpp.org\", policy = \"allow\"},\n    {host = \"bad-xmpp.org\", policy = \"deny\"}\n  ]\n\n[[host_config]]\n  host = \"domain2.com\"\n\n  [host_config.s2s]\n    host_policy = [\n      {host = \"bad-xmpp.org\", policy = \"allow\"},\n      {host = \"evil-xmpp.org\", policy = \"deny\"}\n    ]\n

Note that default_policy for domain2.com has the default value allow, because host_config.s2s completely overrides the top-level s2s section, and all options are reset to the respective default values, unless they are explicitly changed.

"},{"location":"configuration/internal-databases/","title":"Options: Internal Databases","text":"

Internal databases are used to cluster MongooseIM nodes, and to replicate in-memory data (e.g. client sessions) between them.

Mnesia is a legacy way to cluster MongooseIM nodes. It could also be used to store persistent data, but we recommend using RDBMS databases instead for scalability and stability reasons.

CETS is a new way to cluster MongooseIM nodes. CETS needs to know a list of nodes for the node discovery. There are two ways to get a list of nodes:

  • RDBMS database. MongooseIM would write into RDBMS its nodename and read a list of other nodes. This is the best option if you are already using a relational database.
  • A text file with a list of nodes on each line. It is useful when there is an external script to make this file based on some custom logic (for example, a bash script that uses AWS CLI to discover instances in the autoscaling group). This file would be automatically reread on change.

Omitting this section entirely is equivalent to having only Mnesia enabled:

[internal_databases.mnesia]\n

The following example enables only CETS with the default RDBMS discovery backend:

[internal_databases.cets]\n

Warning

When switching to CETS, you need to configure particular backends to actually use it:

  • general backends: sm_backend, s2s_backend, component_backend
  • module backends: mod_bosh, mod_stream_management, mod_jingle_sip, mod_muc

Sometimes you might want to have both databases enabled and choose which backends use a particular DB:

[internal_databases.mnesia]\n\n[internal_databases.cets]\n
"},{"location":"configuration/internal-databases/#cets-options","title":"CETS Options","text":""},{"location":"configuration/internal-databases/#internal_databasescetsbackend","title":"internal_databases.cets.backend","text":"

Backend for CETS discovery.

  • Syntax: string, one of \"rdbms\", \"file\".
  • Default: \"rdbms\"
  • Example: backend = \"rdbms\"
"},{"location":"configuration/internal-databases/#internal_databasescetscluster_name","title":"internal_databases.cets.cluster_name","text":"

Namespace for the cluster. Only nodes with the same cluster name would be discovered. This option is for RDBMS backend.

  • Syntax: string.
  • Default: \"mongooseim\"
  • Example: cluster_name = \"mongooseim\"
"},{"location":"configuration/internal-databases/#internal_databasescetsnode_list_file","title":"internal_databases.cets.node_list_file","text":"

File to read a list of nodes from. Relative to the MongooseIM's release directory. This option is for the file backend. Required, if backend = \"file\".

  • Syntax: path.
  • Default: not specified.
  • Example: node_list_file = \"/etc/mim_nodes.txt\"
"},{"location":"configuration/internal-databases/#example","title":"Example","text":"

The following example enables CETS with the file discovery backend:

[internal_databases.cets]\n    backend = \"file\"\n    node_list_file = \"cets_disco.txt\"\n
"},{"location":"configuration/listen/","title":"Options: Listen","text":"

The listen section specifies how MongooseIM handles incoming connections.

  • Syntax: Each listener is specified in a subsection starting with [[listen.type]] where type is one of the allowed listener types, handling different types of incoming connections:

    • c2s - client-to-server XMPP connections,
    • s2s - server-to-server XMPP connections,
    • service - XMPP connections from external components,
    • http - HTTP connections from clients or other services.

The double-bracket syntax is used because there can be multiple listeners of a given type, so for each listener type there is a TOML array of one or more tables (subsections).

  • Default: None - each listener needs to be enabled explicitly. Typical listeners are already specified in the example configuration file.
  • Example: The simplest XMPP listener configuration, handling only incoming XMPP client connections:
[[listen.c2s]]\n  port = 5222\n
"},{"location":"configuration/listen/#general-listener-options","title":"General listener options","text":"

The options listed below are the same for all listener types. They set the basic listening socket options. Only port is required, the rest can be used to change the default settings.

"},{"location":"configuration/listen/#listenport","title":"listen.*.port","text":"
  • Syntax: integer, port number
  • Default: no default, this option is mandatory.
  • Example: port = 5222

The port number to which the listening socket is bound.

"},{"location":"configuration/listen/#listenip_address","title":"listen.*.ip_address","text":"
  • Syntax: string with the IP address
  • Default: all-zeros address (e.g. \"0.0.0.0\" for IPv4)
  • Example: ip_address = \"127.0.0.1\"

The IP address to which the listening socket is bound.

"},{"location":"configuration/listen/#listenproto","title":"listen.*.proto","text":"
  • Syntax: string, only \"tcp\" is accepted
  • Default: \"tcp\"
  • Example: proto = \"tcp\"

The protocol, which is TCP by default. Currently this is the only valid option.

"},{"location":"configuration/listen/#listenip_version","title":"listen.*.ip_version","text":"
  • Syntax: integer, 4 or 6
  • Default: if ip_address is specified, the IP version is determined from that address, otherwise it is 4
  • Example: ip_version = 6

Allows to set the IP version to IPv6. Does not need to be set if ip_address is defined.

"},{"location":"configuration/listen/#xmpp-listener-options","title":"XMPP listener options","text":"

The options listed below can be set for the c2s, s2s and service listeners to adjust their parameters.

"},{"location":"configuration/listen/#listenbacklog","title":"listen.*.backlog","text":"
  • Syntax: positive integer
  • Default: 1024
  • Example: backlog = 1000

Overrides the default TCP backlog value.

"},{"location":"configuration/listen/#listenproxy_protocol","title":"listen.*.proxy_protocol","text":"
  • Syntax: boolean
  • Default: false
  • Example: proxy_protocol = true

When set to true, Proxy Protocol is enabled and each connecting client has to provide a proxy header. Use only with a proxy (or a load balancer) to allow it to provide the connection details (including the source IP address) of the original client. Versions 1 and 2 of the protocol are supported.

"},{"location":"configuration/listen/#listenhibernate_after","title":"listen.*.hibernate_after","text":"
  • Syntax: non-negative integer or the string \"infinity\"
  • Default: 0
  • Example: hibernate_after = 10

Time in milliseconds after which a client process spawned by this listener will hibernate. Hibernation greatly reduces memory consumption of client processes, but may result in increased CPU consumption if a client is used very frequently. The default, recommended value of 0 means that the client processes will hibernate at every opportunity.

"},{"location":"configuration/listen/#listenmax_stanza_size","title":"listen.*.max_stanza_size","text":"
  • Syntax: positive integer or the string \"infinity\"
  • Default: \"infinity\"
  • Example: max_stanza_size = 10_000

Maximum allowed incoming stanza size in bytes.

Warning

This limit is checked after the input data parsing, so it does not apply to the input data size itself.

"},{"location":"configuration/listen/#listennum_acceptors","title":"listen.*.num_acceptors","text":"
  • Syntax: positive integer
  • Default: 100
  • Example: num_acceptors = 200

The number of processes accepting new connections on the listening socket.

"},{"location":"configuration/outgoing-connections/","title":"Options: Outgoing connections","text":"

MongooseIM can be configured to talk to external services like databases or HTTP servers. The interface for outgoing connections management is available via the outgoing_pools config option for the following types of connections:

  • cassandra - pool of connections to Cassandra cluster
  • redis - pool of connections to Redis server
  • http - pool of connections to an HTTP(S) server MongooseIM can talk to, for example HTTP authentication backend or HTTP notifications
  • elastic - pool of connections to ElasticSearch server
  • rdbms - pool of connections to an RDBMS database
  • rabbit - pool of connections to a RabbitMQ server
  • ldap - pool of connections to an LDAP server

  • Syntax: Each pool is specified in a subsection starting with [outgoing_pools.type.tag], where type is one of available connection types and tag is an arbitrary value uniquely identifying the pool within its type. This allows you to create multiple dedicated pools of the same type.

"},{"location":"configuration/outgoing-connections/#general-pool-options","title":"General pool options","text":""},{"location":"configuration/outgoing-connections/#outgoing_poolsscope","title":"outgoing_pools.*.*.scope","text":"
  • Syntax: string, one of: \"global\", \"host_type\".
  • Default: \"global\"
  • Example: scope = \"host_type\"

scope can be set to:

  • global - meaning that the pool will be started once no matter how many XMPP hosts are served by MongooseIM.
  • host_type - the pool will be started for each static XMPP host or host type served by MongooseIM.

    Note

    A pool with scope global and tag default is used by services that are not configured by host_type, like service_domain_db or service_mongoose_system_metrics, or by modules that don't support dynamic domains, like mod_pubsub. If a global default pool is not configured, these services will fail.

    Note

    The option host is still supported and behaves equivalent to host_type; however, it is deprecated in favour of the latter.

"},{"location":"configuration/outgoing-connections/#worker-pool-options","title":"Worker pool options","text":"

All pools are managed by the inaka/worker_pool library.

Available options are:

"},{"location":"configuration/outgoing-connections/#outgoing_poolsstrategy","title":"outgoing_pools.*.*.strategy","text":"
  • Syntax: string, one of: \"best_worker\", \"random_worker\", \"next_worker\", \"available_worker\", \"next_available_worker\"
  • Default: \"best_worker\"
  • Example: strategy = \"available_worker\"

Defines worker selection strategy. Consult worker_pool documentation for details.

"},{"location":"configuration/outgoing-connections/#outgoing_poolsworkers","title":"outgoing_pools.*.*.workers","text":"
  • Syntax: positive integer
  • Default: 10 (20 for Cassandra pool)
  • Example: workers = 100

Number of workers to be started by the pool.

"},{"location":"configuration/outgoing-connections/#outgoing_poolscall_timeout","title":"outgoing_pools.*.*.call_timeout","text":"
  • Syntax: positive integer
  • Default: 5000 (60000 for RDBMS pool)
  • Example: call_timeout = 3000

Number of milliseconds after which a call to the pool will time out.

"},{"location":"configuration/outgoing-connections/#connection-options","title":"Connection options","text":"

Options specific to a pool connection are defined in a subsection starting with [outgoing_pools.*.*.connection]. For example:

[outgoing_pools.rdbms.default]\n  scope = \"global\"\n  workers = 5\n\n  [outgoing_pools.rdbms.default.connection]\n  ...\n
"},{"location":"configuration/outgoing-connections/#rdbms-options","title":"RDBMS options","text":""},{"location":"configuration/outgoing-connections/#outgoing_poolsrdbmsconnectiondriver","title":"outgoing_pools.rdbms.*.connection.driver","text":"
  • Syntax: string, one of \"pgsql\", \"mysql\" or \"odbc\" (a supported driver)
  • Default: none - this option is mandatory
  • Example: driver = \"pgsql\"

Selects the driver for RDBMS connection. The choice of a driver impacts the set of available options.

"},{"location":"configuration/outgoing-connections/#outgoing_poolsrdbmsconnectionkeepalive_interval","title":"outgoing_pools.rdbms.*.connection.keepalive_interval","text":"
  • Syntax: positive integer
  • Default: not set - disabled by default
  • Example: keepalive_interval = 30

When enabled, MongooseIM will send a SELECT 1 query through every DB connection at the given interval to keep them open. This option should be used to ensure that database connections are restarted after they become broken (e.g. due to a database restart or a load balancer dropping connections). Currently, not every network-related error returned from a database driver to a regular query will imply a connection restart.

"},{"location":"configuration/outgoing-connections/#outgoing_poolsrdbmsconnectionquery_timeout","title":"outgoing_pools.rdbms.*.connection.query_timeout","text":"
  • Syntax: positive integer, in milliseconds
  • Default: 5000
  • Example: query_timeout = 5000

How long MongooseIM will wait for the database to answer for a query.

"},{"location":"configuration/outgoing-connections/#outgoing_poolsrdbmsconnectionmax_start_interval","title":"outgoing_pools.rdbms.*.connection.max_start_interval","text":"
  • Syntax: positive integer
  • Default: 30
  • Example: max_start_interval = 30

When MongooseIM fails to connect to the DB, it retries with an exponential backoff. This option limits the backoff time for faster reconnection when the DB becomes reachable again.

"},{"location":"configuration/outgoing-connections/#options-for-pgsql-and-mysql","title":"Options for pgsql and mysql","text":""},{"location":"configuration/outgoing-connections/#outgoing_poolsrdbmsconnectionhost","title":"outgoing_pools.rdbms.*.connection.host","text":"
  • Syntax: string
  • Default: no default; required for pgsql and mysql
  • Example: host = \"localhost\"
"},{"location":"configuration/outgoing-connections/#outgoing_poolsrdbmsconnectionport","title":"outgoing_pools.rdbms.*.connection.port","text":"
  • Syntax: integer
  • Default: 5432 for pgsql; 3306 for mysql
  • Example: port = 5343
"},{"location":"configuration/outgoing-connections/#outgoing_poolsrdbmsconnectiondatabase","title":"outgoing_pools.rdbms.*.connection.database","text":"
  • Syntax: string
  • Default: no default; required for pgsql and mysql
  • Example: database = \"mim-db\"
"},{"location":"configuration/outgoing-connections/#outgoing_poolsrdbmsconnectionusername","title":"outgoing_pools.rdbms.*.connection.username","text":"
  • Syntax: string
  • Default: no default; required for pgsql and mysql
  • Example: username = \"mim-user\"
"},{"location":"configuration/outgoing-connections/#outgoing_poolsrdbmsconnectionpassword","title":"outgoing_pools.rdbms.*.connection.password","text":"
  • Syntax: string
  • Default: no default; required for pgsql and mysql
  • Example: password = \"mim-password\"

To enable TLS, you need to include the TLS section in the connection options. There is one additional option for PostgreSQL:

"},{"location":"configuration/outgoing-connections/#outgoing_poolsrdbmsconnectiontlsrequired","title":"outgoing_pools.rdbms.*.connection.tls.required","text":"
  • Syntax: boolean
  • Default: false
  • Example: tls.required = true

This option can be used to enforce a TLS connection.

"},{"location":"configuration/outgoing-connections/#odbc-options","title":"ODBC options","text":""},{"location":"configuration/outgoing-connections/#outgoing_poolsrdbmsconnectionsettings","title":"outgoing_pools.rdbms.*.connection.settings","text":"
  • Syntax: string
  • Default: no default; required if the \"odbc\" driver is specified
  • Example: settings = \"DSN=mydb\"

ODBC-specific string defining connection parameters.

"},{"location":"configuration/outgoing-connections/#odbc-ssl-connection-setup","title":"ODBC SSL connection setup","text":"

If you've configured MongooseIM to use an ODBC driver, then the SSL options, along other connection options, should be present in the ~/.odbc.ini file.

To enable SSL connection the sslmode option needs to be set to verify-full. Additionally, you can provide the path to the CA certificate using the sslrootcert option.

"},{"location":"configuration/outgoing-connections/#example-odbcini-configuration","title":"Example ~/.odbc.ini configuration","text":"
[mydb]\nDriver      = ...\nServerName  = ...\nPort        = ...\n...\nsslmode     = verify-full\nsslrootcert = /path/to/ca/cert\n
"},{"location":"configuration/outgoing-connections/#http-options","title":"HTTP options","text":""},{"location":"configuration/outgoing-connections/#outgoing_poolshttpconnectionhost","title":"outgoing_pools.http.*.connection.host","text":"
  • Syntax: \"http[s]://string[:integer]\"
  • Default: no default; this option is mandatory
  • Example: host = \"https://server.com:879\"
"},{"location":"configuration/outgoing-connections/#outgoing_poolshttpconnectionpath_prefix","title":"outgoing_pools.http.*.connection.path_prefix","text":"
  • Syntax: string
  • Default: \"/\"
  • Example: path_prefix = \"/api/auth/\"

Initial part of path which will be common to all calls. Prefix will be automatically prepended to path specified by a call to the pool.

"},{"location":"configuration/outgoing-connections/#outgoing_poolshttpconnectionrequest_timeout","title":"outgoing_pools.http.*.connection.request_timeout","text":"
  • Syntax: positive integer
  • Default: 2000 (milliseconds)
  • Example: request_timeout = 5000

Number of milliseconds after which http call to the server will time out. It should be lower than call_timeout set at the pool level.

To enable TLS, you need to include the TLS section in the connection options.

"},{"location":"configuration/outgoing-connections/#redis-specific-options","title":"Redis-specific options","text":"

Redis can be used as a session manager backend. Global distribution (implemented in mod_global_distrib) requires Redis pool.

There are two important limitations:

  • for a session backend, the Tag parameter has to be equal to default
  • redis backend is not compatible with available_worker strategy.
"},{"location":"configuration/outgoing-connections/#outgoing_poolsredisconnectionhost","title":"outgoing_pools.redis.*.connection.host","text":"
  • Syntax: string
  • Default: \"127.0.0.1\"
  • Example: host = \"redis.local\"
"},{"location":"configuration/outgoing-connections/#outgoing_poolsredisconnectionport","title":"outgoing_pools.redis.*.connection.port","text":"
  • Syntax: integer, between 0 and 65535, non-inclusive
  • Default: 6379
  • Example: port = 9876
"},{"location":"configuration/outgoing-connections/#outgoing_poolsredisconnectiondatabase","title":"outgoing_pools.redis.*.connection.database","text":"
  • Syntax: non-negative integer
  • Default: 0
  • Example: database = 2

Logical database index (zero-based).

"},{"location":"configuration/outgoing-connections/#outgoing_poolsredisconnectionpassword","title":"outgoing_pools.redis.*.connection.password","text":"
  • Syntax: string
  • Default: \"\"
  • Example: password = \"topsecret\"
"},{"location":"configuration/outgoing-connections/#cassandra-options","title":"Cassandra options","text":""},{"location":"configuration/outgoing-connections/#outgoing_poolscassandraconnectionservers","title":"outgoing_pools.cassandra.*.connection.servers","text":"
  • Syntax: a TOML array of tables containing keys \"host\" and \"port\"
  • Default: [{host = \"localhost\", port = 9042}]
  • Example: servers = [{host = \"host_one\", port = 9042}, {host = \"host_two\", port = 9042}]
"},{"location":"configuration/outgoing-connections/#outgoing_poolscassandraconnectionkeyspace","title":"outgoing_pools.cassandra.*.connection.keyspace","text":"
  • Syntax: string
  • Default: \"mongooseim\"
  • Example: keyspace = \"big_mongooseim_database\"

To use plain text authentication (using cqerl_auth_plain_handler module):

"},{"location":"configuration/outgoing-connections/#outgoing_poolscassandraconnectionauthplainusername","title":"outgoing_pools.cassandra.*.connection.auth.plain.username","text":"
  • Syntax: string
  • Default: none, this option is mandatory
  • Example: username = \"auser\"
"},{"location":"configuration/outgoing-connections/#outgoing_poolscassandraconnectionauthplainpassword","title":"outgoing_pools.cassandra.*.connection.auth.plain.password","text":"
  • Syntax: string
  • Default: none, this option is mandatory
  • Example: password = \"somesecretpassword\"

Support for other authentication modules may be added in the future.

To enable TLS, you need to include the TLS section in the connection options.

"},{"location":"configuration/outgoing-connections/#elasticsearch-options","title":"Elasticsearch options","text":"

Currently, only one pool tagged default can be used.

"},{"location":"configuration/outgoing-connections/#outgoing_poolselasticdefaultconnectionhost","title":"outgoing_pools.elastic.default.connection.host","text":"
  • Syntax: non-empty string
  • Default: \"localhost\"
  • Example: host = \"otherhost\"
"},{"location":"configuration/outgoing-connections/#outgoing_poolselasticdefaultconnectionport","title":"outgoing_pools.elastic.default.connection.port","text":"
  • Syntax: positive integer
  • Default: 9200
  • Example: port = 9211

MongooseIM uses the inaka/tirerl library to communicate with ElasticSearch. This library uses worker_pool in a slightly different way than MongooseIM does, so the following options are not configurable:

  • call_timeout (infinity)
  • worker selection strategy (available_worker or what's set as default_strategy of worker_pool application)

The only pool-related variable you can tweak is thus the number of workers.

Run the following function in the MongooseIM shell to verify that the connection has been established:

1> mongoose_elasticsearch:health().\n{ok,#{<<\"active_primary_shards\">> => 15,<<\"active_shards\">> => 15,\n       <<\"active_shards_percent_as_number\">> => 50.0,\n       <<\"cluster_name\">> => <<\"docker-cluster\">>,\n       <<\"delayed_unassigned_shards\">> => 0,\n       <<\"initializing_shards\">> => 0,\n       <<\"number_of_data_nodes\">> => 1,\n       <<\"number_of_in_flight_fetch\">> => 0,\n       <<\"number_of_nodes\">> => 1,\n       <<\"number_of_pending_tasks\">> => 0,\n       <<\"relocating_shards\">> => 0,\n       <<\"status\">> => <<\"yellow\">>,\n       <<\"task_max_waiting_in_queue_millis\">> => 0,\n       <<\"timed_out\">> => false,\n       <<\"unassigned_shards\">> => 15}}\n

Note that the output might differ based on your ElasticSearch cluster configuration.

"},{"location":"configuration/outgoing-connections/#rabbitmq-options","title":"RabbitMQ options","text":"

The Tag parameter must be set to event_pusher in order to be able to use the pool for mod_event_pusher_rabbit. Any other Tag can be used for other purposes.

"},{"location":"configuration/outgoing-connections/#outgoing_poolsrabbitconnectionhost","title":"outgoing_pools.rabbit.*.connection.host","text":"
  • Syntax: string
  • Default: \"localhost\"
  • Example: host = \"anotherhost\"
"},{"location":"configuration/outgoing-connections/#outgoing_poolsrabbitconnectionport","title":"outgoing_pools.rabbit.*.connection.port","text":"
  • Syntax: integer
  • Default: 5672
  • Example: port = 4561
"},{"location":"configuration/outgoing-connections/#outgoing_poolsrabbitconnectionusername","title":"outgoing_pools.rabbit.*.connection.username","text":"
  • Syntax: string
  • Default: \"guest\"
  • Example: username = \"corpop\"
"},{"location":"configuration/outgoing-connections/#outgoing_poolsrabbitconnectionpassword","title":"outgoing_pools.rabbit.*.connection.password","text":"
  • Syntax: string
  • Default: \"guest\"
  • Example: password = \"guest\"
"},{"location":"configuration/outgoing-connections/#outgoing_poolsrabbitconnectionconfirms_enabled","title":"outgoing_pools.rabbit.*.connection.confirms_enabled","text":"
  • Syntax: boolean
  • Default: false
  • Example: confirms_enabled = false

Enables/disables one-to-one publisher confirms.

"},{"location":"configuration/outgoing-connections/#outgoing_poolsrabbitconnectionmax_worker_queue_len","title":"outgoing_pools.rabbit.*.connection.max_worker_queue_len","text":"
  • Syntax: non-negative integer or \"infinity\"
  • Default: 1000
  • Example: max_worker_queue_len = \"infinity\"

Sets a limit of messages in a worker's mailbox above which the worker starts dropping the messages. If a worker message queue length reaches the limit, messages from the head of the queue are dropped until the queue length is again below the limit. Use infinity to disable.

"},{"location":"configuration/outgoing-connections/#ldap-options","title":"LDAP options","text":""},{"location":"configuration/outgoing-connections/#outgoing_poolsldapconnectionservers","title":"outgoing_pools.ldap.*.connection.servers","text":"
  • Syntax: an array of strings
  • Default: [\"localhost\"]
  • Example: servers = [\"ldap_one\", \"ldap_two\"]
"},{"location":"configuration/outgoing-connections/#outgoing_poolsldapconnectionport","title":"outgoing_pools.ldap.*.connection.port","text":"
  • Syntax: integer
  • Default: 389 (or 636 if TLS is enabled)
  • Example: port = 800
"},{"location":"configuration/outgoing-connections/#outgoing_poolsldapconnectionroot_dn","title":"outgoing_pools.ldap.*.connection.root_dn","text":"
  • Syntax: string
  • Default: empty string
  • Example: root_dn = \"cn=admin,dc=example,dc=com\"

Leaving out this option makes it an anonymous connection, which most likely is what you want.

"},{"location":"configuration/outgoing-connections/#outgoing_poolsldapconnectionpassword","title":"outgoing_pools.ldap.*.connection.password","text":"
  • Syntax: string
  • Default: empty string
  • Example: password = \"topsecret\"
"},{"location":"configuration/outgoing-connections/#outgoing_poolsldapconnectionconnect_interval","title":"outgoing_pools.ldap.*.connection.connect_interval","text":"
  • Syntax: positive integer
  • Default: 10000
  • Example: connect_interval = 20000

Reconnect interval after a failed connection.

To enable TLS, you need to include the TLS section in the connection options.

"},{"location":"configuration/outgoing-connections/#tls-options","title":"TLS options","text":"

TLS options for a given pool type/tag pair are defined in a subsection starting with [outgoing_pools.[pool_type].[pool_tag].connection.tls].

"},{"location":"configuration/outgoing-connections/#outgoing_poolsconnectiontlsverify_mode","title":"outgoing_pools.*.*.connection.tls.verify_mode","text":"
  • Syntax: string, one of: \"peer\", \"selfsigned_peer\", \"none\"
  • Default: \"peer\"
  • Example: tls.verify_mode = \"none\"

Specifies the way server certificate verification works:

  • peer - makes sure the server certificate is valid and signed by a trusted CA. Requires a valid cacertfile.
  • selfsigned_peer - makes sure the server certificate is valid, but allows self-signed certificates. Requires a valid cacertfile.
  • none - server certificate is not checked.
"},{"location":"configuration/outgoing-connections/#outgoing_poolsconnectiontlscertfile","title":"outgoing_pools.*.*.connection.tls.certfile","text":"
  • Syntax: string, path in the file system
  • Default: not set
  • Example: tls.certfile = \"server.pem\"

Path to the X509 PEM file with a certificate. If the certificate is signed by an intermediate CA, you should specify here the whole CA chain by concatenating all public keys together.

"},{"location":"configuration/outgoing-connections/#outgoing_poolsconnectiontlscacertfile","title":"outgoing_pools.*.*.connection.tls.cacertfile","text":"
  • Syntax: string, path in the file system
  • Default: not set
  • Example: tls.cacertfile = \"ca.pem\"

Path to the X509 PEM file with a CA chain that will be used to verify clients. It won't have any effect if verify_mode is set to \"none\".

"},{"location":"configuration/outgoing-connections/#outgoing_poolsconnectiontlskeyfile","title":"outgoing_pools.*.*.connection.tls.keyfile","text":"
  • Syntax: string, path in the file system
  • Default: not set
  • Example: tls.keyfile = \"key.pem\"

Path to the X509 PEM file with the private key.

"},{"location":"configuration/outgoing-connections/#outgoing_poolsconnectiontlspassword","title":"outgoing_pools.*.*.connection.tls.password","text":"
  • Syntax: string
  • Default: not set
  • Example: tls.password = \"secret\"

Password to the X509 PEM file with the private key.

"},{"location":"configuration/outgoing-connections/#outgoing_poolsconnectiontlsciphers","title":"outgoing_pools.*.*.connection.tls.ciphers","text":"
  • Syntax: string with the OpenSSL cipher suite specification
  • Default: not set, all supported cipher suites are accepted
  • Example: tls.ciphers = \"ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-GCM-SHA384\"

Cipher suites to use. Please refer to the OpenSSL documentation for the cipher string format. For allowed values, see the Erlang/OTP SSL documentation.

"},{"location":"configuration/outgoing-connections/#outgoing_poolsconnectiontlsversions","title":"outgoing_pools.*.*.connection.tls.versions","text":"
  • Syntax: list of strings
  • Default: not set, all supported versions are accepted
  • Example: tls.versions = [\"tlsv1.2\", \"tlsv1.3\"]

TLS protocol versions to use. For allowed values, see the Erlang/OTP SSL documentation

"},{"location":"configuration/outgoing-connections/#outgoing_poolsconnectiontlsserver_name_indicationenabled","title":"outgoing_pools.*.*.connection.tls.server_name_indication.enabled","text":"
  • Syntax: boolean
  • Default: \"true\", but effective only if verify_mode is not \"none\".
  • Example: tls.server_name_indication.enabled = false

Enables SNI extension to TLS protocol. You can set it to false to disable the extension.

"},{"location":"configuration/outgoing-connections/#outgoing_poolsconnectiontlsserver_name_indicationhost","title":"outgoing_pools.*.*.connection.tls.server_name_indication.host","text":"
  • Syntax: string
  • Default: not set
  • Example: tls.server_name_indication.host = \"domain.com\"

Domain against which the certificates will be checked, using SNI.

"},{"location":"configuration/outgoing-connections/#outgoing_poolsconnectiontlsserver_name_indicationprotocol","title":"outgoing_pools.*.*.connection.tls.server_name_indication.protocol","text":"
  • Syntax: string, one of \"default\" or \"https\"
  • Default: \"default\"
  • Example: tls.server_name_indication.protocol = \"https\"

See the OTP documentation for an explanation. You'd usually want to set it to \"https\" for reasons described in the security recommendations.

"},{"location":"configuration/release-options/","title":"Release Options","text":"

When building a MongooseIM release from source code, the initial configuration files are generated with options taken from the vars-toml.config file found in the [MongooseIM root]/rel/ directory. You can change the values in this file to affect the resulting vm.args and mongooseim.toml files.

The file contains Erlang tuples, each terminated with a period ('.'). For users not familiar with Erlang syntax, here is a quick cheat sheet:

  • Each config option (key and value) is a tuple. Tuples are (Erlangers, forgive us the simplification) other Erlang terms separated with commas and enclosed in curly brackets ({}).
  • Tuples (at least the top-level ones) in vars.config are always 2-element.
  • The first element of each tuple is the name (Erlang atom).
  • The second element is a quoted string. Any quotes (\") inside the string should be escaped with a backslash (\\).

There are two types of options: parameters and blocks:

  • a parameter is inserted into the value of an already defined option. Parameters are mandatory - a valid value has to be provided.
  • a block can be an empty string, one line or multiple lines, defining zero, one or more options. Blocks are optional - the default is an empty string.
"},{"location":"configuration/release-options/#vmargs-options","title":"vm.args options","text":"

These options are inserted into the rel/files/vm.args template.

"},{"location":"configuration/release-options/#node_name","title":"node_name","text":"
  • Type: parameter
  • Option: value of -sname in vm.args
  • Syntax: Erlang node name: name@host
  • Example: {node_name, \"mongooseim@localhost\"}.
"},{"location":"configuration/release-options/#highload_vm_args","title":"highload_vm_args","text":"
  • Type: block
  • Option: arguments in vm.args: +K, +A, +P, -env ERL_MAX_PORTS
  • Syntax: command-line arguments
  • Example: {highload_vm_args, \"+P 10000000 -env ERL_MAX_PORTS 250000\"}.
"},{"location":"configuration/release-options/#epmd_module","title":"epmd_module","text":"

Allows setting the EPMD module to mongoose_epmd in case CETS is used with the RDBMS backend, to enable getting IP addresses of the remote nodes using RDBMS instead of the default resolver.

  • Type: parameter
  • Option: value of -epmd_module in vm.args
  • Syntax: Erlang module name: mongoose_epmd
  • Example: {epmd_module, \"mongoose_epmd\"}.
"},{"location":"configuration/release-options/#toml-options","title":"TOML Options","text":"

These options are inserted into the rel/files/mongooseim.toml template.

"},{"location":"configuration/release-options/#hosts","title":"hosts","text":"
  • Type: parameter
  • Option: general.hosts
  • Syntax: comma-separated list of strings
  • Example: {hosts, \"\\\"localhost\\\", \\\"domain2\\\"\"}.
"},{"location":"configuration/release-options/#host_config","title":"host_config","text":"
  • Type: block
  • Option: host_config
  • Syntax: TOML block, one or more [[host_config]] sections.
  • Example:
{host_config, \"\n[[host_config]]\n  host = \\\"anonymous.localhost\\\"\n\n  [host_config.auth]\n    methods = [\\\"anonymous\\\"]\n\"}.\n
"},{"location":"configuration/release-options/#auth_ldap","title":"auth_ldap","text":"
  • Type: block
  • Option: auth.ldap
  • Syntax: TOML block, the [auth.ldap] subsection
  • Example:
{auth_ldap, \"\n  [auth.ldap]\n    base = \\\"ou=Users,dc=esl,dc=com\\\"\n    filter = \\\"(objectClass=inetOrgPerson)\\\"\n\"}.\n
"},{"location":"configuration/release-options/#all_metrics_are_global","title":"all_metrics_are_global","text":"
  • Type: parameter
  • Option: general.all_metrics_are_global
  • Syntax: boolean
  • Example: {all_metrics_are_global, \"false\"}.
"},{"location":"configuration/release-options/#s2s_addr","title":"s2s_addr","text":"
  • Type: block
  • Option: s2s.address
  • Syntax: TOML key-value pair with the address option
  • Example:
{s2s_addr, \"\n  address = [\n    {host = \\\"my.xmpp.org\\\", ip_address = \\\"192.0.100.1\\\"},\n    {host = \\\"your.xmpp.org\\\", ip_address = \\\"192.0.1.100\\\", port = 5271}\n  ]\n\"}.\n
"},{"location":"configuration/release-options/#s2s_default_policy","title":"s2s_default_policy","text":"
  • Type: parameter
  • Option: s2s.default_policy
  • Syntax: string
  • Example: {s2s_default_policy, \"\\\"deny\\\"\"}.
"},{"location":"configuration/release-options/#outgoing_s2s_port","title":"outgoing_s2s_port","text":"
  • Type: parameter
  • Option: s2s.outgoing.port
  • Syntax: integer
  • Example: {outgoing_s2s_port, \"5269\"}.
"},{"location":"configuration/release-options/#c2s_port","title":"c2s_port","text":"
  • Type: parameter
  • Option: listen.c2s.port
  • Syntax: integer
  • Example: {c2s_port, \"5222\"}.
"},{"location":"configuration/release-options/#s2s_port","title":"s2s_port","text":"
  • Type: parameter
  • Option: listen.s2s.port
  • Syntax: integer
  • Example: {s2s_port, \"5269\"}.
"},{"location":"configuration/release-options/#cowboy_port","title":"cowboy_port","text":"
  • Type: parameter
  • Option: listen.http.port
  • Syntax: integer
  • Example: {http_port, \"5280\"}.
"},{"location":"configuration/release-options/#mod_last","title":"mod_last","text":"
  • Type: block
  • Option: modules.mod_last
  • Syntax: TOML section: [modules.mod_last]
  • Example: {mod_last, \"[modules.mod_last]\"}.
"},{"location":"configuration/release-options/#mod_offline","title":"mod_offline","text":"
  • Type: block
  • Option: modules.mod_offline
  • Syntax: TOML section: [modules.mod_offline]
  • Example:
{mod_offline, \"\n[modules.mod_offline]\n  access_max_user_messages = \\\"max_user_offline_messages\\\"\n\"}.\n
"},{"location":"configuration/release-options/#mod_privacy","title":"mod_privacy","text":"
  • Type: block
  • Option: modules.mod_privacy
  • Syntax: TOML section: [modules.mod_privacy]
  • Example: {mod_privacy, \"[modules.mod_privacy]\"}.
"},{"location":"configuration/release-options/#mod_private","title":"mod_private","text":"
  • Type: block
  • Option: modules.mod_private
  • Syntax: TOML section: [modules.mod_private]
  • Example: {mod_private, \"[modules.mod_private]\"}.
"},{"location":"configuration/release-options/#mod_roster","title":"mod_roster","text":"
  • Type: block
  • Option: modules.mod_roster
  • Syntax: TOML section: [modules.mod_roster]
  • Example: {mod_roster, \"[modules.mod_roster]\"}.
"},{"location":"configuration/release-options/#mod_vcard","title":"mod_vcard","text":"
  • Type: block
  • Option: modules.mod_vcard
  • Syntax: TOML section: [modules.mod_vcard]
  • Example:
{mod_vcard, \"\n[modules.mod_vcard]\n  host = \\\"vjud.@HOST@\\\"\n\"}.\n
"},{"location":"configuration/release-options/#sm_backend","title":"sm_backend","text":"
  • Type: parameter
  • Option: general.sm_backend
  • Syntax: string
  • Example: {sm_backend, \"\\\"redis\\\"\"}.
"},{"location":"configuration/release-options/#s2s_backend","title":"s2s_backend","text":"
  • Type: parameter
  • Option: general.s2s_backend
  • Syntax: string
  • Example: {s2s_backend, \"\\\"mnesia\\\"\"}.
"},{"location":"configuration/release-options/#tls_config","title":"tls_config","text":"
  • Type: block
  • Option: listen.c2s.tls.*
  • Syntax: TOML key-value pairs
  • Example:
{tls_config, \"\n  tls.certfile = \\\"priv/ssl/fake_server.pem\\\"\n  tls.mode = \\\"starttls\\\"\n\"}.\n
"},{"location":"configuration/release-options/#auth_method","title":"auth_method","text":"
  • Type: parameter
  • Option: auth.methods
  • Syntax: comma-separated list of strings
  • Example: {auth_method, \"\\\"internal\\\"\"}.
"},{"location":"configuration/s2s/","title":"Options: S2S","text":"

The s2s section contains options configuring the server-to-server connections used to communicate with other federated XMPP servers.

Warning

Server-to-server connections do not support dynamic domains. Do not use dynamic domains when using s2s.

"},{"location":"configuration/s2s/#general-options","title":"General options","text":"

These options affect both incoming and outgoing S2S connections.

"},{"location":"configuration/s2s/#s2sdefault_policy","title":"s2s.default_policy","text":"
  • Syntax: string, \"allow\" or \"deny\"
  • Default: \"allow\"
  • Example: default_policy = \"deny\"

Default policy for opening new S2S connections to/from remote servers.

"},{"location":"configuration/s2s/#s2shost_policy","title":"s2s.host_policy","text":"
  • Syntax: array of TOML tables with the following mandatory content:
    • host - string, host name
    • policy - string, \"allow\" or \"deny\"
  • Default: not set, default_policy is used
  • Example:
  host_policy = [\n    {host = \"good.xmpp.org\", policy = \"allow\"},\n    {host = \"bad.xmpp.org\", policy = \"deny\"}\n  ]\n

Policy for opening new connections to/from specific remote servers.

"},{"location":"configuration/s2s/#s2suse_starttls","title":"s2s.use_starttls","text":"
  • Syntax: string, one of \"false\", \"optional\", \"required\", \"required_trusted\"
  • Default: \"false\"
  • Example: use_starttls = \"required\"

Allows configuring StartTLS for incoming and outgoing S2S connections:

  • false - StartTLS is disabled,
  • optional - StartTLS is supported,
  • required - StartTLS is supported and enforced,
  • required_trusted - StartTLS is supported and enforced with certificate verification.
"},{"location":"configuration/s2s/#s2scertfile","title":"s2s.certfile","text":"
  • Syntax: string, path in the file system
  • Default: not set
  • Example: certfile = \"cert.pem\"

Path to the X509 PEM file with a certificate and a private key inside (not protected by any password). Required if use_starttls is not false.

"},{"location":"configuration/s2s/#s2sshared","title":"s2s.shared","text":"
  • Syntax: string
  • Default: 10 strong random bytes, hex-encoded
  • Example: shared = \"82gc8b23ct7824\"

S2S shared secret used in the Server Dialback extension.

"},{"location":"configuration/s2s/#outgoing-connections","title":"Outgoing connections","text":"

The options listed below affect only the outgoing S2S connections.

"},{"location":"configuration/s2s/#s2saddress","title":"s2s.address","text":"
  • Syntax: array of TOML tables with the following content:
    • host - string, mandatory, host name
    • ip_address - string, mandatory, IP address
    • port - integer, optional, port number
  • Default: not set
  • Example:
  address = [\n    {host = \"my.xmpp.org\", ip_address = \"192.0.100.1\"},\n    {host = \"your.xmpp.org\", ip_address = \"192.0.1.100\", port = 5271}\n  ]\n

This option defines IP addresses and port numbers for specific non-local XMPP domains, allowing to override the DNS lookup for outgoing S2S connections.

"},{"location":"configuration/s2s/#s2sciphers","title":"s2s.ciphers","text":"
  • Syntax: string
  • Default: \"TLSv1.2:TLSv1.3\"
  • Example: ciphers = \"TLSv1.2\"

Defines a list of accepted SSL ciphers for outgoing S2S connections. Please refer to the OpenSSL documentation for the cipher string format.

"},{"location":"configuration/s2s/#s2smax_retry_delay","title":"s2s.max_retry_delay","text":"
  • Syntax: positive integer
  • Default: 300
  • Example: max_retry_delay = 300

Specifies the maximum time in seconds that MongooseIM will wait until the next attempt to connect to a remote XMPP server. The delays between consecutive attempts will be doubled until this limit is reached.

"},{"location":"configuration/s2s/#s2soutgoingport","title":"s2s.outgoing.port","text":"
  • Syntax: integer, port number
  • Default: 5269
  • Example: outgoing.port = 5270

Defines the port to be used for outgoing S2S connections.

"},{"location":"configuration/s2s/#s2soutgoingip_versions","title":"s2s.outgoing.ip_versions","text":"
  • Syntax: array of integers (IP versions): 4 or 6
  • Default: [4, 6]
  • Example: outgoing.ip_versions = [6]

Specifies the order of IP address families to try when establishing an outgoing S2S connection.

"},{"location":"configuration/s2s/#s2soutgoingconnection_timeout","title":"s2s.outgoing.connection_timeout","text":"
  • Syntax: positive integer or the string \"infinity\"
  • Default: 10_000
  • Example: outgoing.connection_timeout = 5000

Timeout (in milliseconds) for establishing an outgoing S2S connection.

"},{"location":"configuration/s2s/#s2sdnstimeout","title":"s2s.dns.timeout","text":"
  • Syntax: positive integer
  • Default: 10
  • Example: dns.timeout = 30

Timeout (in seconds) for DNS lookups when opening an outgoing S2S connection.

"},{"location":"configuration/s2s/#s2sdnsretries","title":"s2s.dns.retries","text":"
  • Syntax: positive integer
  • Default: 2
  • Example: dns.retries = 1

Number of DNS lookup attempts when opening an outgoing S2S connection.

"},{"location":"configuration/shaper/","title":"Options: Shaper","text":"

The shaper section specifies traffic shapers used to limit the incoming XMPP traffic, providing a safety valve to protect the server. It can be used to prevent DoS attacks or to calm down too noisy clients.

  • Syntax: each shaper is specified in a subsection starting with [shaper.name] where name is used to uniquely identify the shaper.
  • Default: no default - each shaper needs to be specified explicitly.
  • Example: the normal shaper is used for the C2S connections.
[shaper.normal]\n  max_rate = 1000\n
"},{"location":"configuration/shaper/#traffic-shaper-options","title":"Traffic shaper options","text":""},{"location":"configuration/shaper/#shapermaxrate","title":"shaper.max_rate","text":"
  • Syntax: positive integer
  • Default: no default, this option is mandatory
  • Example: max_rate = 1000

Defines the maximum accepted rate. For the shapers used by XMPP listeners this is the number of bytes per second, but there are shapers that use different units, e.g. MAM shapers.

"},{"location":"configuration/shaper/#examples","title":"Examples","text":"

The following examples show the typical shaper definitions.

"},{"location":"configuration/shaper/#c2s-shaper","title":"C2S Shaper","text":"

This is the typical definition of an XMPP shaper, which accepts a maximum data rate of 1000 bytes per second. When the rate is exceeded, the receiver pauses before processing the next packet.

[shaper.normal]\n  max_rate = 1000\n

To make use of it, the corresponding rule should be defined in the access section. Finally, the C2S listener has to be configured to use the defined shaper - see the C2S Example.

"},{"location":"configuration/shaper/#s2s-shaper","title":"S2S Shaper","text":"

For S2S connections we need to increase the limit as they receive the accumulated traffic from multiple users - e.g. to 50 kbps:

[shaper.fast]\n  max_rate = 50_000\n

To make use of it, the corresponding rule should be defined in the access section. Finally, the S2S listener has to be configured to use the defined shaper - see the S2S Example.

"},{"location":"configuration/shaper/#mam-shapers","title":"MAM Shapers","text":"

These shapers limit the number of MAM operations per second (rather than bytes per second).

[shaper.mam_shaper]\n  max_rate = 1\n\n[shaper.mam_global_shaper]\n  max_rate = 1000\n

To make use of them, the corresponding rules should be defined in the access section.

"},{"location":"developers-guide/Basic-iq-handler/","title":"Basic IQ Handler","text":"

XMPP stands for Extensible Messaging and Presence Protocol. One way the protocol can be extended is by defining new types of queries, or IQs, that XMPP entities should be able to handle. It's usual that a XEP defining some XMPP extension contains some new type of IQ. IQs can also be used to implement custom features - required in a particular problem domain - but not defined by any official XEP.

This tutorial will show you how to add and test a simple module with an IQ handler to MongooseIM. gen_iq_handler module provides functionality for registering IQ handlers for specific namespaces.

"},{"location":"developers-guide/Basic-iq-handler/#clone-build","title":"Clone & build","text":"

See How-to-build for details on building MongooseIM from source code.

"},{"location":"developers-guide/Basic-iq-handler/#create-a-module-add-a-basic-iq-handler","title":"Create a module & add a basic IQ handler","text":"

Go to src/ and create a basic module implementing the gen_mod behaviour. In start/2 register the IQ handler with a specified namespace, type (IQ processing policy), and function which will handle the incoming IQ stanza. In stop/1 remove the registered handler. Implement the function for handler:

  • If the incoming IQ stanza is of type get or set it will be returned with the type set to result.

  • If the server doesn't recognise the hostname, the returning stanza will be of type error.

See Server Rules for Processing XML Stanzas for more detailed information on the topic.

-module(mod_iq_example).\n-behaviour(gen_mod).\n\n-include(\"mongoose.hrl\").\n-include(\"jlib.hrl\").\n\n%% gen_mod callbacks\n-export([start/2, stop/1]).\n\n%% IQ handlers\n-export([process_iq/4]).\n\nstart(HostType, _Opts) ->\n    gen_iq_handler:add_iq_handler_for_domain(HostType, <<\"erlang-solutions.com:example\">>,\n                                  ejabberd_sm, process_iq, #{}, no_queue).\nstop(HostType) ->\n    gen_iq_handler:remove_iq_handler_for_domain(HostType, <<\"erlang-solutions.com:example\">>, ejabberd_sm).\n\nprocess_iq(_From, _To, Acc, IQ) ->\n    IQRes = IQ#iq{type = result},\n    ?LOG_INFO(#{what => example_handler, acc => Acc, iq_result => IQRes}),\n    {Acc, IQRes}.\n
"},{"location":"developers-guide/Basic-iq-handler/#test-your-handler","title":"Test your handler","text":"

Go to big_tests/tests and create a test suite for your handler. Implement the test case for success and failure. We will register two users, which are predefined in $REPO/big_tests/test.config:

{alice, [\n    {username, <<\"alicE\">>},\n    {server, <<\"localhost\">>},\n    {password, <<\"matygrysa\">>}]},\n{alice_bis, [\n    {username, <<\"alicE\">>},\n    {server, <<\"localhost.bis\">>},\n    {host, <<\"localhost\">>},\n    {password, <<\"matygrysa\">>}]},\n

Our IQ handler will be enabled only for one domain, localhost. After sending an IQ stanza to alice we should get a result, but as our IQ handler is not enabled for localhost.bis domain, we should get an error.

-module(mod_iq_example_SUITE).\n\n-export([all/0,\n         groups/0,\n         suite/0,\n         init_per_suite/1,\n         end_per_suite/1,\n         init_per_group/2,\n         end_per_group/2,\n         init_per_testcase/2,\n         end_per_testcase/2]).\n\n%% Tests\n-export([should_return_result/1,\n         should_return_error/1]).\n\n-include_lib(\"exml/include/exml.hrl\").\n\n-define(EXAMPLE_NS, <<\"erlang-solutions.com:example\">>).\n-define(USERS, [alice, alice_bis]).\n\n-import(distributed_helper, [mim/0,\n                             require_rpc_nodes/1,\n                             rpc/4]).\n\n%%--------------------------------------------------------------------\n%% Suite configuration\n%%--------------------------------------------------------------------\n\nall() ->\n    [{group, mod_iq_example}].\n\ngroups() ->\n    G = [{mod_iq_example, [], [should_return_result,\n                               should_return_error]}],\n    ct_helper:repeat_all_until_all_ok(G).\n\nsuite() ->\n    require_rpc_nodes([mim]) ++ escalus:suite().\n\n%%--------------------------------------------------------------------\n%% Init & teardown\n%%--------------------------------------------------------------------\n\ninit_per_suite(Config) ->\n    Domain = ct:get_config({hosts, mim, domain}),\n    dynamic_modules:start(Domain, mod_iq_example, [no_opts]),\n    escalus:init_per_suite(Config).\n\nend_per_suite(Config) ->\n    Domain = ct:get_config({hosts, mim, domain}),\n    dynamic_modules:stop(Domain, mod_iq_example),\n    escalus:end_per_suite(Config).\n\ninit_per_group(_, Config) ->\n    escalus:create_users(Config, ?USERS).\n\nend_per_group(_, Config) ->\n    escalus:delete_users(Config, ?USERS).\n\ninit_per_testcase(CaseName, Config) ->\n    escalus:init_per_testcase(CaseName, Config).\n\nend_per_testcase(CaseName, Config) ->\n    escalus:end_per_testcase(CaseName, Config).\n\n%%--------------------------------------------------------------------\n%% 
Tests\n%%--------------------------------------------------------------------\n\nshould_return_result(Config) ->\n    %% given\n    escalus:story(Config, [{alice, 1}], fun(Alice) ->\n        %% when sending a request\n        Req = escalus_stanza:iq_get(?EXAMPLE_NS, [#xmlel{name = <<\"example\">>}]),\n        ct:pal(\"req: ~p\", [Req]),\n        escalus:send(Alice, Req),\n        %% then we should get a result\n        Res = escalus:wait_for_stanza(Alice),\n        ct:pal(\"res: ~p\", [Res]),\n        escalus:assert(is_iq, [<<\"result\">>, ?EXAMPLE_NS], Res)\n    end).\n\nshould_return_error(Config) ->\n    %% given\n    escalus:story(Config, [{alice_bis, 1}], fun(Alice) ->\n        %% when sending a request with unregistered server\n        Req = escalus_stanza:iq_get(?EXAMPLE_NS, [#xmlel{name = <<\"example\">>}]),\n        ct:pal(\"req: ~p\", [Req]),\n        escalus:send(Alice, Req),\n        %% then we should get an error\n        Res = escalus:wait_for_stanza(Alice),\n        ct:pal(\"res: ~p\", [Res]),\n        escalus:assert(is_iq, [<<\"error\">>, ?EXAMPLE_NS], Res),\n        escalus:assert(is_error, [<<\"cancel\">>, <<\"service-unavailable\">>], Res)\n    end).\n
"},{"location":"developers-guide/Basic-iq-handler/#run-it","title":"Run it","text":"

Compile & generate releases for testing purposes according to How-to-build. Go to $REPO/_build/mim1/rel/mongooseim and start one MongooseIM node.

bin/mongooseim live\n
Open up a new terminal window, go to $REPO and use the test runner. Run single suite with the already started mim1 node.

source tools/test-runner-complete.sh\ntest-runner.sh --rerun-big-tests -- mod_iq_example\n
"},{"location":"developers-guide/Bootstrap-Scripts/","title":"Bootstrap scripts","text":"

The scripts are located in the rel/files/scripts/ directory in the MongooseIM repository.

By default the bootstrap command executes bootstrap01-hello.sh, which just prints the information below:

./_build/prod/rel/mongooseim/bin/mongooseimctl bootstrap\n\nExecute /Users/mikhailuvarov/erlang/esl/MongooseIM/_build/prod/rel/mongooseim/scripts/bootstrap01-hello.sh\nHello from /Users/mikhailuvarov/erlang/esl/MongooseIM/_build/prod/rel/mongooseim/scripts/bootstrap01-hello.sh script.\nMongooseIM is installed into /Users/mikhailuvarov/erlang/esl/MongooseIM/_build/prod/rel/mongooseim\n

Execution of the scripts stops with an error if any of them fails.

Environment variables, available from scripts:

  • ERTS_PATH - path to Erlang Runtime System, used by MongooseIM.
  • MIM_DIR - MongooseIM release installation directory.
"},{"location":"developers-guide/Bootstrap-Scripts/#templating-bootstrap-script","title":"Templating bootstrap script","text":"

The script bootstrap20-template.escript renders files from the templates/ directory and writes result files into the etc/ directory. If you need the result files in a separate directory, create another script bootstrap30-template.sh, that moves files into a proper location.

The etc/templates.ini file contains default template variables.

A template config example:

[options]\n  demo_session_lifetime = 600\n  demo_tls_versions = 'tlsv1.2', 'tlsv1.3'\n

Only lowercase variables are allowed in templates.ini.

You can redeclare options using environment variables when executing the bootstrap script:

MIM_DEMO_SESSION_LIFETIME=700 mongooseimctl bootstrap\n

Environment variables should have a MIM_ prefix. The variable names are case-insensitive (but we suggest using uppercase variable names for consistency).

"},{"location":"developers-guide/Bootstrap-Scripts/#demo-template","title":"Demo template","text":"

A demo template is located in rel/files/templates/demo.config. It is copied into the /templates directory inside your release directory.

"},{"location":"developers-guide/Bootstrap-Scripts/#testing-templating-scripts","title":"Testing templating scripts","text":"

Templating script source code: rel/files/scripts/bootstrap20-template.escript.

Testing script code:

tools/pkg/scripts/smoke_test.sh\ntools/pkg/scripts/smoke_templates.escript\n

Testing command:

PRESET=pkg pkg_PLATFORM=ubuntu_xenial ESL_ERLANG_PKG_VER=23.3.1-2 ./tools/test.sh\n
"},{"location":"developers-guide/Hooks-and-handlers/","title":"Hooks, handlers and accumulators","text":"

The hooks and handlers mechanism is one of the core architectural features of MongooseIM. It allows for loose coupling between components of the system by calling only those which are available and configured to be used at runtime.

It can be thought of as a simple eventing mechanism notifying about certain things happening in the server. That results in an extensible system with pluggable extra functionality.

To focus our attention, we'll analyze mod_offline which is responsible for storing messages for delivery to users unavailable at the time of sending. mod_offline is an implementation of XEP-0203: Delayed Delivery.

"},{"location":"developers-guide/Hooks-and-handlers/#running-a-hook","title":"Running a hook","text":""},{"location":"developers-guide/Hooks-and-handlers/#basic-usage","title":"Basic usage","text":"

ejabberd_sm (ejabberd/MongooseIM session manager) is the module discovering whether the recipient of a message is available or not. That's where storing the message for later delivery takes place. It is possible, but not recommended, to save a message in an offline storage by calling mod_offline directly:

mod_offline:store_packet(Acc, From, To, Packet)\n

Note that in this example ejabberd_sm is coupled with mod_offline. I.e. if mod_offline was not available, the code would simply crash; if it was misconfigured or turned off, the behaviour would be undefined. To avoid that coupling and also to enable other (possibly yet to be written) code to carry out some action at this particular moment, ejabberd_sm calls instead:

mongoose_hooks:offline_message(Acc, From, To, Packet);\n

mongoose_hooks is a module which serves as an API for calling hooks in the server. All such modules are placed in src/hooks.

For every hook, there needs to be a function in this module written beforehand which accepts the correct number of arguments and makes the call to the actual low-level hooks mechanism. This means that there is some degree of coupling still - but this time between the ejabberd_sm module and mongoose_hooks, and the latter is always available.

The extra level of indirection introduced by this call gives the flexibility to determine at runtime what code actually gets run at this point. This depends on which handlers are registered to process the event.

offline_message is the name of the hook (in other words of the event that is being signalled); Acc is the Accumulator, described later; From, To and Packet are the arguments passed to the handler, just as they would in case of the function being called directly.

Why do we even need the mongoose_hooks module?

Why is there a module in which we have to define the hook invocation beforehand? Could we not just use the low-level hooks mechanism directly and avoid this module altogether?

This was actually the case before this module was introduced, and hooks' names were just atoms provided as an argument to this low-level API. However, we discovered it was causing problems and producing bugs, due to the lack of static code analysis. Now we can have some guarantees thanks to Dialyzer, and each hook invocation has a correct number of arguments. Thanks to this, writing handlers is easier - there is a single source of truth about how a hook is run. Remember that a given hook can be invoked from many places in many modules.

With the new mongoose_c2s implementation we introduced a new hook API module, mongoose_c2s_hooks. All such API modules are placed in the src/hooks directory.

"},{"location":"developers-guide/Hooks-and-handlers/#getting-results-from-handlers","title":"Getting results from handlers","text":"

Hook handlers are called by \"folding\". This means that each handler on a list is passed a set of arguments, and an initial value that it then modifies, returns and hands over to the next handler in line. This modified data that is processed by the series of handlers is called an accumulator - because it accumulates the results.

A simple example would look like this:

NewAcc = mongoose_hooks:a_certain_hook(Accumulator,\n                                       StateData#state.user,\n                                       StateData#state.server).\n

The initial value of the accumulator being passed through the sequence of handlers is provided with additional arguments required by the hook, as defined in the mongoose_hooks module.

Folds

If you haven't encountered the term fold before, think of it as reduce (like Array.reduce) in Ruby-speak, roughly equivalent to the Reduce step in MapReduce, sometimes called accumulate, aggregate or compress. See Wikipedia for more.

"},{"location":"developers-guide/Hooks-and-handlers/#using-accumulators","title":"Using accumulators","text":"

MongooseIM uses a dedicated data structure to accumulate data related to stanza processing (see \"Accumulators\"). It is instantiated with an incoming stanza, passed along throughout the processing chain, supplied to and returned from certain hook calls, and terminated when the stanza is leaving MongooseIM. There are some hooks which don't use this data structure.

If a Mongoose accumulator is passed to a hook, handlers should store their return values in one of 3 ways:

  • If it is a one-off value which doesn't need to be passed on along with the accumulator (can be overwritten any time), use mongoose_acc:set(hook, result, Value, Acc).
  • If the value is to be passed on to be reused within the current processing context, use mongoose_acc:set(Namespace, Key, Value, Acc).
  • If the value should be passed on to the recipient's session, pubsub node etc. use mongoose_acc:set_permanent(Namespace, Key, Value, Acc).

A real life example, then, with regard to mod_offline is the resend_offline_messages hook run in mod_presence:

Acc1 = mongoose_hooks:resend_offline_messages(Acc, Jid),\nRs = mongoose_acc:get(offline, messages, [], Acc1),\n
"},{"location":"developers-guide/Hooks-and-handlers/#error-handling-in-hooks","title":"Error handling in hooks","text":"

Hooks are meant to decouple modules; in other words, the caller signals that some event took place or that it intends to use a certain feature or a set of features, but how and if those features are implemented is beyond its interest. For that reason hooks don't use the \"let it crash\" approach. Instead, it is rather like \"fire-and-forget\", more similar in principle to the Pid ! signal way.

In practical terms: if a handler throws an error, the hook machine logs a message and proceeds to the next handler with an unmodified accumulator. If there are no handlers registered for a given hook, the call simply has no effect.

"},{"location":"developers-guide/Hooks-and-handlers/#sidenote-code-yet-to-be-written","title":"Sidenote: Code yet to be written","text":"

Let's imagine, that when building a minimum viable product we settle on using mod_offline for delayed delivery of messages to unavailable clients. However, while the product evolves (or the relevant client software catches up) we might drop mod_offline in favour of a more sophisticated solution like Message Archive Management which would require a different action to be taken at the same point. Thanks to loose coupling and mongoose_hooks, it's possible to turn off mod_offline and turn on mod_mam without changing a single line of code in ejabberd_sm.

The only required change is to the configuration (apart from deploying the new module) which can even be performed at runtime - without restarting the server.

"},{"location":"developers-guide/Hooks-and-handlers/#sidenote-multiple-domains","title":"Sidenote: Multiple Domains","text":"

A MongooseIM cluster may serve more than one domain at the same time. E.g. it is quite common that services like Multi User Chat or Publish-Subscribe are available as subdomains of the main XMPP domain served by an installation.

Moreover, each XMPP host is of a certain type, as defined in general.host_types, and hooks can be called either globally (across all hosts/host types) or for one host type. If you are not using dynamic domains or grouping hosts under host types, then each host has a corresponding host type implicitly, and the two terms are interchangeable. Whether a hook is called globally or per host type depends on its purpose. It is decided when creating a hook and can be checked in the mongoose_hooks module for existing hooks.

"},{"location":"developers-guide/Hooks-and-handlers/#registering-hook-handlers","title":"Registering hook handlers","text":"

In order to store a packet when ejabberd_sm runs offline_message, the relevant module must register a handler for this hook. To attain the runtime configurability the module should register the handlers when it's loaded and unregister them when it's unloaded. That's usually done in, respectively, start/2 and stop/1 functions. Here is the relevant snippet from mod_offline:start/2:

gen_hook:add_handlers(hooks(HostType)),\n
and the hooks/1 function returns a list of tuples describing hook handlers, like:
{offline_message, HostType, fun ?MODULE:inspect_packet/3, #{}, 50}\n

It is clearly visible that the handler inspect_packet is added to the offline_message hook.

HostType is the one for which the handler will be executed. In the case of statically defined domains, it is the same as the host, as configured in the general.hosts section.

The handler itself is specified as a fun expression; the arity of the function is always 3 - more about actual arguments in the Writing handlers section. If the handler expects an incorrect number of arguments, it will simply crash.

The 4th element of this tuple is a map of static parameters that will be passed to every invocation of the handler. It allows you to specify additional handler config at the moment of its registering.

Multiple handlers may be registered for the same hook. The last argument, 50, is the sequence number of this handler in the handler chain. The higher the number, the later in the sequence the handler will be executed. It's reasonable to keep this number small (e.g. in the range 0-100), though there's no real limit other than the size of the integer type in the Erlang VM.

"},{"location":"developers-guide/Hooks-and-handlers/#unregistering-handlers","title":"Unregistering handlers","text":"

Pluggability also requires the components to be unpluggable at will. For that purpose there's the option to unregister a hook handler. It's done in mod_offline:stop/1 in a similar fashion to:

gen_hook:delete_handlers(hooks(Host)),\n

The hooks/1 function returns a list of hook tuples exactly the same as passed to gen_hook:add_handlers/1. Both these functions accept a list of tuples. There also exist functions gen_hook:add_handler/5 and gen_hook:delete_handler/5 which register and unregister one handler at a time.

"},{"location":"developers-guide/Hooks-and-handlers/#sidenote-metrics","title":"Sidenote: Metrics","text":"

Every time a hook is run, a corresponding metric of the same name in the same host is incremented by one. There are some exceptions though, as some metrics were implemented before the generic hook metrics. The list of hooks not updating generic metrics can be found in the mongoose_metrics:filter_hook/1 function. Such skipped hooks update the metrics defined in the mongoose_metrics_hooks module.

"},{"location":"developers-guide/Hooks-and-handlers/#writing-handlers","title":"Writing handlers","text":"

The signature of a handler has to follow these rules:

  • Accepts correct arguments:
    • Acc - accumulator which was passed from previous handler (or initial accumulator). May be mongoose_acc in particular
    • Params - map of hook parameters passed from mongoose_hooks. It is constant for every handler in one hook invocation. For exact structure check the hook function in mongoose_hooks module, as different hooks use different parameters.
    • Extra - map of additional hook parameters. It is constant for every hook invocation. It is created from the map described in Registering hook handlers section with 3 additional parameters: host_type, hook_tag, hook_name. Parameter host_type can be particularly useful.
  • Returns a tuple {ok | stop, Acc} where Acc is the accumulator of the same type as the input one, that shall be passed to the next handler (or return value in case of last handler).

Let's look at this example, from MongooseIM codebase:

in_subscription(Acc, #{to := ToJID, from := FromJID, type := Type}, _) ->\n    case process_subscription(in, ToJID, FromJID, Type) of\n        stop ->\n            {stop, Acc};\n        {stop, false} ->\n            {stop, mongoose_acc:set(hook, result, false, Acc)};\n        _ -> {ok, Acc}\n    end.\n

As seen in this example, a handler receives an accumulator, parameters and extra parameters (in this case - ignored). Then it matches to the result of process_subscription/4 and can return 3 different values:

  • {ok, Acc} - it allows further processing and does not change the accumulator.
  • {stop, mongoose_acc:set(hook, result, false, Acc)} - it stops further processing and returns accumulator with a new value in it.
  • {stop, Acc} - it stops further processing and does not change the accumulator.

This is an important feature to note: in some cases our handler returns a tuple {stop, Acc}. This skips calling later actions in the handler sequence, while the hook call returns the Acc. Further processing is only performed if the first element of return tuple is ok.

Watch out! Different handlers may be registered for the same hook - the priority mechanism orders their execution. If a handler returns {stop, Acc} but runs early in the handler chain, it may prevent some other handler from running at all! That might or might not be intentional. It may be especially surprising in case of handlers from different modules registered for the same hook. Always ensure what handlers are registered for a given hook (grep is your friend) and that you understand their interdependencies.

"},{"location":"developers-guide/Hooks-and-handlers/#hooks-list-and-how-to-extract-it","title":"Hooks list and how to extract it","text":"

The following command should give you a list of all the hooks available in MongooseIM:

awk '/\\-export\\(\\[/,/\\]\\)\\./' src/hooks/*.erl | grep -oh \"\\w*/\" | sed 's/.$//' | sort\n
It returns:
adhoc_local_commands\nadhoc_sm_commands\n...\n...\n...\nxmpp_stanza_dropped\n

It just extracts the hooks exported from mongoose_hooks and other hook API modules. Refer to grep/ack to find where they're used.

"},{"location":"developers-guide/Hooks-and-handlers/#creating-your-own-hooks","title":"Creating your own hooks","text":"

You should put the new hook inside mongoose_hooks with a correct type specification, which provides some security in places where the hooks are run. This is the way all hooks are called in MongooseIM (see the examples in the hooks description). You could run gen_hook:run_fold directly, providing the hook name, but this is advised against.

Of course, as long as no module registers handlers for a hook, calling it won't have any effects.

This is similar to the case when a module registers handlers for some hook, but that hook is never run in the code. That won't have an effect either.

"},{"location":"developers-guide/Hooks-and-handlers/#example-of-creating-a-new-hook","title":"Example of creating a new hook","text":"

The following is an example of a module which both runs and registers a few handlers for a completely new hook. The handlers are run sequentially using disparate priorities and passing over an accumulator value. One of the handlers stops the handler execution chain prematurely by returning {stop, NewVal}. It's also possible to try out what happens when the same hook is run with different XMPP domains by passing an argument to run_custom_hook/1 - we'll see that the handlers are registered for a particular domain only.

At the end, you can see a printout of an accumulator with some debugging info.

To cut the long story short:

"},{"location":"developers-guide/Hooks-and-handlers/#1-add-the-hook-with-type-specification-to-mongoose_hooks","title":"1. Add the hook with type specification to mongoose_hooks","text":"
-spec custom_new_hook(HostType, Acc, Number) -> Result when\n    HostType :: mongooseim:host_type(),\n    Acc :: mongoose_acc:t(),\n    Number :: integer(),\n    Result :: mongoose_acc:t().\ncustom_new_hook(HostType, Acc, Number) ->\n    Params = #{number => Number},\n    run_hook_for_host_type(custom_new_hook, HostType, Acc, Params).\n

Don't forget about exporting the function:

-export([custom_new_hook/3]).\n

"},{"location":"developers-guide/Hooks-and-handlers/#2-create-the-mod_hook_example-module","title":"2. Create the mod_hook_example module","text":"
-module(mod_hook_example).\n\n-behaviour(gen_mod).\n\n-include(\"mongoose.hrl\").\n\n%% API\n-export([run_custom_hook/1]).\n\n%% gen_mod callbacks\n-export([start/2,\n         stop/1]).\n\n%% Hook handlers\n-export([first_handler/3,\n         stopping_handler/3,\n         never_run_handler/3]).\n\nstart(HostType, _Opts) ->\n    gen_hook:add_handlers(hooks(HostType)).\n\nstop(HostType) ->\n    gen_hook:delete_handlers(hooks(HostType)).\n\nhooks(HostType) ->\n    [{custom_new_hook, HostType, fun ?MODULE:first_handler/3, #{extra_param => <<\"ExtraParam\">>}, 25},\n     {custom_new_hook, HostType, fun ?MODULE:stopping_handler/3, #{}, 50},\n     {custom_new_hook, HostType, fun ?MODULE:never_run_handler/3, #{}, 75}].\n\nrun_custom_hook(Host) ->\n    {ok, HostType} = mongoose_domain_api:get_domain_host_type(Host),\n    Acc = mongoose_acc:new(#{ location => ?LOCATION, lserver => Host, host_type => HostType }),\n    Acc1 = mongoose_acc:set(example, value, 5, Acc),\n    ResultAcc = mongoose_hooks:custom_new_hook(HostType, Acc1, 2),\n    ResultValue = mongoose_acc:get(example, value, ResultAcc),\n    ?LOG_INFO(#{what => hook_finished, result => ResultValue, result_acc => ResultAcc}).\n\nfirst_handler(Acc, #{number := Number}, #{extra_param := Extra}) ->\n    V0 = mongoose_acc:get(example, value, Acc),\n    Result = V0 + Number,\n    ?LOG_INFO(#{what => first_handler, value => V0, argument => Number,\n                result => Result, extra => Extra}),\n    {ok, mongoose_acc:set(example, value, Result, Acc)}.\n\nstopping_handler(Acc, #{number := Number}, _) ->\n    V0 = mongoose_acc:get(example, value, Acc),\n    Result = V0 + Number,\n    ?LOG_INFO(#{what => stopping_handler, value => V0, argument => Number, result => Result}),\n    {stop, mongoose_acc:set(example, value, Result, Acc)}.\n\nnever_run_handler(Acc, #{number := Number}, _) ->\n    ?LOG_INFO(#{what => never_run_handler,\n                text => <<\"This handler won't run as it's registered with a priority bigger 
\"\n                          \"than that of stopping_handler/2 is. \"\n                          \"This text should never get printed.\">>}),\n    {ok, Acc * Number}.\n

The module is intended to be used from the shell for educational purposes:

(mongooseim@localhost)1> gen_mod:is_loaded(<<\"localhost\">>, mod_hook_example).\nfalse\n(mongooseim@localhost)2> mongoose_modules:ensure_started(<<\"localhost\">>, mod_hook_example, #{}).\n{started,ok}\n(mongooseim@localhost)3> gen_mod:is_loaded(<<\"localhost\">>, mod_hook_example).\ntrue\n(mongooseim@localhost)4> mongoose_logs:set_module_loglevel(mod_hook_example, info).\nok\n(mongooseim@localhost)5> mod_hook_example:run_custom_hook(<<\"localhost\">>).\nwhen=2022-12-15T12:37:16.109544+00:00 level=info what=first_handler pid=<0.1081.0> at=mod_hook_example:first_handler/3:41 value=5 result=7 extra=ExtraParam argument=2 \nwhen=2022-12-15T12:37:16.109809+00:00 level=info what=stopping_handler pid=<0.1081.0> at=mod_hook_example:stopping_handler/3:48 value=7 result=9 argument=2 \nwhen=2022-12-15T12:37:16.110028+00:00 level=info what=hook_finished pid=<0.1081.0> at=mod_hook_example:run_custom_hook/1:36 result_acc_{example,value}=9 result_acc_timestamp=1671107836109517 result_acc_stanza=undefined result_acc_ref=#Ref<0.4046106046.1908670465.111816> result_acc_origin_pid=<0.1081.0> result_acc_origin_location_mfa={mod_hook_example,run_custom_hook,1} result_acc_origin_location_line=32 result_acc_origin_location_file=/Users/paweldlugosz/Dev/Repos/MongooseIM/src/mod_hook_example.erl result_acc_non_strippable= result_acc_mongoose_acc=true result_acc_lserver=localhost result_acc_host_type=localhost result=9 \nok\n
"},{"location":"developers-guide/OpenSSL-and-FIPS/","title":"OpenSSL FIPS","text":"

Support for OpenSSL FIPS was added to MongooseIM in version 1.7.0.

"},{"location":"developers-guide/OpenSSL-and-FIPS/#incompatibilities","title":"Incompatibilities","text":"

Currently known incompatible features are:

  • SASL auth mechanism DIGEST-MD5: due to a forbidden MD5 hash function in FIPS mode.
"},{"location":"developers-guide/OpenSSL-and-FIPS/#requirements","title":"Requirements","text":""},{"location":"developers-guide/OpenSSL-and-FIPS/#build-erlangotp-with-fips-support","title":"Build Erlang/OTP with FIPS support","text":"

Make sure the option --enable-fips is specified for configure command. If you want to use a different OpenSSL than the default one, specify the option --with-ssl=PATH_TO_YOUR_OPENSSL as well. Here's an example of a command for building Erlang/OTP with kerl:

KERL_CONFIGURE_OPTIONS=\"--enable-fips\" ./kerl build 23.3 23.3-fips\n
"},{"location":"developers-guide/OpenSSL-and-FIPS/#building-mongooseim-with-a-custom-openssl","title":"Building MongooseIM with a custom OpenSSL","text":"

If you want to use a custom OpenSSL, please export the CFLAGS and LDFLAGS env vars pointing to a FIPS compliant OpenSSL before running ./rebar3 compile or make rel.

OPENSSL_LIB=~/openssl/lib #put your path here\nOPENSSL_INC=~/openssl/inc #put your path here\n\nexport LDFLAGS=\"-Wl,-rpath=$OPENSSL_LIB -L$OPENSSL_LIB\"\nexport CFLAGS=\"-I$OPENSSL_INC\"\n
"},{"location":"developers-guide/OpenSSL-and-FIPS/#how-to-enabledisable-fips-mode","title":"How to enable/disable FIPS mode","text":"

Find etc/app.config in the release directory. FIPS mode is an option of the crypto application. In order to enable/disable it, add the following section to app.config:

{crypto, [{fips_mode, Value}]},\n

where Value is either true or false.

"},{"location":"developers-guide/OpenSSL-and-FIPS/#how-to-check-if-the-fips-mode-is-enabled","title":"How to check if the FIPS mode is enabled","text":""},{"location":"developers-guide/OpenSSL-and-FIPS/#log-message","title":"Log message","text":"

When MongooseIM starts, it prints the following log message if FIPS mode is enabled

2015-02-25 14:30:54.501 [warning] <0.242.0>@mongoose_fips:do_notify:37 FIPS mode enabled\n
"},{"location":"developers-guide/OpenSSL-and-FIPS/#run-time-check","title":"Run-time check","text":"

Run the following function in the MongooseIM console:

mongoose_fips:status().\n

The function returns:

  • not_enabled - fips_mode is not set to true in etc/app.config
  • enabled - fips_mode is set to true in etc/app.config
  • not_supported - erlang compiled without fips support
"},{"location":"developers-guide/OpenSSL-and-FIPS/#cipher-suites-difference","title":"Cipher suites difference","text":"

A test using a cipher_suites_test.sh script (available in the tools directory) can be performed on MongooseIM with FIPS mode enabled and disabled. We've used OpenSSL 1.0.1j-fips.

Here are all the cipher suites available when the FIPS mode is enabled (the list may vary for different openssl versions):

  • ECDHE-RSA-AES256-SHA
  • DHE-RSA-AES256-SHA
  • AES256-SHA
  • ECDHE-RSA-DES-CBC3-SHA
  • EDH-RSA-DES-CBC3-SHA
  • DES-CBC3-SHA
  • ECDHE-RSA-AES128-SHA
  • DHE-RSA-AES128-SHA
  • AES128-SHA

Here are all the cipher suites available when the FIPS mode is disabled (the list may vary for different openssl versions):

  • ECDHE-RSA-AES256-SHA
  • DHE-RSA-AES256-SHA
  • DHE-RSA-CAMELLIA256-SHA
  • AES256-SHA
  • CAMELLIA256-SHA
  • ECDHE-RSA-DES-CBC3-SHA
  • EDH-RSA-DES-CBC3-SHA
  • DES-CBC3-SHA
  • ECDHE-RSA-AES128-SHA
  • DHE-RSA-AES128-SHA
  • DHE-RSA-SEED-SHA
  • DHE-RSA-CAMELLIA128-SHA
  • AES128-SHA
  • SEED-SHA
  • CAMELLIA128-SHA
  • ECDHE-RSA-RC4-SHA
  • RC4-SHA
  • RC4-MD5
"},{"location":"developers-guide/SCRAM-serialization/","title":"SCRAM serialization format","text":""},{"location":"developers-guide/SCRAM-serialization/#overview","title":"Overview","text":"

This document describes the SCRAM serialization format used by MongooseIM. Developers can use this information to create advanced endpoints for ejabberd_auth_http or enable other software to read (i.e. share) the user authentication data.

"},{"location":"developers-guide/SCRAM-serialization/#format-description","title":"Format description","text":"

==MULTI_SCRAM==,<iteration count>,===SHA1===<salt>|<stored key>|<server key>,==SHA224==<salt>|<stored key>|<server key>,==SHA256==<salt>|<stored key>|<server key>,==SHA384==<salt>|<stored key>|<server key>,==SHA512==<salt>|<stored key>|<server key>

  • <iteration count> - Iteration Count formatted as a human-readable integer
  • <salt> - Base64-encoded Salt
  • <stored key> - Base64-encoded Stored Key
  • <server key> - Base64-encoded Server Key

The SCRAM format can vary depending on the SHA algorithms that are used for SCRAM. The salt and the iteration count are common for the different SHA types. The Stored Key and the Server Key are specific to a given SHA and follow a SHA prefix indicating which SHA they belong to.

In order to learn more about the meaning of the Stored Key, Server Key, Salt and Iteration Count, please check the SCRAM specification.

"},{"location":"developers-guide/SCRAM-serialization/#example","title":"Example","text":"
  • Password: padthai
  • Erlang map:
    #{iteration_count => 4096,\n  sha =>\n      #{salt => <<\"QClQsw/sfPEnwj4AEp6E1w==\">>,\n        server_key => <<\"EJvxXWM42tO7BgW21lNZyBc1dD0=\">>,\n        stored_key => <<\"ys1104hRhqMoRputBY5sLHKXoSw=\">>},\n  sha224 =>\n      #{salt => <<\"dk0ImXFVPoUfqD5FveV7YA==\">>,\n        server_key => <<\"EvE2EkZcUb3k4CooeOcVFy95P32t+NDX0xbQUA==\">>,\n        stored_key =>\n            <<\"G0ibQ/YYuCtoun4I+1IF2zJ7Q8x2T23ETnq5Gg==\">>},\n  sha256 =>\n      #{salt => <<\"M7BYKSo04XbzBr4C7b056g==\">>,\n        server_key =>\n            <<\"XhtGFf6NDWsnVSCO4xkzPD3qc046fPL0pATZi7RmaWo=\">>,\n        stored_key =>\n            <<\"A779MC05nSGQln5no0hKTGHFSaQ7oguKBZgORW3s+es=\">>},\n  sha384 =>\n      #{salt => <<\"Ryu0fA29gbwgqFOBk5Mczw==\">>,\n        server_key =>\n            <<\"kR+LMI/E0QBG3oF405/MTAT6NAlCOfPrFOaWH3WBVGM0Viu9Brk6kGwVwXjSP8v0\">>,\n        stored_key =>\n            <<\"k3QwC0Lb1y1/V/31byC5KML5t3mH4JTPjFyeAz7lV2l4SPfzi3JHvLEdoNB5K/VY\">>},\n  sha512 =>\n      #{salt => <<\"SLNuVNcWiNBmnYZNIdj+zg==\">>,\n        server_key =>\n            <<\"jUUDbuQ9ae4UnAWS6RV6W4yifX3La3ESjfZjGol+TBROIb/ihR8UawPHrSHkp4yyDJXtRhR9RlHCHy4bcCm1Yg==\">>,\n        stored_key =>\n            <<\"3ey3gzSsmbxcLnoc1VKCR/739uKX6uuPCyAzn6x8o87ibcjOdUaU8qhL5X4MUI9UPTt667GagNpVTmAWTFNsjA==\">>}}\n
  • Serialized password:
    ==MULTI_SCRAM==,4096,\n===SHA1===QClQsw/sfPEnwj4AEp6E1w==|ys1104hRhqMoRputBY5sLHKXoSw=|EJvxXWM42tO7BgW21lNZyBc1dD0=,\n==SHA224==dk0ImXFVPoUfqD5FveV7YA==|G0ibQ/YYuCtoun4I+1IF2zJ7Q8x2T23ETnq5Gg==|EvE2EkZcUb3k4CooeOcVFy95P32t+NDX0xbQUA==,\n==SHA256==M7BYKSo04XbzBr4C7b056g==|A779MC05nSGQln5no0hKTGHFSaQ7oguKBZgORW3s+es=|XhtGFf6NDWsnVSCO4xkzPD3qc046fPL0pATZi7RmaWo=,\n==SHA384==Ryu0fA29gbwgqFOBk5Mczw==|k3QwC0Lb1y1/V/31byC5KML5t3mH4JTPjFyeAz7lV2l4SPfzi3JHvLEdoNB5K/VY|kR+LMI/E0QBG3oF405/MTAT6NAlCOfPrFOaWH3WBVGM0Viu9Brk6kGwVwXjSP8v0,\n==SHA512==SLNuVNcWiNBmnYZNIdj+zg==|3ey3gzSsmbxcLnoc1VKCR/739uKX6uuPCyAzn6x8o87ibcjOdUaU8qhL5X4MUI9UPTt667GagNpVTmAWTFNsjA==|jUUDbuQ9ae4UnAWS6RV6W4yifX3La3ESjfZjGol+TBROIb/ihR8UawPHrSHkp4yyDJXtRhR9RlHCHy4bcCm1Yg==\n
"},{"location":"developers-guide/SCRAM-serialization/#legacy-format-description","title":"Legacy format description","text":"

MongooseIM installations older or equal to 3.6.2 were supporting only SHA-1 as a hashing algorithm for SCRAM. The SCRAM format that was used can be seen below.

==SCRAM==,<stored key>,<server key>,<salt>,<iteration count>

  • <stored key> - Base64-encoded Stored Key
  • <server key> - Base64-encoded Server Key
  • <salt> - Base64-encoded Salt
  • <iteration count> - Iteration Count formatted as a human-readable integer

In order to learn more about the meaning of the Stored Key, Server Key, Salt and Iteration Count, please check the SCRAM specification.

"},{"location":"developers-guide/SCRAM-serialization/#example_1","title":"Example","text":"
  • Password: misio
  • Erlang record: #scram{ storedkey = <<\"tmi5IE+9pceRV/jkPLFHEaVY33c=\">>, serverkey = <<\"MiWNa8T3dniVDwmh77ufJ41fpAQ=\">>, salt = <<\"inKXODlSY5y5SCsLxibi0w==\">>, iterationcount = 4096 }
  • Serialized password: ==SCRAM==,tmi5IE+9pceRV/jkPLFHEaVY33c=,MiWNa8T3dniVDwmh77ufJ41fpAQ=,inKXODlSY5y5SCsLxibi0w==,4096
"},{"location":"developers-guide/Stanza-routing/","title":"Route of a message through the system","text":"

Let's examine the flow of a message sent from Alice to Bob, both of whom are served by the same domain and connected to the server.

Note that hooks are called at various stages of routing - they perform many tasks, and many MongooseIM functionalities are implemented through hooks & handlers. For a general introduction to hooks, see Hooks and Handlers; to get a closer look at a core few, see the hooks description.

"},{"location":"developers-guide/Stanza-routing/#1-senders-c2s-process-receives-the-message","title":"1. Sender's C2S process receives the message","text":"

Alice's C2S (client-to-server) process, which is a state machine implemented in the mongoose_c2s module, receives data from the TCP socket, and parses each incoming XML element with exml to an internal representation of the stanza, which is then processed by the C2S as a subsequent event.

"},{"location":"developers-guide/Stanza-routing/#2-call-to-user_send_-hooks","title":"2. Call to user_send_* hooks","text":"

Upon some minimal validation of the stanza, the hook user_send_packet is called. Next, depending on the type of the stanza, one of the following hooks is called:

  • user_send_message for messages,
  • user_send_presence for presences,
  • user_send_iq for IQ (info/query) stanzas,
  • user_send_xmlel for other XML elements.

Each hook can be handled by multiple modules subscribed to it. Those modules do various complementary tasks, like storing the message in an archive, sending carbon copies, checking the stanza against privacy lists etc. It is possible for a handler to immediately stop routing at this point, preventing execution of any subsequent handlers or hooks. See hooks description for more information.

"},{"location":"developers-guide/Stanza-routing/#3-message-routing","title":"3. Message routing","text":"

The stanza is routed by ejabberd_router:route/3, which passes it through a chain of routing modules implementing the xmpp_router behaviour and applies the following functions for each of them:

  1. Mod:filter/3, which either drops the stanza, stopping the routing chain, or returns it for further processing, modifying it if necessary.
  2. Mod:route/3, which either handles the stanza, stopping the routing chain, or returns it for further processing, modifying it if necessary.

A list of routing modules can be set in the routing_modules option. The default behaviour is the following:

  • mongoose_router_global: runs a global filter_packet hook.
  • mongoose_router_localdomain: if there is a local route registered for the destination domain (i.e. there is an entry in the mongoose_router ETS table), routes the stanza to it. When the recipient's domain is checked for the first time, the corresponding route is not registered yet, because the routes are added lazily - see mongoose_router_dynamic_domains.
  • mongoose_router_external_localnode: if there is an external component registered for the destination domain on the current node, routes the stanza to it. Such components are stored in the Mnesia table external_component, which is not replicated in the cluster.
  • mongoose_router_external: if there is an external component registered for the destination domain on any node in the cluster, routes the stanza to it. Such components are stored in the Mnesia table external_component_global, which is replicated among all cluster nodes.
  • mongoose_router_dynamic_domains: if the recipient's domain is hosted by the local server, a route is added for it, and the stanza is routed locally.
  • ejabberd_s2s: tries to find or establish a connection to another server and send the stanza there.

Assuming that the message from Alice to Bob is not the first stanza addressed to their domain, the routing chain will stop at mongoose_router_localdomain, which will deliver the message locally.

"},{"location":"developers-guide/Stanza-routing/#4-mongoose_local_delivery","title":"4. mongoose_local_delivery","text":"

When an external component or a local route is found, the packet is delivered locally by mongoose_local_delivery:do_route/5. Firstly, the filter_local_packet hook is run to check if the stanza should be delivered or dropped. This hook is also a place where modules can add their own functionality evaluated for each locally delivered stanza.

If the check passes, the next step is to call the handler associated with the component or the local route. Handlers are modules implementing the mongoose_packet_handler behaviour, and stanzas to local users (like Alice and Bob) are handled by the ejabberd_local module.

"},{"location":"developers-guide/Stanza-routing/#5-ejabberd_local-to-ejabberd_sm","title":"5. ejabberd_local to ejabberd_sm","text":"

ejabberd_local:process_packet/5 checks if the stanza is addressed to a user or to the server itself. For local users like Bob, ejabberd_sm:route/4 is called.

"},{"location":"developers-guide/Stanza-routing/#6-ejabberd_sm","title":"6. ejabberd_sm","text":"

ejabberd_sm determines the available resources of the recipient, takes into account their priorities and whether the message is addressed to a particular resource or a bare JID. It appropriately replicates (or not) the message and sends it to the recipient's C2S process(es) by calling mongoose_c2s:route/2. In case no resources are available for delivery (hence no C2S processes to pass the message to), the offline_message hook is run.

As Bob has one online session, the message is sent to the C2S process associated with that session.

"},{"location":"developers-guide/Stanza-routing/#7-recipients-c2s-process-delivers-the-message","title":"7. Recipient's C2S process delivers the message","text":"

The user_receive_packet hook is run to notify the rest of the system about the stanza delivery. Next, depending on the type of the stanza, one of the following hooks is called:

  • user_receive_message for messages,
  • user_receive_presence for presences,
  • user_receive_iq for IQ (info/query),
  • user_receive_xmlel for other XML elements.

Each hook can be handled by multiple modules subscribed to it. These hooks' handlers can stop the routing, e.g. when the stanza is blocked by mod_privacy. Finally, the xmpp_presend_element hook is called, which is used by mod_csi and mod_stream_management. This is the last hook that can stop the routing - otherwise, the stanza is converted to binary data and sent to the recipient's TCP socket.

"},{"location":"developers-guide/Testing-MongooseIM/","title":"Test runner","text":"

The test runner script is used to compile MongooseIM and run tests.

"},{"location":"developers-guide/Testing-MongooseIM/#requirements","title":"Requirements","text":""},{"location":"developers-guide/Testing-MongooseIM/#docker","title":"Docker","text":"

Docker must be installed on the local system, and the user executing the tests must have privileges to start new containers (usually achieved by adding the user to the docker group).

Alternatively, you can use Podman. Here is how to install it on Mac:

brew install podman\npodman machine init\npodman machine start\nln -s /usr/local/bin/podman /usr/local/bin/docker\n

You can also specify which container supervisor you want to use by defining an environment variable in your ~/.bashrc:

export DOCKER=podman\n
"},{"location":"developers-guide/Testing-MongooseIM/#freetds-for-mssql-connectivity","title":"FreeTDS for MSSQL connectivity","text":"

MongooseIM requires FreeTDS in order to connect to MSSQL container.

Please install the driver:

# Ubuntu\n$ sudo apt install freetds-dev tdsodbc\n\n# CentOS compatible systems (Rocky, Alma)\n$ sudo yum install freetds\n\n# macOS\n$ brew install freetds\n

In case you are using an operating system different from Ubuntu or MacOS or have a custom FreeTDS installation, you may have to modify the tools/setup-db.sh script to use the proper paths. Find a configuration block starting with [mongoose-mssql] and change the Driver and Setup. For example, for CentOS compatible systems change them to /usr/lib64/libtdsodbc.so.0 and /usr/lib64/libtdsS.so respectively.

"},{"location":"developers-guide/Testing-MongooseIM/#how-to-print-the-instructions","title":"How to print the instructions","text":"

The help command prints a list of supported options.

./tools/test-runner.sh --help\n
"},{"location":"developers-guide/Testing-MongooseIM/#test-runner-examples","title":"Test runner examples","text":"

Usage example:

./tools/test-runner.sh --db redis --preset internal_mnesia\n

The command runs both big (feature) and small (unit) tests.

To view more examples, run:

./tools/test-runner.sh --examples\n
"},{"location":"developers-guide/Testing-MongooseIM/#test-runner-completion","title":"Test runner completion","text":"

Test runner supports shell TAB completion.

To enable completion in bash or zsh, run:

source tools/test-runner-complete.sh\n

To view completion examples, run:

./tools/test-runner.sh --examples-complete\n
"},{"location":"developers-guide/Testing-MongooseIM/#viewing-test-reports","title":"Viewing test reports","text":"

To view test execution results, run:

./tools/test-runner.sh --show-big-reports\n./tools/test-runner.sh --show-small-reports\n
"},{"location":"developers-guide/Testing-MongooseIM/#rerun-big-tests","title":"Rerun big tests","text":"

Very often we want to restart a specific suite when some test failed.

For example, some test has failed in mam_SUITE. The command was used to execute tests:

./tools/test-runner.sh --skip-small-tests --db pgsql --preset pgsql_mnesia --skip-stop-nodes\n

--skip-stop-nodes is optional here, because if any big test fails, then nodes would still be running.

We can just execute the same command, but it would rebuild nodes and start them.

The command can be used instead:

./tools/test-runner.sh --rerun-big-tests -- mam\n

--rerun-big-tests expands into --skip-small-tests --skip-setup-db --dev-nodes --test-hosts --skip-cover --skip-preset.

And mam is used to run mam_SUITE suite only.

"},{"location":"developers-guide/Testing-MongooseIM/#debugging-big-tests-database","title":"Debugging big tests database","text":"

This command opens MySQL shell interface:

./tools/open-test-database-shell.sh mysql\n

This command opens PgSQL shell interface:

./tools/open-test-database-shell.sh pgsql\n

This command opens MSSQL shell interface:

./tools/open-test-database-shell.sh mssql\n

You can use this command to execute SQL queries directly. It's useful when designing new SQL queries.

"},{"location":"developers-guide/Testing-MongooseIM/#unit-tests-aka-small-tests","title":"Unit tests (a.k.a. \"small tests\")","text":"

These test suites are aimed at testing various modules and libraries standalone, without launching a MongooseIM instance. They are very useful for developing/debugging libraries.

The test suites are located in test/ directory. To run all of them, use ./rebar3 ct; to run just a selected suite, use ./rebar3 ct --suite test/my_selected_SUITE. Rebar recompiles all the code automatically, there is no need for a separate compilation step.

If all the tests pass, you will get no output and summary log will be available in ct.log. If any of the tests fail the summary log is printed to stdout.

Detailed test results in a nice HTML format are saved in

_build/test/logs/ct_run.[something][datetime]/\n

Unit test running example using test runner:

# Run all small tests, show progress\n./tools/test-runner.sh --skip-big-tests --verbose\n\n# Run sha_SUITE without cover\n./tools/test-runner.sh --skip-big-tests --skip-cover -- sha\n\n# Run the 'general' group in config_parser_SUITE, show progress\n./tools/test-runner.sh --skip-big-tests --verbose -- config_parser:general\n
"},{"location":"developers-guide/Testing-MongooseIM/#end-to-end-tests-aka-big-tests","title":"End-to-end tests (a.k.a. \"big tests\")","text":""},{"location":"developers-guide/Testing-MongooseIM/#using-test-runner","title":"Using test runner","text":"

Most important options are preset and database:

# Runs privacy_SUITE and private_SUITE with PostgreSQL\n./tools/test-runner.sh --skip-small-tests --db pgsql --preset pgsql_mnesia -- privacy private\n\n# Runs rdbms_SUITE with MSSQL\n# Initialises a single MongooseIM node (works for some tests only)\n# Disables cover\n./tools/test-runner.sh --skip-small-tests --db mssql --preset odbc_mssql_mnesia --test-hosts mim --dev-nodes mim1 --skip-cover -- rdbms\n
"},{"location":"developers-guide/Testing-MongooseIM/#tldr","title":"TL;DR","text":"

You can also run the tests \"by hand\", instead of using the test runner.

In shell #1:

cd $MONGOOSEIM\n./rebar3 compile\nmake devrel\n

If databases are needed, for example PostgreSQL, you can run:

DB=\"pgsql\" ./tools/setup-db.sh\n

In shell #2:

cd $MONGOOSEIM/_build/mim1/rel/mongooseim\n./bin/mongooseimctl live\n

In shell #3:

cd $MONGOOSEIM/_build/mim2/rel/mongooseim\n./bin/mongooseimctl live\n

In shell #4:

cd $MONGOOSEIM/_build/mim3/rel/mongooseim\n./bin/mongooseimctl live\n

In shell #5:

cd $MONGOOSEIM/_build/fed1/rel/mongooseim\n./bin/mongooseimctl live\n

In shell #6:

cd $MONGOOSEIM/_build/reg1/rel/mongooseim\n./bin/mongooseimctl live\n

Back to shell #1:

cd big_tests/\nmake quicktest\n

Wait for the tests to finish and celebrate (or wallow in despair and grief)!

One-liner alternative for tmux users:

./rebar3 compile\nmake devrel\ntmux new-window -n mim1 '_build/mim1/rel/mongooseim/bin/mongooseimctl live'\ntmux new-window -n mim2 '_build/mim2/rel/mongooseim/bin/mongooseimctl live'\ntmux new-window -n mim3 '_build/mim3/rel/mongooseim/bin/mongooseimctl live'\ntmux new-window -n fed1 '_build/fed1/rel/mongooseim/bin/mongooseimctl live'\ntmux new-window -n reg1 '_build/reg1/rel/mongooseim/bin/mongooseimctl live'\n_build/mim1/rel/mongooseim/bin/mongooseimctl started\n_build/mim2/rel/mongooseim/bin/mongooseimctl started\n_build/mim3/rel/mongooseim/bin/mongooseimctl started\n_build/fed1/rel/mongooseim/bin/mongooseimctl started\n_build/reg1/rel/mongooseim/bin/mongooseimctl started\nmake -C big_tests quicktest\n

Start a new tmux and paste the commands.

"},{"location":"developers-guide/Testing-MongooseIM/#step-by-step-breakdown","title":"Step-by-step breakdown","text":"

make devrel builds five server nodes, preconfigured for a wide range of features covered by end-to-end tests.

  • $MONGOOSEIM/_build/mim1/rel, for most test SUITEs
  • $MONGOOSEIM/_build/mim*/rel, in order to test cluster-related commands.
  • $MONGOOSEIM/_build/fed1/rel, in order to test XMPP federation (server to server communication, S2S).
  • $MONGOOSEIM/_build/reg1/rel, in order to test global distribution feature.

In general, running a server in the interactive mode (i.e. mongooseimctl live) is not required to test it, but it's convenient as any warnings and errors can be spotted in real time. It's also easy to inspect the server state or trace execution (e.g. using dbg) in case of anything going wrong in some of the tests. To run the server in the background instead of the interactive mode, use mongooseimctl start && mongooseimctl started.

The quicktest configuration is a relatively comprehensive one, giving good overview of what does and what doesn't work in the system, without repeating tests. Why would we want to ever repeat the tests? In order to test different backends of the same parts of the system. E.g. a message archive might store messages in MySQL/PostgreSQL or Elasticsearch - the glue code between the XMPP logic module and database is different in each case, therefore repeating the same tests with different databases is necessary to guarantee a truthful code coverage measurement.

"},{"location":"developers-guide/Testing-MongooseIM/#testing-a-feature-in-development-tdd","title":"Testing a feature in development / TDD","text":"

The whole suite takes a significant amount of time to complete. When you develop a new feature, the speed of iterating is crucial to maintain the flow (who doesn't like the feeling?!) and not lose focus.

In $MONGOOSEIM/big_tests/ we have:

$ tree big_tests/ -L 1 -F\nbig_tests/\n\u251c\u2500\u2500 Makefile\n\u251c\u2500\u2500 README.md\n\u251c\u2500\u2500 default.spec\n\u251c\u2500\u2500 test.config\n\u251c\u2500\u2500 tests/\n\u2514\u2500\u2500 ...\n

tests/ is where the test suites reside.

*.config files are the suite configuration files - they contain predefined XMPP client specifications, server addresses and XMPP domains to use, and options required by test support libraries (i.e. Escalus).

*.spec files are the test specifications - they define the configuration file to use, the suites, test groups or individual test cases to run or skip, and some less important things.

default.spec is the default when running make quicktest, but it can be overridden with a TESTSPEC variable:

# make sure we're in $MONGOOSEIM/big_tests/\ncd $MONGOOSEIM/big_tests/\nmake quicktest TESTSPEC=my-feature.spec\n

To speed up the development cycle, developers usually create a .spec file for each feature (or each project, if you're cloning away) and only enable the suites / test groups they are working on. This allows testing only the parts of the system that are actually being changed. It's worth running default.spec once in a while to check for regressions.

Consult the default.spec file to see how to run only selected tests/groups/cases.

If you're sure that none of the test dependencies have changed, and you only edited the test suites and/or MongooseIM code, it's possible to speed up the tests by skipping the Rebar dependency and compilation checks by providing PREPARE= (i.e. an empty value):

make quicktest PREPARE=\n

Consult the big_tests/Makefile to see how it works.

"},{"location":"developers-guide/Testing-MongooseIM/#applying-code-changes","title":"Applying code changes","text":"

When working on a feature or a bug fix you often modify the code and check if it works as expected. In order to change the code on dev nodes that are already generated (mim* and fed*) recompile the code for a specific node. For example, to update the code on mim1 node all you have to do is:

./rebar3 as mim1 compile\n

A similar command applies to other nodes, the important thing being rebar3's profile.

When the above command finishes, the code can be reloaded on the server by either reloading changed module(s) in the node's shell, e.g. l(mongoose_rdbms), or restarting the node.

"},{"location":"developers-guide/Testing-MongooseIM/#reading-test-reports","title":"Reading test reports","text":"

When finished, the test engine writes detailed html reports into a directory:

big_tests/ct_report/ct_run.[gobbledygook][datetime]/\n

Each run is saved into a new directory. This snippet:

#!/bin/bash\n\nlst=$(ls -rt ct_report | grep ct_run | tail -n 1)\nrm ct_report/lastrun\nln -s $lst ct_report/lastrun\n

can be of some help.

"},{"location":"developers-guide/Testing-MongooseIM/#checking-coverage","title":"Checking coverage","text":"

If you want to check how much of the code is covered by tests, run:

make cover_quicktest\n

Note

You need all the mim nodes (mim1, mim2 and mim3) up and running, even if you only run some of the tests. If any of the nodes is down, the test will crash.

This command will recompile and reload the code on dev nodes with coverage enabled and run test suites as defined in the spec. Coverage statistics will be available in big_tests/ct_report/cover.html and coverage subdirectory.

"},{"location":"developers-guide/Testing-MongooseIM/#advanced-topics","title":"Advanced topics","text":"

There are many more options available. One of them is sequentially testing a number of preset configurations - we do it every day on CircleCI, testing MongooseIM with various OTP versions and database backends. Altogether, we have eight preset configurations.

If you want to dig deeper, consult .circleci/config.yml, .github/workflows/ci.yml and tools/test.sh, everything we do is there.

"},{"location":"developers-guide/Testing-MongooseIM/#gathering-test-reports-from-tests","title":"Gathering test reports from tests","text":"

If you test your MongooseIM fork on GitHub Actions or other CI provider, you might want to access test reports (which also include node logs and crash dumps) that are created by the test runner.

"},{"location":"developers-guide/Testing-MongooseIM/#uploading-reports-to-s3","title":"Uploading reports to S3","text":"

Our script uses AWS CLI to upload test results to an S3 bucket. Simply set relevant environment variables in your repository settings (at least AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY have to be set), and enjoy test reports landing straight into your bucket (AWS_BUCKET variable should store the bucket's name).

"},{"location":"developers-guide/Testing-MongooseIM/#uploading-reports-to-google-drive","title":"Uploading reports to Google Drive","text":"

To store test results in Google Drive you need to create a new project and obtain service account credentials. You must also add Google Drive API to your project - to do this, navigate to APIs & Services in your project console and find & add Google Drive API in the Library tab. Once downloaded, encode the credentials file with base64 (e.g. cat serviceCreds.json | base64) and use the result as GDRIVE_SERVICE_ACCOUNT_CREDENTIALS environment variable in your repository settings.

"},{"location":"developers-guide/Testing-MongooseIM/#saving-reports-on-your-personal-account","title":"Saving reports on your personal account","text":"

The uploaded files will belong to the project that you created, i.e. will not be immediately visible from your personal Google Drive UI. To be able to upload files to your personal account, you can share the reports' directory with the project account. First, note the ID of the project's user that you created to gain the service account credentials (e.g. test-123@fair-smile-123456.iam.gserviceaccount.com). You can see this on the Service Accounts tab of the project console. Now, create a directory on your Google Drive that will serve as the test root directory. Go into the directory's sharing options and paste in the project's user ID, granting it write access. Click to expand the advanced sharing options and note the ID of the shared directory that's displayed in the share link (e.g. if the link is https://drive.google.com/drive/folders/1234567890abcdef?usp=sharing, the directory's ID is 1234567890abcdef). Finally, set GDRIVE_PARENT_DIR environment variable of your build to the directory ID that you noted in the previous step.

"},{"location":"developers-guide/Testing-MongooseIM/#load-testing","title":"Load testing","text":"

Alongside CI, we do also CLT (Continuous Load Testing). We use amoc and amoc-arsenal-xmpp to perform tests that give us a feedback on changes to MongooseIM performance.

"},{"location":"developers-guide/accumulators/","title":"Accumulators","text":"

XMPP stanza processing starts in the mongoose_c2s module, which receives the stanza from a socket, or in ejabberd_s2s_in which receives stanzas from federated XMPP clusters. The stanza is processed and eventually it and/or other messages are sent out, either to the original sender, to another c2s process within the same MongooseIM installation, or to another XMPP server.

At the beginning of the main processing chain an accumulator is created containing following set of keys:

  • ref - A unique reference of the acc, useful for tracing.
  • timestamp - An Erlang timestamp retrieved from os:timestamp().
  • origin_pid - A PID of the process that created the accumulator.
  • origin_location - {Module, Function, Line} - A place in the code where the accumulator was created.
  • origin_stanza - Original stanza that triggered the processing (in a binary).
  • lserver - Nameprepped domain of the processing context.
  • host_type - Host type that the domain belongs to.
  • statem_acc - Data related to the C2S state machine.
  • stanza - A map with information about the stanza being routed. May be missing in some processing chains (when they are not triggered by a stanza)!
    • element - exml:element() with the current stanza being routed.
    • from_jid, to_jid - jid:jid() with the sender and the recipient.
    • name - A name of the top-level element in element.
    • type - A value of type attribute of the top-level element. If the attribute is missing, this field contains undefined.
    • ref - A reference of routed stanza.

It is then passed through all the stages until it reaches the end of its life. Throughout the process it is the very same accumulator; it is therefore possible to store a value in it on one stage of the processing and retrieve the same value later on.

The main assumption is that whatever MongooseIM does, it is always triggered by a stanza entering the system, with some exceptions, such as a couple of mongooseimctl operations, which create stanza-less accumulators. The stanza should always be packed into an accumulator and passed on, so that internally every action is performed the same way.

There are three main benefits from this approach:

  1. Performance - if we need to do something involving inspecting a stanza or more complicated operations (e.g. privacy check) we don't need to do it multiple times on various stages of processing - instead we can do it once and store the result in an accumulator.
  2. Debugging - it is now very easy to produce an exact track record of a stanza.
  3. Simplified implementation of modules which inherently involve multi-stage processing (e.g. mod_amp).
"},{"location":"developers-guide/accumulators/#api","title":"API","text":"

mongoose_acc module exports t() type which is the accumulator type.

"},{"location":"developers-guide/accumulators/#newnew_acc_params","title":"new(new_acc_params())","text":"

A constructor for accumulators. new_acc_params() is a map with following supported keys:

  • location - Should be a {Module, Function, Line} tuple (may be constructed with ?LOCATION macro from mongoose.hrl). Its format is not enforced by the acc logic but Dialyzer will most probably complain about any other type.
  • lserver - Nameprepped domain of the processing context.
  • host_type (optional) - Host type that the domain belongs to.
  • statem_acc (optional) - Data related to the C2S state machine.
  • element (optional) - If present, it will be used as a source for the stanza map.
  • from_jid, to_jid (optional) - Values used to override from and to attributes of the element, respectively.

If element is provided, the sender and recipient JIDs are extracted, either from the element itself, or from to_jid and from_jid parameters. The call will fail with an exception if it's not possible.

While allowed, stanza-less accumulators usage should be avoided.

"},{"location":"developers-guide/accumulators/#getters-for-predefined-fields","title":"Getters for predefined fields","text":"
  • ref(t())
  • timestamp(t())
  • lserver(t())
  • host_type(t())
  • element(t())
  • to_jid(t())
  • from_jid(t())
  • get_statem_acc(t())
  • packet(t()) - Returns a mongoose_c2s:packet() if there is a stanza in the accumulator.
  • stanza_name(t()) - Returns name value from stanza map.
  • stanza_type(t()) - Returns type value from stanza map.
  • stanza_ref(t()) - Returns ref value from stanza map. This is not the same as ref(t())!
"},{"location":"developers-guide/accumulators/#update_stanzastanza_params-t","title":"update_stanza(stanza_params(), t())","text":"

Replaces the whole stanza field in the accumulator with params provided in stanza_params(), which is a map of 3 fields: element, from_jid, to_jid. The same rules apply as in the case of constructor (new/1) but this time element field is mandatory.

"},{"location":"developers-guide/accumulators/#access-to-namespaced-fields","title":"Access to namespaced fields","text":"

It is possible to store and retrieve any data in the accumulator, that is related to the processing. There is no scope protection, so every module may access all namespaces and keys inside them.

  • set(Namespace :: any(), Key :: any(), Value :: any(), t())
  • set_permanent(Namespace :: any(), Key :: any(), Value :: any(), t()) - Upserts a field, which won't be removed during strip operation.
  • append(Namespace :: any(), Key :: any(), Value :: any(), t()) - In order to use this function, a Namespace:Key field must not exist or must be a list. Value is appended to the end of this list. If Value is a list, then a OldValue ++ Value operation is performed. In other cases OldValue ++ [Value] is used.
  • get(Namespace :: any(), Key :: any(), t()) - Returns a value of a specified field. Will crash if the NS:Key is not found.
  • get(Namespace :: any(), Key :: any(), Default :: any(), t()) - Returns a value of a specified field or Default if NS:Key is not found.
  • delete(Namespace :: any(), Key :: any(), t()) - Removes a specified field, no matter if it is permanent or not.
"},{"location":"developers-guide/accumulators/#stripping","title":"Stripping","text":"

Accumulator is used mostly to cache values for reuse within a c2s process; when it goes out to somewhere else, it is stripped of all unnecessary attributes except for the non-strippable ones, e.g.

  • ref
  • timestamp
  • origin_pid
  • origin_location
  • non_strippable - A set of permanent NS:Key pairs.

For a complete list, see mongoose_acc:default_non_strippable/0

If you want it to carry some additional values along with it, please use a dedicated API for setting \"permanent\" fields:

Acc2 = mongoose_acc:set_permanent(myns, myprop, 123, Acc1),\n

Permanent fields may be retrieved with ordinary get/3,4 functions. There are also functions get_permanent_keys/1 and get_permanent_fields/1 for extracting all at once.

The rationale behind stripping an accumulator is that some values stored in it are context-dependent. For example, at the beginning lserver refers to the host of the sender C2S. When an accumulator goes to the c2s of the recipient, the lserver attribute may change. There are also many cached values which are not valid anymore when the user changes (e.g. privacy checks).

In order to strip an accumulator, please use strip(strip_params(), t()), where strip_params() is a map of:

  • lserver - New domain. Obviously, may be equal to the old value.
  • host_type - Host type associated with the new domain, if there is one.
  • element, from_jid, to_jid - The same rules apply as in update_stanza/2.
"},{"location":"developers-guide/accumulators/#main-principles-of-an-accumulator-processing","title":"Main principles of an accumulator processing","text":"
  1. An accumulator is created when a stanza enters the server.
  2. An XML stanza is never passed around as a pure exml:element().
  3. An accumulator is stripped when it is passed to a different context (e.g. another c2s process).
  4. If a process produces more stanzas to be routed, they must reuse the original acc but with the stanza replaced with update_stanza/2.
"},{"location":"developers-guide/accumulators/#hooks","title":"Hooks","text":"

Many of the MongooseIM functionalities are implemented in submodules which attach their handlers to hooks (this is covered in detail in \"Hooks and handlers\"). When it comes to the accumulators, the following rules apply:

  • If a hook is related to stanza processing, a Mongoose accumulator should be provided. A hook handler may modify an accumulator in every permitted way (i.e. shouldn't directly modify acc fields, bypassing mongoose_acc API) and should return the execution result in the hook:result field. This is not enforced but should be followed by convention.
  • Avoid passing superfluous arguments to handlers - e.g. an LServer in hook args is redundant since it is already present in the accumulator.

Most handlers have already been modified so that they accept an instance of mongoose_acc:t() as the first argument and return value by storing it inside it. How the accumulator is used within a module is up to the implementers of the module.

"},{"location":"developers-guide/accumulators/#iqs-and-accumulators","title":"IQs and accumulators","text":"

mongoose_iq module exposes a dedicated API for accessing IQ-related accumulator fields. These are:

  • info(Acc) - Returns a #iq{} record produced from a stanza stored in the accumulator. May be invalid or not_iq if the stanza is not a valid IQ.
  • xmlns(Acc) - Returns XMLNS of the first subelement inside an IQ. In most cases it is a namespace of <query/> subelement. May be undefined.
  • command(Acc) - Returns the name of a first subelement inside an IQ. May be undefined.

These functions ensure that cached information matches the accumulator's stanza, so all of them return a tuple with a possibly updated acc as a second element.

"},{"location":"developers-guide/accumulators/#sample-usage-actual-and-potential","title":"Sample usage, actual and potential","text":""},{"location":"developers-guide/accumulators/#privacy-check","title":"Privacy check","text":"

Stanzas are often checked against privacy lists. According to the current mongoose_privacy:privacy_check_packet implementation, the result is stored in an accumulator so if a check has to be repeated it is just one map read.

"},{"location":"developers-guide/accumulators/#tracing","title":"Tracing","text":"

origin_pid and origin_location fields are immutable for the lifespan of a single accumulator. There are many places in the server, where an accumulator may be created, so origin_location makes it much easier to find out what event has triggered the processing, and origin_pid identifies the process in which it happened.

"},{"location":"developers-guide/accumulators/#performance-measurement","title":"Performance measurement","text":"

Given that each accumulator has a timestamp denoting its creation time, it is now very easy to implement a metric showing the stanza processing time, or even multiple metrics splitting it into stages.

"},{"location":"developers-guide/domain_management/","title":"Domain management","text":"

Warning

Some modules do not work with dynamic domains. This is also the case for s2s and the XMPP components (XEP-0114) mechanism, as configured in the listen.service section.

"},{"location":"developers-guide/domain_management/#mongooseim-core-component","title":"MongooseIM core component","text":"

Implemented by mongoose_domain_core module.

It is based on gen_server & ETS table w. public read access. This module is local for the node, it does not implement any sync across the nodes in a cluster. This component is responsible for dynamic routing, it is always started by MIM even if there is no support of dynamic domain names configured.

It provides the following interfaces:

  • Init - accepts the list of initial domain/host_type pairs provided in config file, and the list of host_types that can be used for dynamic insertion. Any of these lists can be empty, initial list of domain/host_type pairs can have some unique host_types not mentioned in the host_types list. The component is initialised by the main MIM supervisor. Implemented in mongoose_domain_sup:start_link/0.
  • Insert - adding new domain/host_type pair. This function is idempotent. It returns success on an attempt to insert the existing data, but fails if ETS already has the domain name associated with another host type. Implemented in mongoose_domain_api:insert_domain(Domain, HostType).
  • Remove - This function is idempotent. It deletes existing domain/host_type pairs. It is impossible to delete domain/host_type pairs specified on init of the component. Implemented in mongoose_domain_api:delete_domain(Domain).
  • Get host type by domain. Implemented in mongoose_domain_api:get_host_type(Domain).
  • Get all domains configured for the host_type. Implemented in mongoose_domain_api:get_domains_by_host_type(HostType).
  • Get the list of the host_types provided during initialisation. Implemented in mongoose_domain_api:get_all_static().

mongoose_domain_core implementation:

  • Has mongoose_domain_core table.
  • Default (initial) domains are static.
  • Disabled or deleted domains are not in mongoose_domain_core.
  • Static domains are non-mutable.
  • Static domains are not replicated.
  • Static domains have priority over DB domains.
"},{"location":"developers-guide/domain_management/#mongooseim-service","title":"MongooseIM service","text":"

As described in Services. Implements the service behaviour. Implemented by service_domain_db module.

This service provides an interface for dynamic management of domain names. It has persistent storage (RDBMS) where it stores information about domain names. This service ensures synchronization of dynamically managed domain names across different nodes in the cluster.

The minimal set of information associated with domain name is this:

  • Host type
  • Status (enabled/disabled)

This service provides the following interfaces:

  • Init - on init all the \u201cenabled\u201d domain names from the persistent storage are added to the core MIM component described above.
  • Add domain name (w/ host type) - This function is idempotent. An added domain is always \u201cenabled\u201d by default - it must be added in the core MIM component described in the previous section. If it\u2019s successfully enabled, then information about the domain name is added into persistent storage and distributed across all the nodes in the cluster.
  • Disabling/Enabling domain name - This function is idempotent. The status of the existing domain is always changed on successful call. If domain name is enabled, then it is added in the core MIM component. On disabling domain name is deleted from the core MIM component. Change of the status is distributed across all the nodes in the cluster.
  • Remove the domain name - This function is idempotent. Domain name is deleted from the core MIM component (if required) and from the DB. This action is distributed across all the nodes in the cluster.

In case of any issues (domain name is already configured with another host_type or host_type is not supported), errors are logged.

The database schema contains two tables:

  • domain_settings - one record per domain. Maps domain name to host_type and enabled status.
  • domain_events - the log of changes. The only reason it exists is that we can track updates in the domain_settings and apply updates across different nodes. The old events are eventually deleted from the table. Removal is triggered by all nodes of MongooseIM that have the service configured.

service_domain_db module does two tasks:

  • Initially downloads domains from domain_settings table, using sorting by id.
  • Waits for check_for_updates message and updates core component, depending on records in the domain_events table.

We use id field to sort records when paginating.

"},{"location":"developers-guide/domain_management/#domain-removal","title":"Domain removal","text":"

You cannot delete domains with unknown host-type. Configure host-type first to delete such domains.

Modules which store data in RDBMS and support dynamic domains will remove all persistent data associated with a domain when its removal is requested. This is not the case for NoSQL databases or Mnesia. Because of that, we recommend using RDBMS with dynamic domains. Please note, that mod_auth_token is the only exception for now and does not remove data from RDBMS when removing a domain.

"},{"location":"developers-guide/domain_management/#service-options","title":"Service options","text":"

Described in the services section.

"},{"location":"developers-guide/domain_management/#command-line-interface","title":"Command Line Interface","text":"

You can manage the domains with the mongooseimctl command. Some examples are provided below:

"},{"location":"developers-guide/domain_management/#add-domain","title":"Add domain:","text":"
./mongooseimctl domain addDomain --domain example.com --hostType type1\n
"},{"location":"developers-guide/domain_management/#delete-domain","title":"Delete domain:","text":"
./mongooseimctl domain removeDomain --domain example.com --hostType type1\n
"},{"location":"developers-guide/domain_management/#disable-domain","title":"Disable domain:","text":"
./mongooseimctl domain disableDomain --domain example.com\n
"},{"location":"developers-guide/domain_management/#enable-domain","title":"Enable domain:","text":"
./mongooseimctl domain enableDomain --domain example.com\n

Run ./mongooseimctl domain to get more information about all supported operations.

"},{"location":"developers-guide/domain_management/#api","title":"API","text":"

You can manage domains with one of our API's:

  • The GraphQL API has the same functionality as the command line interface. The queries and mutations for domains are grouped under the domain category.
  • The REST API (deprecated) supports domain management as well. See Dynamic Domains for details.
"},{"location":"developers-guide/hooks_description/","title":"Selected hooks description","text":"

This is a brief documentation for a few selected hooks. Though hooks & handlers differ in what they are there to do, it is not necessary to describe them all, because the mechanism is general. The following is meant to give you the idea of how the hooks work, what they are used for and the various purposes they can serve.

"},{"location":"developers-guide/hooks_description/#user_send_","title":"user_send_*","text":"

mongoose_c2s_hooks:user_send_packet(HostType, Acc, Params)\nmongoose_c2s_hooks:user_send_message(HostType, Acc, Params)\nmongoose_c2s_hooks:user_send_presence(HostType, Acc, Params)\nmongoose_c2s_hooks:user_send_iq(HostType, Acc, Params)\nmongoose_c2s_hooks:user_send_xmlel(HostType, Acc, Params)\n
These hooks are run in mongoose_c2s after the C2S process receives an XML element from the client.

The hooks won't be called for stanzas arriving from a user served by a federated server (i.e. on a server-to-server connection handled by ejabberd_s2s).

The logic depends on the C2S state, which changes during the connection, authentication and resource binding process:

"},{"location":"developers-guide/hooks_description/#hooks-called-for-session_established","title":"Hooks called for session_established","text":"

Some rudimentary verification of the stanza is done once it is received from the socket:

  • if present, the from attribute of the stanza is checked against the identity of the user whose session the process in question serves; if the identity does not match the contents of the attribute, an error is returned,
  • the recipient JID (to attribute) format is verified.

After successful checks, the following hooks are called. The first one is user_send_packet, which is called for all received XML elements. Next, depending on the type of the element, one of the following hooks is called:

  • user_send_message for messages,
  • user_send_presence for presences,
  • user_send_iq for IQ (info/query) stanzas,
  • user_send_xmlel for other XML elements.

These type-specific hooks should be used instead of user_send_packet when possible.

"},{"location":"developers-guide/hooks_description/#hooks-called-for-other-states","title":"Hooks called for other states","text":"

If the session is not established (e.g. the client hasn't authenticated or its resource is not bound yet), only the user_send_xmlel hook is called regardless of the XML element type. No other user_send_* hooks are called, and no stanza checks are performed.

"},{"location":"developers-guide/hooks_description/#handler-examples","title":"Handler examples","text":"

These hooks are handled by the following modules:

  • mod_blocking - handles IQ requests for blocking lists.
  • mod_caps - detects and caches capability information sent with certain presences for later use.
  • mod_carboncopy - forwards messages to all the user's resources which have carbon copying enabled.
  • mod_event_pusher - sends selected messages to an external service.
  • mod_inbox - stores messages in the user's inbox.
  • mod_mam - stores outgoing messages in an archive.
  • mod_ping - upon reception of every message from the client, this module (re)starts a timer; if nothing more is received from the client within 60 seconds, it sends an IQ ping, to which the client should reply - which starts another timer.
  • mod_presence - handles presence stanzas, updating the user presence state and broadcasting presence updates.
  • mod_privacy - filters sent stanzas according to privacy lists and handles privacy-related IQ requests.
  • mod_register - registers a new user when a registration IQ is received. user_send_xmlel is used because the stanza is received while the session is not established.
  • mod_smart_markers - checks if the stanza contains chat markers info and stores the update.
  • mod_stream_management - counts stanzas sent by the client and handles special XML elements like <a> and <enable>.
"},{"location":"developers-guide/hooks_description/#filter_packet-and-filter_local_packet","title":"filter_packet and filter_local_packet","text":"
mongoose_hooks:filter_packet({From, To, Acc, Packet})\nmongoose_hooks:filter_local_packet({From, To, Acc, Packet})\n

These hooks are run when the packet is being routed by ejabberd_router:route/4, which is the most general function used to route stanzas across the entire cluster. For example, mongoose_c2s calls it after calling the user_send_message or user_send_iq hook, and multiple modules use it for sending replies and errors.

  • filter_packet is run by mongoose_router_global for all routed packets. It is called at the start of the routing procedure.
  • filter_local_packet is run by mongoose_local_delivery when the packet is being routed to a domain hosted by the local server.

The handlers expect the {From, To, Acc, Packet} accumulator as their first argument. The stanza can be filtered out (in case the handler returns drop), left unchanged or modified.

filter_packet is a global hook

Note the hook code inside mongoose_hooks:

filter_packet(Acc) ->\n    run_global_hook(filter_packet, Acc, #{}).\n
This hook is run not for a host type, but globally across the whole cluster. Keep that in mind when registering the handlers and appropriately use the atom global instead of a host type as the second argument.

"},{"location":"developers-guide/hooks_description/#handler-examples_1","title":"Handler examples","text":"

These hooks are handled by the following modules:

  • mod_domain_isolation - filters out cross-domain stanzas.
  • mod_event_pusher - sends out configured events (e.g. push notifications) for incoming stanzas.
  • mod_inbox - stores incoming messages in the recipient's inbox.
  • mod_mam - stores incoming messages in the recipient's archive, and adds MAM-related elements to the message.
  • mod_pubsub - for each subscription authorization form sent by a node owner, the subscription state is updated, and the stanza is dropped.
  • mod_smart_markers - filters out chat markers, because they are handled separately by mod_offline_chatmarkers.
"},{"location":"developers-guide/hooks_description/#user_receive_","title":"user_receive_*","text":"
mongoose_c2s_hooks:user_receive_packet(HostType, Acc, Params)\nmongoose_c2s_hooks:user_receive_message(HostType, Acc, Params)\nmongoose_c2s_hooks:user_receive_presence(HostType, Acc, Params)\nmongoose_c2s_hooks:user_receive_iq(HostType, Acc, Params)\nmongoose_c2s_hooks:user_receive_xmlel(HostType, Acc, Params)\n

These hooks are run in mongoose_c2s after the recipient's C2S process receives an XML element and before sending it to the user.

The hooks won't run for stanzas which are destined to users of a different XMPP domain served by a federated server, connection to which is handled by ejabberd_s2s.

The first hook is user_receive_packet, which is called for all received XML elements. Next, depending on the type of the stanza, one of the following hooks is called:

  • user_receive_message for messages,
  • user_receive_presence for presences,
  • user_receive_iq for IQ (info/query) stanzas,
  • user_receive_xmlel for other XML elements.

These type-specific hooks should be used instead of user_receive_packet when possible.

"},{"location":"developers-guide/hooks_description/#handler-examples_2","title":"Handler examples","text":"

These hooks are handled by the following modules:

  • mod_caps - detects and caches capability information sent with certain messages for later use.
  • mod_carboncopy - forwards messages to all the user's resources which have carbon copying enabled.
  • mod_last - filters queries for user's last activity according to presence subscriptions.
  • mod_presence - handles incoming presences from other users, updating the presence status, and responds to presence probes.
  • mod_privacy - filters received stanzas according to privacy lists.
  • mod_stream_management - filters out stanzas with conflicting session ID's.
"},{"location":"developers-guide/hooks_description/#offline_message","title":"offline_message","text":"
mongoose_hooks:offline_message(Acc, From, To, Packet)\n

ejabberd_sm runs this hook for each message which cannot be delivered, because no resource (i.e. device or desktop client application) of its recipient is available online for delivery.

"},{"location":"developers-guide/hooks_description/#handler-examples_3","title":"Handler examples","text":"

This hook is handled by the following modules, listed in the order of execution:

  • mod_offline_chatmarkers - for chat marker messages, the handler stores them and returns {stop, Acc}, preventing further handlers from being called.

  • mod_offline - stores messages in a persistent way until the recipient comes online, and the message can be successfully delivered. The handler returns {stop, Acc} for all messages, preventing further handlers from being called.

  • mod_offline_stub - returns {stop, Acc} for all messages, preventing further handlers from being called.

  • ejabberd_sm - calls ejabberd_sm:bounce_offline_message, which responds with the <service-unavailable/> stanza error. In the case of using mod_mam the message is actually stored, and no such error should be sent - that's why the module mod_offline_stub can be enabled.

"},{"location":"developers-guide/hooks_description/#remove_user","title":"remove_user","text":"
mongoose_hooks:remove_user(Acc, LServer, LUser)\n

remove_user is run by ejabberd_auth - the authentication module - when a request is made to remove the user from the database of the server.

"},{"location":"developers-guide/hooks_description/#handler-examples_4","title":"Handler examples","text":"

This hook is used by multiple modules, since removing a user requires many cleanup operations:

  • mod_auth_token removes user's authentication tokens;
  • mod_event_pusher disables user's push notifications;
  • mod_inbox removes user's inbox;
  • mod_last removes last activity information (XEP-0012: Last Activity);
  • mod_mam removes the user's message archive;
  • mod_muc_light quits multi-user chat rooms;
  • mod_offline deletes the user's offline messages;
  • mod_privacy removes the user's privacy lists;
  • mod_private removes the user's private xml data storage;
  • mod_pubsub unsubscribes from publish/subscribe channels;
  • mod_roster removes the user's roster from the database;
  • mod_smart_markers removes chat markers stored for the user;
  • mod_vcard removes user's vCard information.
"},{"location":"developers-guide/hooks_description/#node_cleanup","title":"node_cleanup","text":"
mongoose_hooks:node_cleanup(Node)\n

node_cleanup is run by a mongooseim_cleaner process which subscribes to nodedown messages. Currently, the hook is run inside a global transaction (via global:trans/4).

The job of this hook is to remove all processes registered in Mnesia. MongooseIM uses Mnesia to store processes through which messages are then routed - like user sessions or server-to-server communication channels - or various handlers, e.g. IQ request handlers. Those must obviously be removed when a node goes down, and to do this the modules ejabberd_local, ejabberd_router, ejabberd_s2s, ejabberd_sm and mod_bosh register their handlers with this hook.

The number of retries for this transaction is set to 1, which means that in some situations the hook may be run on more than one node in the cluster, especially when there is little garbage to clean after the dead node. Setting retries to 0 is not a good decision, as it was observed that in some setups it may abort the transaction on all nodes.

"},{"location":"developers-guide/hooks_description/#session_opening_allowed_for_user","title":"session_opening_allowed_for_user","text":"
allow == mongoose_hooks:session_opening_allowed_for_user(HostType, JID)\n

This hook is run after authenticating, when a user sends the IQ opening a session. Handler functions are expected to return:

  • allow if a given JID is allowed to open a new session (the default)
  • deny if the JID is not allowed but other handlers should be run
  • {stop, deny} if the JID is not allowed but other handlers should not be run

In the default implementation the hook is not used, built-in user control methods are supported elsewhere. This is the perfect place to plug in custom security control.

"},{"location":"developers-guide/hooks_description/#other-hooks","title":"Other hooks","text":"
  • acc_room_affiliations
  • adhoc_local_commands
  • adhoc_sm_commands
  • amp_check_condition
  • amp_determine_strategy
  • amp_verify_support
  • anonymous_purge
  • auth_failed
  • c2s_stream_features
  • can_access_identity
  • can_access_room
  • caps_recognised
  • check_bl_c2s
  • disco_info
  • disco_local_features
  • disco_local_identity
  • disco_local_items
  • disco_muc_features
  • disco_sm_features
  • disco_sm_identity
  • disco_sm_items
  • does_user_exist
  • extend_inbox_result
  • failed_to_store_message
  • filter_local_packet
  • filter_packet
  • filter_pep_recipient
  • filter_room_packet
  • filter_unacknowledged_messages
  • forbidden_session
  • foreign_event
  • forget_room
  • get_key
  • get_mam_muc_gdpr_data
  • get_mam_pm_gdpr_data
  • get_pep_recipients
  • get_personal_data
  • inbox_unread_count
  • invitation_sent
  • is_muc_room_owner
  • join_room
  • leave_room
  • mam_archive_id
  • mam_archive_message
  • mam_archive_size
  • mam_archive_sync
  • mam_flush_messages
  • mam_get_behaviour
  • mam_get_prefs
  • mam_lookup_messages
  • mam_muc_archive_id
  • mam_muc_archive_message
  • mam_muc_archive_size
  • mam_muc_archive_sync
  • mam_muc_flush_messages
  • mam_muc_get_behaviour
  • mam_muc_get_prefs
  • mam_muc_lookup_messages
  • mam_muc_remove_archive
  • mam_muc_retraction
  • mam_muc_set_prefs
  • mam_remove_archive
  • mam_retraction
  • mam_set_prefs
  • mod_global_distrib_known_recipient
  • mod_global_distrib_unknown_recipient
  • node_cleanup
  • offline_groupchat_message
  • offline_message
  • packet_to_component
  • presence_probe
  • privacy_check_packet
  • privacy_get_user_list
  • privacy_iq_get
  • privacy_iq_set
  • privacy_list_push
  • privacy_updated_list
  • push_notifications
  • register_subhost
  • register_user
  • remove_domain
  • remove_user
  • reroute_unacked_messages
  • resend_offline_messages
  • room_exists
  • room_new_affiliations
  • room_packet
  • roster_get
  • roster_get_jid_info
  • roster_get_subscription_lists
  • roster_get_versioning_feature
  • roster_groups
  • roster_in_subscription
  • roster_out_subscription
  • roster_process_item
  • roster_push
  • roster_set
  • s2s_allow_host
  • s2s_receive_packet
  • s2s_send_packet
  • s2s_stream_features
  • session_cleanup
  • session_opening_allowed_for_user
  • set_presence
  • set_vcard
  • sm_filter_offline_message
  • sm_register_connection
  • sm_remove_connection
  • unacknowledged_message
  • unregister_subhost
  • unset_presence
  • update_inbox_for_muc
  • user_available
  • user_open_session
  • user_ping_response
  • user_receive_iq
  • user_receive_message
  • user_receive_packet
  • user_receive_presence
  • user_receive_xmlel
  • user_send_iq
  • user_send_message
  • user_send_packet
  • user_send_presence
  • user_send_xmlel
  • user_socket_closed
  • user_socket_error
  • user_stop_request
  • user_terminate
  • vcard_set
  • xmpp_bounce_message
  • xmpp_presend_element
  • xmpp_send_element
  • xmpp_stanza_dropped
"},{"location":"developers-guide/logging/","title":"Logging","text":"

To use logger in your module, include

-include(\"mongoose_logger.hrl\").\n
or
-include(\"mongoose.hrl\").\n

"},{"location":"developers-guide/logging/#logging-macros","title":"Logging macros","text":"

There are several macros for the most common logging levels:

?LOG_DEBUG(#{what => debug_event, info => Arg}),\n?LOG_INFO(#{what => info_event, info => Arg}),\n?LOG_NOTICE(#{what => notice_event, info => Arg}),\n?LOG_WARNING(#{what => warning_event, info => Arg}),\n?LOG_ERROR(#{what => error_event, info => Arg}),\n?LOG_CRITICAL(#{what => critical_event, info => Arg}),\n

Use them in correspondence with the appropriate log level. Please be mindful of what is logged and which log level is used for it.

"},{"location":"developers-guide/logging/#logging-levels","title":"Logging levels","text":"

A system operator can choose the global log level by setting loglevel in mongooseim.toml.

Possible values are the standard syslog severity levels, plus all or none: \"all\", \"debug\", \"info\", \"notice\", \"warning\", \"error\", \"critical\", \"alert\", \"emergency\", and \"none\".

[general]\n  loglevel = \"notice\"\n

If a user sets the log level to all, then they would see all messages in logs.

Levels warning and error are the most commonly used for production systems.

"},{"location":"developers-guide/logging/#logging-format","title":"Logging format","text":"

We use structured logging as inspired by Ferd's post. We also use a modified logfmt format as one of the possible default logger formatters. This format is Splunk and ELK friendly. Check the list of fields for fields documentation.

what => something_interesting field is required.

    ?LOG_ERROR(#{what => check_password_failed,\n                 reason => Error, user => LUser})\n\n    try ...\n    catch\n        Class:Reason:StackTrace ->\n            ?LOG_ERROR(#{what => check_password_failed,\n                         class => Class, reason => Reason, stacktrace => StackTrace}),\n            erlang:raise(Class, Reason, StackTrace)\n    end\n

Field user => <<\"alice\">> is often used too.

A common way to name an error event is what => function_name_failed. For example, what => remove_user_failed. Use the advice critically, it would not work well for any function. Counterexample:

handle_info(Info, State) ->\n    ?LOG_WARNING(#{what => unexpected_message, msg => Info}),\n    {noreply, State}.\n
"},{"location":"developers-guide/logging/#filtering-logs-by-module","title":"Filtering logs by module","text":"

Setting loglevel to debug can lead to a flood of messages in logs. To set a different loglevel for just one module, call:

mongoose_logs:set_global_loglevel(error).\nmongoose_logs:set_module_loglevel(mod_mam, debug).\n

This code sets the loglevel to error for all log messages, except for those generated by mod_mam. All messages from mod_mam would be logged.

"},{"location":"developers-guide/mod_amp_developers_guide/","title":"The Developer's Guide to mod_amp","text":"

This is a quick, introductory guide for developers wishing to extend mod_amp or plug into the message processing system.

"},{"location":"developers-guide/mod_amp_developers_guide/#source-files-headers-and-tests","title":"Source Files, Headers and Tests","text":"
  • include/amp.hrl This header file contains the amp XML namespace and the types used by mod_amp: amp_rule() and amp_strategy() are the top-level points of interest.

  • src/mod_amp.erl This module is responsible for plugging in all the other components. Its main driving function is filter_packet. After determining that a given message contains amp rules, the module proceeds by determining its strategy for the message and comparing it against the rules. The server may return an error at multiple points in its work-flow. This is signaled by calling the function send_error_and_drop/3 or send_errors_and_drop/2.

  • src/amp.erl This module is responsible for parsing rules from incoming elements and serializing server responses in the proper format. binaries_to_rule/3 can return either a proper amp_rule(), or an amp_invalid_rule(), which does not contain sensible values, but can be used by the server to create an appropriate error message.

  • test/amp_SUITE.erl Tests for the API functions exported by amp.erl

  • src/amp_strategy.erl This module is where the server-side hook for determining a default action for a given message is performed. Calls to ejabberd_sm are made here.

  • src/amp_resolver.erl This module models the resolution of amp rules, given a certain strategy. Also, the function verify_rule_support is hard-coded here to return an unsupported-type error for unsupported rule actions and values.

  • test/amp_resolver_SUITE.erl These tests verify that the amp_resolver:check_condition/3 hook works as intended, i.e: that the rules which would be triggered given a particular server-side strategy actually do get triggered, and that all others get rejected.

  • test/amp_gen.erl This module contains PropEr generators for server-side strategies, as well as valid and invalid amp rules. Used in both test suites.

"},{"location":"developers-guide/mod_amp_developers_guide/#hooks-for-other-modules","title":"Hooks for Other Modules","text":"

If your module would like to have some say in the amp decision making process, please refer to the hooks: amp_determine_strategy and amp_check_condition. Remember that the hook for check_condition is a fold on a boolean(), and should behave like a variadic or. I.e: once a rule is deemed to apply, other hooks SHOULD NOT revert this value to false.

Cf. this code from amp_resolver:

-spec check_condition(any(), amp_strategy(), amp_condition(), amp_value())\n                          -> boolean().\ncheck_condition(HookAcc, Strategy, Condition, Value) ->\n    case HookAcc of\n        true -> true;   %% SOME OTHER HOOK HAS DECIDED THAT THIS RULE APPLIES %%\n        _    -> resolve(Strategy, Condition, Value) %% PERFORM LOCAL CHECK %%\n    end.\n
"},{"location":"developers-guide/mod_amp_developers_guide/#ideas-for-further-development","title":"Ideas for Further Development","text":""},{"location":"developers-guide/mod_amp_developers_guide/#easy","title":"Easy","text":"
  • Implement the 'alert' and 'drop' action types.
  • Implement support for the 'stored' value for 'deliver'
"},{"location":"developers-guide/mod_amp_developers_guide/#medium","title":"Medium","text":"
  • Implement the security policy described in the third bullet point of XEP-0079, Section 9 (Security Considerations). This will require that amp_resolver:verify_support also take the {From, To, Packet} :: hook_data() parameter and check that From is permitted to know about To's presence. If they are not, then the server should treat this as a not-acceptable amp request.

  • Make support for various actions, conditions and values configurable. This will require implementing an intelligent mechanism for matching the user-supplied rules with what's configured server-side. Currently, server-side support is hard-coded in several places:

    1. Disco announcements are in mod_amp:amp_features/0
    2. Rule support is in amp_resolver:verify_rule_support/1
    3. Every other function that deals with rules can handle unsupported rules, but ignores their meaning and decides that these rules don't apply.
"},{"location":"developers-guide/mod_amp_developers_guide/#hard","title":"Hard","text":"
  • Implement support for the 'expire-at' condition.
"},{"location":"developers-guide/mod_muc_light_developers_guide/","title":"The Developer's Guide to mod_muc_light","text":"

This is an in-depth guide on mod_muc_light design decisions and implementation.

"},{"location":"developers-guide/mod_muc_light_developers_guide/#source-header-and-test-suite-files","title":"Source, header and test suite files","text":"

All source files can be found in src/muc_light/.

  • mod_muc_light.erl

Main module. It implements the gen_mod behaviour. It subscribes to some essential hooks and exports several functions, mostly callbacks. It handles integration with mod_disco, mod_privacy and mod_roster. All operations that take place outside the room (including the room creation) are implemented here. Last but not least - this module prevents service-unavailable errors being sent when an offline user receives a groupchat message.

  • mod_muc_light_codec_backend.erl

A behaviour implemented by modules that translate the MUC Light internal data format to stanzas for clients and vice versa. Besides specifying callbacks, it implements generic error encoder function.

  • mod_muc_light_codec_legacy.erl

An implementation of XEP-0045 compatibility mode. Note, that while some parts of the legacy mode are implemented directly in mod_muc_light.erl, the stanza translation takes place here. It does not utilise the full potential of the MUC Light extension but allows using the standard MUC implementation in XMPP client libraries for prototyping or the transition phase. Not recommended for production systems (less efficient than modern codec and requires more round-trips).

  • mod_muc_light_codec_modern.erl

An implementation of a modern MUC Light protocol, described in the XEP. Supports all MUC Light features.

  • mod_muc_light_db_backend.erl

A behaviour implemented by database backends for the MUC Light extension.

  • mod_muc_light_db_mnesia.erl

A Mnesia backend for this extension. Uses transactions for room metadata updates (configuration and affiliation list) and dirty reads whenever possible.

  • mod_muc_light_db_rdbms.erl

An SQL backend for mod_muc_light. create_room, destroy_room, remove_user, set_config, modify_aff_users execute at least one query in a single transaction. room_exists, get_user_rooms, get_user_rooms_count, get_config, get_blocking, set_blocking, get_aff_users execute only one query per function call. get_info executes 3 SELECT queries, not protected by a transaction.

  • mod_muc_light_db_rdbms_sql.erl

SQL queries for mod_muc_light_db_rdbms.erl.

  • mod_muc_light_room.erl

This module handles everything that occurs inside the room: access checks, metadata changes, message broadcasting etc.

  • mod_muc_light_utils.erl

Utilities shared by other MUC Light modules. It includes the room configuration processing and the affiliation logic.

The header file can be found in include/.

  • mod_muc_light.hrl

It contains definitions of MUC Light namespaces, default configuration options and several common data types and records.

There are 2 test suites and one helper module in big_tests/tests.

  • muc_light_SUITE.erl

Main test suite, checks all the most important functionalities of the MUC Light extension.

  • muc_light_legacy_SUITE.erl

muc_light_SUITE.erl equivalent that uses XEP-0045 compatibility mode.

  • muc_helper.erl

Provides handy iterators over room participants. Used in MUC Light suites but in the future could be used in muc_SUITE as well.

"},{"location":"developers-guide/mod_muc_light_developers_guide/#hooks-handled-by-this-extension","title":"Hooks handled by this extension","text":"
  • offline_groupchat_message handled by mod_muc_light:prevent_service_unavailable/3

Prevents the default behaviour of sending service-unavailable error to the room when a groupchat message is sent to an offline occupant.

  • remove_user handled by mod_muc_light:remove_user/2

Triggers DB cleanup of all data related to the removed user. Includes a broadcast of a notification about user removal from occupied rooms.

  • disco_local_items handled by mod_muc_light:get_muc_service/5

Adds a MUC service item to the Disco result. Uses either a MUC Light or a classic MUC namespace when the legacy mode is enabled.

  • roster_get handled by mod_muc_light:add_rooms_to_roster/2

Injects room items to the user's roster.

  • privacy_iq_get, privacy_iq_set handled by mod_muc_light:process_iq_get/5 and mod_muc_light:process_iq_set/4 respectively

These callbacks handle blocking settings when legacy mode is enabled.

  • is_muc_room_owner, can_access_room, can_access_identity used by mod_muc_light:is_room_owner/3, mod_muc_light:can_access_room/3 and mod_muc_light:can_access_identity/3 respectively

Callbacks that provide essential data for the mod_mam_muc extension.

"},{"location":"developers-guide/mod_muc_light_developers_guide/#hooks-executed-by-this-extension","title":"Hooks executed by this extension","text":"
  • filter_room_packet by codecs

Allows mod_mam_muc to archive groupchat messages.

  • forget_room by mod_muc_light_db_mnesia and mod_muc_light_room

It is a part of mod_mam_muc integration as well. A hook used for MAM cleanup upon room destruction.

"},{"location":"developers-guide/mod_muc_light_developers_guide/#advantages-and-drawbacks-compared-to-classic-muc","title":"Advantages and drawbacks (compared to classic MUC)","text":"

The new MUC implementation brings quite a few benefits to the table:

  • It is fully distributed - Does not have SPOF, concurrent senders do not block each other, especially in large rooms. Message broadcasting is being done in sender c2s context.
  • It does not use presences - Much less traffic and stable membership information, especially on mobile networks.
  • It provides built-in blocking support - Instead of blocking traffic like Privacy Lists do, it handles blocklists internally, preventing the blocker from being added to or by blocked entities.
  • Less round-trips - A room can be created and configured with an initial list of occupants with a single request.
  • Versioning - Reduces traffic and allows clients to reliably and quickly detect that the room state has changed.
  • Isolation - Processing errors are contained in a sender context, not affecting other room occupants.
  • Fully customisable room configuration - Your users can store any meta room information you allow.

Drawbacks are:

  • Requires DB transactions to ensure Room state consistency.
  • Fetches the occupant list from DB for every message that is broadcasted.
  • Due to concurrent message broadcast, it is possible for occupants to receive messages in a different order (given the messages are broadcasted at the exactly same time).
  • With stream resumption disabled or when resumption times out, a user may miss a message in the following scenario:
  • Message A archived
  • Message B archived
  • Message B delivered to the user
  • User loses connection
  • Resumption timeout
  • User queries MAM for all messages after B and misses A
"},{"location":"developers-guide/mod_muc_light_developers_guide/#ideas-for-further-development","title":"Ideas for Further Development","text":""},{"location":"developers-guide/mod_muc_light_developers_guide/#easy","title":"Easy","text":"
  • Add more tests for negative cases
"},{"location":"developers-guide/mod_muc_light_developers_guide/#medium","title":"Medium","text":"
  • Add optional per-room processes to avoid the need of DB transactions and ensure message ordering (maybe \"hard\"?).
  • Redis backend
"},{"location":"developers-guide/mod_muc_light_developers_guide/#hard","title":"Hard","text":"
  • Room metadata cache (maybe \"medium\"?).
"},{"location":"developers-guide/mongoose_wpool/","title":"mongoose_wpool","text":"

All the outgoing pools configured by the outgoing_pools option are hidden behind the mongoose_wpool API. Every pool is described by a tuple {Type, Host, Tag, PoolOptions, ConnectionOptions} (see outgoing pools for details about each element of the tuple).

"},{"location":"developers-guide/mongoose_wpool/#supervision-tree","title":"Supervision tree","text":"
  • mongoose_wpool_sup supervisor for every type of the pool. Under it there can be many children of:
    • mongoose_wpool_type_sup is started on-demand when a pool of given type is started. Many pools of the same type are supervised by the supervisor. Its children are:
      • mongoose_wpool_mgr all the pools of the same type are managed by a manager. It's responsible for starting, stopping and restarting the pool. Restarting happens when the main worker_pool process for the pool is stopped unintentionally. This usually happens when there were too many restarts of worker processes.
      • many worker_pool supervisors holding a specific pool are on the same level as the manager.

The mongoose_wpool_mgr manages the pool by setting monitor for every started pool.

"},{"location":"developers-guide/mongoose_wpool/#implementing-new-pool-type","title":"Implementing new pool type","text":"

To add a new pool type, create a mongoose_wpool_NEW_TYPE module implementing the mongoose_wpool behaviour. This means that for a new type xyz we need to create a mongoose_wpool_xyz module. Then we can use the xyz type to start the pool via outgoing_pools option or directly via the mongoose_wpool API.

"},{"location":"developers-guide/release_config/","title":"Release/Installation configuration","text":""},{"location":"developers-guide/release_config/#advanced-release-configuration","title":"Advanced release configuration","text":"

It's now possible to install MongooseIM from source in two modes:

  • system - it's used internally to generate Linux packages (.deb, .rpm)
  • user - which is the default mode and used for testing on GitHub Actions and in development

You can also build OS specific packages by using the tools in [MongooseIM repo root]/tools/pkg - refer to README.md therein.

"},{"location":"developers-guide/release_config/#configure-script","title":"Configure script","text":"

The tools/configure script can be used to specify which 3rd party dependencies should be included in the final release or to set the installation prefix and installation mode. More details can be found in the tool's help. The help is printed when the script is run without any parameters tools/configure:

configure: OPTIONS\n\nSpecifies which 3rd party deps will be included in the release.\nWrites configure.out file as output - this file can be sourced with:\n\n    . configure.out\n\nWrites rel/configure.vars.config which can be used as Reltool input.\n\n3rd party apps:\n\n    with-none           include no 3rd party drivers\n    with-all            include all drivers\n    with-mysql          include mysql driver\n    with-odbc           include an ODBC driver (requires unixodbc to compile)\n    with-pgsql          include pgsql driver\n    with-redis          include redis driver\n\nOptions:\n\n    prefix    Installation PREFIX directory. Default: /usr/local\n    system    Install files into $PREFIX/{bin, etc, ...} instead of a completely self contained release. Default: no\n    user      System user to run the server as. Default:\n

This script is also accessible via the make configure target.

"},{"location":"developers-guide/release_config/#example","title":"Example","text":"

If mysql and redis are the only drivers that should be included in the release, run the following command before make rel:

$ ./tools/configure with-mysql with-redis\n

You only need to run the ./tools/configure command once (unless changing the release's config is needed to include some other dependencies).

"},{"location":"developers-guide/release_config/#system-install","title":"System install","text":"

To manually test the installation run tools/test-install.sh. This script is intended for careful inspection by a human user, not for automation. Results should be similar to those described below.

On Mac:

./tools/configure with-all user=erszcz prefix=/tmp/mim-sandbox-system system=yes\ncat configure.out rel/configure.vars.config\nRUNNER_GROUP=staff make install\n

Overriding RUNNER_GROUP on a Mac is necessary, as users by default don't have private groups of the same name as their usernames.

Generated build configs:

$ cat configure.out rel/configure.vars.config\nexport MONGOOSEIM_CONFIGURED=\"yes\"\nexport APPS=\"mysql eodbc epgsql eredis nksip cqerl tirerl erlcloud\"\nexport PREFIX=\"/tmp/mim-sandbox-system\"\nexport RELTOOL_VARS=\"rel/configure.vars.config\"\nexport SYSTEM=\"yes\"\nexport RUNNER_USER=\"erszcz\"\nexport BIN_DIR=\"$PREFIX/usr/bin\"\nexport ETC_DIR=\"$PREFIX/etc/mongooseim\"\nexport LIB_DIR=\"$PREFIX/usr/lib/mongooseim\"\nexport LOG_DIR=\"$PREFIX/var/log/mongooseim\"\nexport MDB_DIR=\"$PREFIX/var/lib/mongooseim\"\nexport LOCK_DIR=\"$PREFIX/var/lock/mongooseim\"\nexport PID_DIR=\"$PREFIX/var/lib/mongooseim\"\nexport STATUS_DIR=\"$PREFIX/var/lib/mongooseim\"\n{mongooseim_runner_user, \"erszcz\"}.\n{mongooseim_script_dir, \"/tmp/mim-sandbox-system/usr/lib/mongooseim/bin\"}.\n{mongooseim_etc_dir, \"/tmp/mim-sandbox-system/etc/mongooseim\"}.\n{mongooseim_log_dir, \"/tmp/mim-sandbox-system/var/log/mongooseim\"}.\n{mongooseim_mdb_dir, \"/tmp/mim-sandbox-system/var/lib/mongooseim\"}.\n{mongooseim_pid_dir, \"/tmp/mim-sandbox-system/var/lib/mongooseim\"}.\n{mongooseim_status_dir, \"/tmp/mim-sandbox-system/var/lib/mongooseim\"}.\n{mongooseim_mdb_dir_toggle, []}.\n{mongooseim_lock_dir, \"/tmp/mim-sandbox-system/var/lock/mongooseim\"}.\n

Installed tree:

$ tree mim-sandbox-system/ -L 3\nmim-sandbox-system/\n\u251c\u2500\u2500 etc\n\u2502\u00a0\u00a0 \u2514\u2500\u2500 mongooseim\n\u2502\u00a0\u00a0     \u251c\u2500\u2500 app.config\n\u2502\u00a0\u00a0     \u251c\u2500\u2500 mongooseim.toml\n\u2502\u00a0\u00a0     \u2514\u2500\u2500 vm.args\n\u251c\u2500\u2500 usr\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 bin\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 mongooseimctl\n\u2502\u00a0\u00a0 \u2514\u2500\u2500 lib\n\u2502\u00a0\u00a0     \u2514\u2500\u2500 mongooseim\n\u2514\u2500\u2500 var\n    \u251c\u2500\u2500 lib\n    \u2502\u00a0\u00a0 \u2514\u2500\u2500 mongooseim\n    \u251c\u2500\u2500 lock\n    \u2502\u00a0\u00a0 \u2514\u2500\u2500 mongooseim\n    \u2514\u2500\u2500 log\n        \u2514\u2500\u2500 mongooseim\n\n13 directories, 4 files\n

Files which change after starting and stopping such an installation:

var/lib/mongooseim/DECISION_TAB.LOG\nvar/lib/mongooseim/LATEST.LOG\nvar/lib/mongooseim/last_activity.DCD\nvar/lib/mongooseim/muc_registered.DCD\nvar/lib/mongooseim/muc_room.DCD\nvar/lib/mongooseim/offline_msg.DAT\nvar/lib/mongooseim/passwd.DCD\nvar/lib/mongooseim/privacy.DCD\nvar/lib/mongooseim/private_storage.DAT\nvar/lib/mongooseim/roster.DCD\nvar/lib/mongooseim/roster_version.DCD\nvar/lib/mongooseim/schema.DAT\nvar/lib/mongooseim/vcard.DAT\nvar/lib/mongooseim/vcard_search.DCD\nvar/lib/mongooseim/pid\nvar/lib/mongooseim/status\nvar/log/mongooseim/crash.log\nvar/log/mongooseim/mongooseim.log\nvar/log/mongooseim/erlang.log.1\nvar/log/mongooseim/run_erl.log\n
"},{"location":"developers-guide/release_config/#caveats","title":"Caveats","text":"
  • Running make install will blindly overwrite any configs it encounters on its way. Mnesia database and log files are preserved only due to the fact that they're not build process artifacts.
"},{"location":"developers-guide/xep_tool/","title":"XEP-tool usage","text":"

The XEP-tool is the answer for developers who wonder how to maintain an actual list of supported XEPs. It's a fast and easy way to automatically produce documentation from raw, beam files. This is a quick guide on how to enjoy the usage of the XEP-tool.

"},{"location":"developers-guide/xep_tool/#sign-your-module-file-first","title":"Sign your module file first","text":"

The architecture of MongooseIM determines that almost every XEP or feature implementation resides in its own file. It is not strictly enforced but usually the file is named with a mod_ prefix. For example mod_privacy file implements XEP-0016: Privacy Lists.

"},{"location":"developers-guide/xep_tool/#mandatory-xep-and-version","title":"Mandatory xep and version","text":"

In order to let the XEP-tool know about your module, we add a special attribute xep at the beginning of the mod_privacy module:

-xep([{xep, 16}, {version, \"1.6\"}]).\n

Now we know that this module implements XEP-0016: Privacy Lists with version 1.6.

It gives the tool enough information to generate a URL to the XEP homepage. If the module implements a ProtoXEP, the xep property should be an atom:

-xep([{xep, 'muc-light'}, {version, \"0.0.1\"}]).\n

You ought to remember to specify xep and version properties every time. You can also put several xep attributes in one module. For example mod_roster implements attributes of XEP-0083: Nested Roster Groups, XEP-0093: Roster Item Exchange and XEP-0237: Roster Versioning. Just list them one after another:

-xep([{xep, 237}, {version, \"1.3\"}]).\n-xep([{xep, 83}, {version, \"1.0\"}]).\n-xep([{xep, 93}, {version, \"1.2\"}]).\n
"},{"location":"developers-guide/xep_tool/#status","title":"Status","text":"

By default the status is complete. You can also mark the implementation status as partial:

-xep([{xep, 79}, {version, \"1.2\"}, {status, partial}]).\n
"},{"location":"developers-guide/xep_tool/#legacy-versions","title":"Legacy versions","text":"

Sometimes the implementation is backwards-compatible, supporting legacy namespaces defined in a particular historical version. You can list such versions as below:

-xep([{xep, 313}, {version, \"0.6\"}, {legacy_versions, [\"0.5\"]}]).\n

Warning

Watch out for conflicts! Put the xep attribute in the main module implementing the extension to avoid conflicting declarations. If you need to specify the same XEP multiple times (e.g. because the implementation is split into two parts), make sure that the version properties are the same for all attributes - otherwise the XEP tool will report an error. The resulting status for a XEP is complete unless all implementing modules have the partial status.

"},{"location":"developers-guide/xep_tool/#compile-and-run","title":"Compile and run","text":"

After annotating all modules with the xep attributes, you need to run make in order to generate the .beam files. Next, you can run the XEP tool. It has a mandatory argument, which specifies the output format:

  • markdown - a Markdown table,
  • list - an Erlang list,
  • doap - Description Of A Project.

For example, to print out the DOAP, you can run the following from the MongooseIM project directory:

tools/xep_tool/xep_tool.escript doap\n

To save the output to a file, you can just provide the file name as the second argument.

tools/xep_tool/xep_tool.escript doap doc/mongooseim.doap\ntools/xep_tool/xep_tool.escript markdown doc/user-guide/Supported-XEPs.md\n

The last two commands have a shortcut in the Makefile:

make xeplist\n
"},{"location":"developers-guide/xep_tool/#examples-of-generated-content","title":"Examples of generated content","text":"
  • Markdown table
  • DOAP file
"},{"location":"getting-started/Installation/","title":"Installation","text":"

There are multiple ways in which you can get MongooseIM:

  • install MongooseIM binaries from a package Erlang Solutions delivers,
  • get the Docker image,
  • use the Helm chart.

Alternatively, check out our tutorial on How to build MongooseIM from source code for an introduction to compiling, building and testing MongooseIM.

"},{"location":"getting-started/Installation/#packages","title":"Packages","text":"

Go to the downloads section of the Erlang Solutions website, and choose the version of MongooseIM you want. The following sections describe the installation process for different operating systems.

Ubuntu and DebianCentOS compatible: AlmaLinux / Rocky Linux

Once the deb file is downloaded, open a terminal window and navigate to the directory containing the package. Use the following command to unpack and install MongooseIM:

sudo dpkg -i mongooseim_[version here].deb\n

An ODBC (RDBMS) driver must be installed on your machine to unpack and install from RPM packages. Enter the following command in a terminal window to install the latest unixODBC driver:

sudo yum install unixODBC\n

Once the RPM file is downloaded, open a terminal window and navigate to the directory containing the package. Use the following command to unpack and install MongooseIM:

sudo rpm -i mongooseim_[version here].rpm\n
"},{"location":"getting-started/Installation/#docker","title":"Docker","text":"

In order to install MongooseIM using Docker, simply run the following command:

docker pull mongooseim/mongooseim\n

This will download the latest release. You can use tags to download an exact version.

We build Docker images for every release marked with a git tag, as well as for every Pull Request. You can see all of them on DockerHub. In order to learn more about how the images are built, please visit the source code repository.

The mongooseimctl command is available in /usr/lib/mongooseim/bin/mongooseimctl in the container.

"},{"location":"getting-started/Installation/#helm","title":"Helm","text":"

You can easily install MongooseIM to a Kubernetes cluster with the help of our Helm chart, defined in the source code repository. After you have a Kubernetes cluster set up, simply run:

helm repo add mongoose https://esl.github.io/MongooseHelm/\n

to add our chart repository, and then:

helm install my-mongooseim mongoose/mongooseim\n

to install the chart. You can use any name instead of my-mongooseim, or generate a random name.

"},{"location":"getting-started/Installation/#source","title":"Source","text":"

Please see the tutorial How to build MongooseIM from source code.

"},{"location":"getting-started/Quick-setup/","title":"Quick Setup","text":"

In this short guide we will set MongooseIM up and get users chatting right away. The goal is to get to know MongooseIM, set it up, go through basic operations and validation.

You should have MongooseIM already installed on your machine and the mongooseimctl command available. If you have not installed MIM, please refer to the installation instructions.

Warning

This setup is not intended for production.

Note

This procedure has been tested on an Ubuntu 18.04.x LTS.

"},{"location":"getting-started/Quick-setup/#running-mongooseim","title":"Running MongooseIM","text":"

Warning

MongooseIM will use its default database - Mnesia, which is faster and simpler to set up, but not intended for production purposes when it comes to persistent data.

It is possible to use external databases instead - for more information, see the database backend configuration page.

The following command will start the MongooseIM server:

mongooseimctl start\n

When you change the config file and want to restart the MongooseIM server:

mongooseimctl restart\n

Use the following command to stop the MongooseIM server:

mongooseimctl stop\n
This takes a few seconds.

At any given time, the following command shows the status of a MongooseIM server:

mongooseimctl status\n
If the command replies nodedown then MongooseIM is not running. Else it will show its status starting, started, or stopping, and its version.

When needed, you can also launch the server in the interactive mode:

mongooseimctl live\n
This will allow you to better detect and understand the errors in the configuration. When MongooseIM is properly running, the Erlang shell/console is then shown. Just type Control-C twice to exit, the server will then be shut down.

For running MongooseIM in a non-interactive way within a supervision system (e.g. systemd), it is recommended to use the foreground mode:

mongooseimctl foreground\n
Typing Control-C will stop the server.

You can check server loglevel:

mongooseimctl get_loglevel\n

Run bootstrap scripts for initial configuration:

mongooseimctl bootstrap\n

It executes scripts inside the scripts/ directory with a bootstrap prefix in alphabetical order. More information

Execute Hello from the scripts/bootstrap01-hello.sh script that you can find in the release directory $REPO_DIR/_build/prod/rel/mongooseim.

"},{"location":"getting-started/Quick-setup/#chat-users","title":"Chat users","text":""},{"location":"getting-started/Quick-setup/#registering-creating-users","title":"Registering (creating) users","text":"

The default XMPP domain served by MongooseIM right after installation is localhost.

You can register (create) users with the mongooseimctl utility.

This command registers the user user@localhost using password secret.

mongooseimctl account registerUser --username user --domain localhost --password secret\n
Examples:
mongooseimctl account registerUser --username alice --domain localhost --password qwerty\nmongooseimctl account registerUser --username bob --domain localhost --password 12345678\nmongooseimctl account registerUser --username carol --domain localhost --password abc123\nmongooseimctl account registerUser --username dan --domain localhost --password dan\n

Warning

The password is entered manually in the command line and history is accessible to the command line users. This method is not recommended for production use, you may prefer for example LDAP.

You can check that the user account has been created:

mongooseimctl account checkUser --user alice@localhost\n{\n  \"data\" : {\n    \"account\" : {\n      \"checkUser\" : {\n        \"message\" : \"User alice@localhost exists\",\n        \"exist\" : true\n      }\n    }\n  }\n}\n

Now you can list all registered users in your host:

mongooseimctl account listUsers --domain localhost\n{\n  \"data\" : {\n    \"account\" : {\n      \"listUsers\" : [\n        \"alice@localhost\",\n        \"bob@localhost\",\n        \"carol@localhost\",\n        \"dan@localhost\"\n      ]\n    }\n  }\n}\n

If you want to delete a user from your host:

mongooseimctl account removeUser --user dan@localhost\n{\n  \"data\" : {\n    \"account\" : {\n      \"removeUser\" : {\n        \"message\" : \"User dan@localhost successfully unregistered\",\n        \"jid\" : \"dan@localhost\"\n      }\n    }\n  }\n}\n

"},{"location":"getting-started/Quick-setup/#populate-the-contact-lists-rosters","title":"Populate the contact lists (rosters)","text":"

As an example, let's add bob@localhost as a contact of alice@localhost:

mongooseimctl roster addContact --user alice@localhost --contact bob@localhost --groups '[\"friends\"]' --name Bobby\n{\n  \"data\" : {\n    \"roster\" : {\n      \"addContact\" : \"Contact added successfully\"\n    }\n  }\n}\n

You need to quote [\"friends\"] because it is a list of strings - JSON syntax is required for such complex types. The single quotes are there to prevent bash from interpreting special characters like \". If you want alice@localhost to receive presences from bob@localhost, you need to first request the subscription:

mongooseimctl roster subscription --user alice@localhost --contact bob@localhost --action INVITE\n{\n  \"data\" : {\n    \"roster\" : {\n      \"subscription\" : \"Subscription stanza with type subscribe sent successfully\"\n    }\n  }\n}\n

Then, accept the subscription request:

mongooseimctl roster subscription --user bob@localhost --contact alice@localhost --action ACCEPT\n{\n  \"data\" : {\n    \"roster\" : {\n      \"subscription\" : \"Subscription stanza with type subscribed sent successfully\"\n    }\n  }\n}\n

Verify the contact list:

mongooseimctl roster listContacts --user alice@localhost\n{\n  \"data\" : {\n    \"roster\" : {\n      \"listContacts\" : [\n        {\n          \"subscription\" : \"TO\",\n          \"name\" : \"Bobby\",\n          \"jid\" : \"bob@localhost\",\n          \"groups\" : [\n            \"friends\"\n          ],\n          \"ask\" : \"NONE\"\n        }\n      ]\n    }\n  }\n}\n

Note that bob@localhost has alice@localhost in his contacts as well, but he is not subscribed to her presences - the subscriptions are unidirectional.

mongooseimctl roster listContacts --user bob@localhost\n{\n  \"data\" : {\n    \"roster\" : {\n      \"listContacts\" : [\n        {\n          \"subscription\" : \"FROM\",\n          \"name\" : \"\",\n          \"jid\" : \"alice@localhost\",\n          \"groups\" : [\n\n          ],\n          \"ask\" : \"NONE\"\n        }\n      ]\n    }\n  }\n}\n

To quickly set up mutual subscriptions between users, you can use mongooseimctl roster setMutualSubscription.

"},{"location":"getting-started/Quick-setup/#basic-mongooseim-configuration","title":"Basic MongooseIM configuration","text":"

The main configuration file of MongooseIM is mongooseim.toml:

/etc/mongooseim/mongooseim.toml\n
You can edit this file to tailor MongooseIM to your needs. Learn more about MongooseIM configuration files in general, or jump right into the documentations of different mongooseim.toml sections.

For each change, edit the configuration file using the right Linux/Unix user. Save (and optionally backup, archive, or version) the configuration file and restart the MongooseIM server.

"},{"location":"getting-started/Quick-setup/#logging","title":"Logging","text":"

Set your own loglevel in the configuration file:

[general]\n  loglevel = \"notice\"\n

Save and exit your editor, restart MongooseIM and check your loglevel from the command line:

mongooseimctl get_loglevel\n

Read the mongooseim.log file:

/var/log/mongooseim/mongooseim.log\n

You can use commands such as cat, more or less, even head or tail. In order to see live logs:

tail -f /var/log/mongooseim/mongooseim.log\n
Type Ctrl+C to exit.

"},{"location":"getting-started/Quick-setup/#muc-multi-user-chat-for-groupchats","title":"MUC (Multi-User Chat) for groupchats","text":"

Enable MUC, or Multi-User Chat, for groupchats/channels in the mongooseim.toml file:

[modules.mod_muc]\n  host = \"muc.@HOST@\"\n  access = \"muc\"\n  access_create = \"muc_create\"\n

"},{"location":"getting-started/Quick-setup/#roster-versioning","title":"Roster versioning","text":"

For faster contact list downloads at each client/app (re)connection, edit the configuration file:

[modules.mod_roster]\n  versioning = true\n  store_current_id = true\n

"},{"location":"getting-started/Quick-setup/#review-configuration","title":"Review configuration","text":"

If MongooseIM does not start because the configuration file is broken in some way:

mongooseimctl live\n

"},{"location":"getting-started/Quick-setup/#using-an-xmppjabber-clientapp","title":"Using an XMPP/Jabber client/app","text":"

The following steps use the registered users on the MongooseIM server, done above.

Users that are registered on your server can now add their accounts in a chat application like Gajim (specifying either the server\u2019s IP address or domain name), and start chatting!

"},{"location":"getting-started/Quick-setup/#note-about-session-conflicts","title":"Note about session conflicts","text":"

If you're going to connect several clients with the same username and domain (for example a phone and a laptop), please make sure they are using different resource names (a kind of device/client identifier). This should be configurable in the account settings of every XMPP client.

Otherwise, the clients will keep disconnecting each other, because MongooseIM always terminates the older session in case of a conflict.

"},{"location":"getting-started/Quick-setup/#connect-gajim","title":"Connect Gajim","text":"

Gajim is available on many Linux platforms, macOS & Windows.

Warning

Gajim has an obsolete UX. However, it is still well maintained, and has a console that is extremely useful for debugging and testing/validation purposes at the XMPP protocol level.

  1. Launch Gajim. Ignore the window with Plugin updates.
  2. Go to Edit -> Accounts.
  3. Click Add in the left part of the window and select I already have an account I want to use, click Forward
  4. Enter the user, domain and password for the accounts registered previously on the command line
  5. Click Forward and then Finish
  6. Ignore the TLS/SSL error/warning and continue
  7. Close the Account window.

Add your three created users: alice, bob, and carol.

Check what users are currently connected.

mongooseimctl session listSessions\n{\n  \"data\" : {\n    \"session\" : {\n      \"listSessions\" : [\n        {\n          \"user\" : \"bob@localhost/BobsComputer\",\n          \"uptime\" : 12,\n          \"priority\" : 50,\n          \"port\" : 56267,\n          \"node\" : \"mongooseim@localhost\",\n          \"ip\" : \"127.0.0.1\",\n          \"connection\" : \"c2s_tls\"\n        }\n      ]\n    }\n  }\n}\n

The result shows that Bob is currently connected.

"},{"location":"getting-started/Quick-setup/#chat-with-another-person","title":"Chat with another person","text":"

Use alice's account to send messages directly to bob and use bob's account to reply directly to alice.

It is possible to send a message from the command line:

mongooseimctl stanza sendMessage --from alice@localhost --to bob@localhost --body 'Hi Bob!'\n

You need to quote Hi Bob!, because it contains a space. If you do it while Bob is connected, he should receive the message in the XMPP client.

"},{"location":"getting-started/Quick-setup/#group-chats","title":"Group chats","text":"

Use alice's account to create a groupchat channel on your muc.localhost service, and configure it by making it persistent. Invite bob and carol. From bob's and carol's accounts, accept the invitation and join the channel groupchat. All three users exchange messages.

"},{"location":"getting-started/Quick-setup/#contact-lists","title":"Contact lists","text":"

Use carol's account to add alice and bob to her contact list. Use alice's and bob's accounts to accept those additions.

Verify on the MongooseIM server:

mongooseimctl roster listContacts --user alice@localhost\nmongooseimctl roster listContacts --user bob@localhost\n

"},{"location":"getting-started/Quick-setup/#profile-vcard","title":"Profile (vCard)","text":"

Edit alice's profile (vCard) in Gajim: Modify Account..., then Profile, just set her Name to Alice.

Verify on the MongooseIM server:

mongooseimctl vcard getVcard --user alice@localhost\n{\n  \"data\" : {\n    \"vcard\" : {\n      \"getVcard\" : {\n        (...)\n        \"telephone\" : [\n          {\n            \"tags\" : [\n              \"HOME\",\n              \"VOICE\"\n            ],\n            \"number\" : \"123456789\"\n          }\n        ],\n        (...)\n        \"formattedName\" : \"Alice\",\n        (...)\n      }\n    }\n  }\n}\n

"},{"location":"getting-started/Quick-setup/#summary","title":"Summary","text":"

Now you have the minimum knowledge: you know how to deploy MongooseIM, configure some basic features, check/verify a few useful items, validate it both on the client and server side, and utilize a few good practices.

"},{"location":"getting-started/Quick-setup/#summary-command-line","title":"Summary: command line","text":"

You know mongooseimctl, with basic server management commands such as:

  • start, restart, stop, status, live, foreground
  • get_loglevel

Other commands shown above correspond to the GraphQL Admin API operations, and they are grouped into the following categories:

  • account contains registerUser, checkUser, listUsers, removeUser
  • roster contains addContact, subscription, listContacts, setMutualSubscription
  • session contains listSessions
  • stanza contains sendMessage
  • vcard contains getVcard

There are more categories and commands. For a list of categories, use mongooseimctl without any arguments. To get a list of commands in a particular category, call mongooseimctl category. You can also get more information about a particular command with mongooseimctl category command --help.

"},{"location":"getting-started/Quick-setup/#summary-files","title":"Summary: files","text":"

You know basic entries in the files: /etc/mongooseim/mongooseim.toml /var/log/mongooseim/mongooseim.log

"},{"location":"getting-started/Quick-setup/#summary-clientapp","title":"Summary: client/app","text":"

In an app, you know how to:

  • connect
  • chat with another user
  • create/join groupchats
  • manage contact lists (roster)
  • edit profile (vCard)
"},{"location":"getting-started/Quick-setup/#go-further","title":"Go further","text":"

For the next steps, we now encourage you to:

  1. Deploy it as a single node, on a publicly accessible server, with a real routable domain name with its certificate
  2. Add an RDBMS for persistent data, and LDAP for user directory
  3. Enable message history with MAM (Message Archive Management)
  4. Enable file exchange with HTTP file upload, with an S3-compatible object storage server
  5. Use a mobile app for users to chat
"},{"location":"graphql-api/Admin-GraphQL/","title":"MongooseIM's GraphQL API for the administrator","text":"

The new GraphQL admin API contains all the commands available through the REST API, and the vast majority of the CLI (mongooseimctl) commands. Only commands that wouldn't have worked well with GraphQL style have been omitted.

We can distinguish two levels of the administration. A global admin (has access to all commands), and the admin per domain (has access only to the own domain). Each of them is handled by a different endpoint. Please see the configuration Listen section for more details.

There is only one schema for both admin types. Admin per domain simply has no permissions to execute global commands or commands with not owned domain. The API documentation clearly says which commands are global.

Queries and mutations can be executed with the POST or GET method, as specified in the GraphQL documentation. The endpoint URL is as configured in the Listen section, e.g. http://localhost:5551/api/graphql for the global admin.

Subscriptions can be executed with the GET method, and are handled with Server-Sent Events (SSE). The endpoint URL is the same as for regular queries with the addition of /sse, e.g. http://localhost:5551/api/graphql/sse for the global admin.

"},{"location":"graphql-api/Admin-GraphQL/#domain-admin-configuration","title":"Domain admin configuration","text":"

Out of the box, domains are created with a disabled admin account. Admin per domain can be enabled only by the global admin with the command mutation.domains.setDomainPassword. Afterward, the domain admin can change the password with the same command.

The admin per domain can be disabled by the global admin with the command mutation.domains.removeDomainPassword.

"},{"location":"graphql-api/Admin-GraphQL/#authentication","title":"Authentication","text":"

MongooseIM uses Basic Authentication as an authentication method for the GraphQL API.

Basic authentication is a simple authentication scheme built into the HTTP protocol. Each HTTP request to the GraphQL API has to contain the Authorization header with the word Basic followed by a space and a base64-encoded string.

"},{"location":"graphql-api/Admin-GraphQL/#global-admin-endpoint","title":"Global admin endpoint","text":"

The authentication for global admin is optional because this endpoint shouldn't be exposed outside. The credentials set in the handler section in the config enable the authentication. Please see the GraphQL handler section for more details.

The base64-encoded string should have the form LOGIN:PASSWORD, where:

  • LOGIN is the login set in the config,
  • PASSWORD is the password set in the config.
"},{"location":"graphql-api/Admin-GraphQL/#domain-admin-endpoint","title":"Domain admin endpoint","text":"

For authorization as a domain admin, the base64-encoded string should have the form admin@DOMAIN:PASSWORD, where:

  • DOMAIN is the domain to authorize,
  • PASSWORD is the password for the given domain.
"},{"location":"graphql-api/Admin-GraphQL/#graphiql","title":"GraphiQL","text":"

GraphiQL is the GraphQL integrated development environment (IDE). It allows you to experiment with the API and run queries with ease. The GraphiQL page is automatically served with each GraphQL endpoint. For example:

http://localhost:5551/api/graphql

Open the above address in your browser and try to use it.

"},{"location":"graphql-api/Admin-GraphQL/#authorization","title":"Authorization","text":"

Executing some of the queries requires authorization. Just add the following JSON into the header tab. Remember to update the credentials.

{\n   \"Authorization\": \"Basic YWxpY2VAbG9jYWxob3N0OnNlY3JldA==\"\n}\n
"},{"location":"graphql-api/Admin-GraphQL/#static-documentation","title":"Static documentation","text":"

Open GraphQL documentation as a full page

"},{"location":"graphql-api/User-GraphQL/","title":"MongooseIM's GraphQL API for the user","text":"

The new GraphQL user API contains all commands from the client REST API and provides plenty of new ones. Multiple commands previously available only for the admin have their counterparts for the user.

Queries and mutations can be executed with the POST or GET method, as specified in the GraphQL documentation. The endpoint URL is as configured in the Listen section, e.g. http://localhost:5561/api/graphql.

Subscriptions can be executed with the GET method, and are handled with Server-Sent Events (SSE). The endpoint URL is the same as for regular queries with the addition of /sse, e.g. http://localhost:5561/api/graphql/sse.

"},{"location":"graphql-api/User-GraphQL/#authentication","title":"Authentication","text":"

MongooseIM uses Basic Authentication as the authentication method for the GraphQL API.

Basic authentication is a simple authentication scheme built into the HTTP protocol. Each HTTP request to the client REST API has to contain the Authorization header with the word Basic followed by a space and a base64-encoded string username@host:password, where:

  • username@host is the user's bare JID,
  • password is the password used to register the user's account.

For example, to authorize as alice@localhost with the password secret, the client would send a header:

Authorization: Basic YWxpY2VAbG9jYWxob3N0OnNlY3JldA==\n
"},{"location":"graphql-api/User-GraphQL/#graphiql","title":"GraphiQL","text":"

GraphiQL is the GraphQL integrated development environment (IDE). It allows you to experiment with the API and run queries with ease. The GraphiQL page is automatically served with each GraphQL endpoint. For example:

http://localhost:5561/api/graphql

Open the above address in your browser and try to use it.

"},{"location":"graphql-api/User-GraphQL/#authorization","title":"Authorization","text":"

Executing some of the queries requires authorization. Just add the following JSON into the header tab. Remember to update the credentials.

{\n   \"Authorization\": \"Basic YWxpY2VAbG9jYWxob3N0OnNlY3JldA==\"\n}\n
"},{"location":"graphql-api/User-GraphQL/#static-documentation","title":"Static documentation","text":"

Open GraphQL documentation as a full page

"},{"location":"listeners/listen-c2s/","title":"Client to server (C2S): [[listen.c2s]]","text":"

Handles XMPP client-to-server (C2S) connections. The recommended port number for a C2S listener is 5222 as registered in the XMPP protocol.

"},{"location":"listeners/listen-c2s/#configuration-options","title":"Configuration options","text":"

The following options are supported for each C2S listener:

"},{"location":"listeners/listen-c2s/#listenc2saccess","title":"listen.c2s.access","text":"
  • Syntax: string, rule name or \"all\"
  • Default: \"all\"
  • Example: access = \"c2s\"

The rule that determines who is allowed to connect. By default, the rule is \"all\", which means that anyone can connect. The rule referenced here needs to be defined in the access configuration section.

"},{"location":"listeners/listen-c2s/#listenc2sshaper","title":"listen.c2s.shaper","text":"
  • Syntax: string, rule name
  • Default: \"none\" (no shaper)
  • Example: shaper = \"c2s_shaper\"

The rule that determines what traffic shaper is used to limit the incoming XMPP traffic to prevent the server from being flooded with incoming data. The rule referenced here needs to be defined in the access configuration section. The value of the access rule needs to be either the shaper name or the string \"none\", which means no shaper.

"},{"location":"listeners/listen-c2s/#listenc2smax_connections","title":"listen.c2s.max_connections","text":"
  • Syntax: positive integer or the string \"infinity\"
  • Default: \"infinity\"
  • Example: max_connections = 10000

Maximum number of open connections. This is a soft limit according to the Ranch documentation.

"},{"location":"listeners/listen-c2s/#listenc2sc2s_state_timeout","title":"listen.c2s.c2s_state_timeout","text":"
  • Syntax: non-negative integer or the string \"infinity\"
  • Default: 5000
  • Example: c2s_state_timeout = 10_000

Timeout value (in milliseconds) used by the C2S state machine when waiting for the connecting client to respond during stream negotiation and SASL authentication. After the timeout the server responds with the connection-timeout stream error and closes the connection.

"},{"location":"listeners/listen-c2s/#listenc2sreuse_port","title":"listen.c2s.reuse_port","text":"
  • Syntax: boolean
  • Default: false
  • Example: reuse_port = true

Enables linux support for SO_REUSEPORT, see Stack Overflow for more details.

"},{"location":"listeners/listen-c2s/#listenc2sbackwards_compatible_session","title":"listen.c2s.backwards_compatible_session","text":"
  • Syntax: boolean
  • Default: true
  • Example: backwards_compatible_session = false

Enables backward-compatible session establishment IQs. See https://www.rfc-editor.org/rfc/rfc6121.html#section-1.4:

[RFC3921] specified one additional precondition: formal establishment of an instant messaging and presence session. Implementation and deployment experience has shown that this additional step is unnecessary. However, for backward compatibility an implementation MAY still offer that feature. This enables older software to connect while letting newer software save a round trip.

"},{"location":"listeners/listen-c2s/#listenc2sallowed_auth_methods","title":"listen.c2s.allowed_auth_methods","text":"
  • Syntax: array of strings. Allowed values: \"internal\", \"rdbms\", \"external\", \"anonymous\", \"ldap\", \"jwt\", \"http\", \"pki\", \"dummy\"
  • Default: not set
  • Example: allowed_auth_methods = [\"internal\"]

A subset of enabled methods to login with for this listener. This option allows to enable only some backends. It is useful, if you want to have several listeners for different types of users (for example, some users use PKI while other users use LDAP auth). Same syntax as for auth.methods option.

"},{"location":"listeners/listen-c2s/#tls-options-for-c2s","title":"TLS options for C2S","text":"

To enable TLS, a TOML subsection called tls has to be present in the listener options. To disable TLS, make sure that the section is not present, and no TLS options are set. You can set the following options in this section:

"},{"location":"listeners/listen-c2s/#listenc2stlsmode","title":"listen.c2s.tls.mode","text":"
  • Syntax: string, one of \"tls\", \"starttls\", \"starttls_required\"
  • Default: \"starttls\"
  • Example: tls.mode = \"starttls\"

This option determines how clients are supposed to set up the TLS encryption:

  • tls - clients must initiate a TLS session immediately after connecting, before beginning the normal XML stream,
  • starttls - enables StartTLS support; requires certfile,
  • starttls_required - enables and enforces StartTLS usage.
"},{"location":"listeners/listen-c2s/#listenc2stlsmodule","title":"listen.c2s.tls.module","text":"
  • Syntax: string, one of \"just_tls\", \"fast_tls\"
  • Default: \"fast_tls\"
  • Example: tls.module = \"just_tls\"

By default, the TLS library used for C2S connections is fast_tls, which uses OpenSSL-based NIFs. It is possible to change it to just_tls - Erlang TLS implementation provided by OTP. Some TLS-related options described here have different formats for these two libraries.

"},{"location":"listeners/listen-c2s/#listenc2stlsverify_mode","title":"listen.c2s.tls.verify_mode","text":"
  • Syntax: string, one of \"peer\", \"selfsigned_peer\", \"none\"
  • Default: \"peer\"
  • Example: tls.verify_mode = \"none\"

Specifies the way client certificate verification works:

  • peer - makes sure the client certificate is valid and signed by a trusted CA. Requires a valid cacertfile.
  • selfsigned_peer - makes sure the client certificate is valid, but allows self-signed certificates; supported only by just_tls. Requires a valid cacertfile.
  • none - client certificate is not checked.
"},{"location":"listeners/listen-c2s/#listenc2stlscertfile","title":"listen.c2s.tls.certfile","text":"
  • Syntax: string, path in the file system
  • Default: not set
  • Example: tls.certfile = \"server.pem\"

Path to the X509 PEM file with a certificate and a private key (not protected by a password). If the certificate is signed by an intermediate CA, you should specify here the whole CA chain by concatenating all public keys together and appending the private key after that.

Note

For just_tls this file should only contain the certificate and the path to the private key can be provided separately as keyfile.

"},{"location":"listeners/listen-c2s/#listenc2stlscacertfile","title":"listen.c2s.tls.cacertfile","text":"
  • Syntax: string, path in the file system
  • Default: not set
  • Example: tls.cacertfile = \"ca.pem\"

Path to the X509 PEM file with a CA chain that will be used to verify clients. It won't have any effect if verify_mode is \"none\".

"},{"location":"listeners/listen-c2s/#listenc2stlsdhfile","title":"listen.c2s.tls.dhfile","text":"
  • Syntax: string, path in the file system
  • Default: not set
  • Example: tls.dhfile = \"dh.pem\"

Path to the Diffie-Hellman parameter file.

"},{"location":"listeners/listen-c2s/#listenc2stlsciphers","title":"listen.c2s.tls.ciphers","text":"
  • Syntax: string with the OpenSSL cipher suite specification
  • Default: for fast_tls the default is \"TLSv1.2:TLSv1.3\". For just_tls this option is not set by default - all supported suites are accepted.
  • Example: tls.ciphers = \"ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-GCM-SHA384\"

Cipher suites to use with StartTLS or TLS. Please refer to the OpenSSL documentation for the cipher string format. For fast_tls, this string can be used to specify versions as well. For just_tls, see the Erlang/OTP SSL documentation for allowed values.

"},{"location":"listeners/listen-c2s/#listenc2stlsprotocol_options-only-for-fast_tls","title":"listen.c2s.tls.protocol_options - only for fast_tls","text":"
  • Syntax: array of strings
  • Default: [\"no_sslv2\", \"no_sslv3\", \"no_tlsv1\", \"no_tlsv1_1\"]
  • Example: tls.protocol_options = [\"no_tlsv1\", \"no_tlsv1_1\"]

A list of OpenSSL options for FastTLS. You can find the mappings between supported options and actual OpenSSL flags in the fast_tls source code.

"},{"location":"listeners/listen-c2s/#listenc2stlskeyfile-only-for-just_tls","title":"listen.c2s.tls.keyfile - only for just_tls","text":"
  • Syntax: string, path in the file system
  • Default: not set
  • Example: tls.keyfile = \"key.pem\"

Path to the X509 PEM file with the private key.

"},{"location":"listeners/listen-c2s/#listenc2stlspassword-only-for-just_tls","title":"listen.c2s.tls.password - only for just_tls","text":"
  • Syntax: string
  • Default: not set
  • Example: tls.password = \"secret\"

Password to the X509 PEM file with the private key.

"},{"location":"listeners/listen-c2s/#listenc2stlsdisconnect_on_failure-only-for-just_tls","title":"listen.c2s.tls.disconnect_on_failure - only for just_tls","text":"
  • Syntax: boolean
  • Default: true
  • Example: tls.disconnect_on_failure = false
"},{"location":"listeners/listen-c2s/#listenc2stlsversions-only-for-just_tls","title":"listen.c2s.tls.versions - only for just_tls","text":"
  • Syntax: array of strings
  • Default: not set, all supported versions are accepted
  • Example: tls.versions = [\"tlsv1.2\", \"tlsv1.3\"]

TLS versions to use with StartTLS or TLS. For allowed values, see the Erlang/OTP SSL documentation

"},{"location":"listeners/listen-c2s/#listenc2stlscrl_files-only-for-just_tls","title":"listen.c2s.tls.crl_files - only for just_tls","text":"
  • Syntax: array of strings, paths in the file system
  • Default: not set
  • Example: tls.crl_files = [\"certs.crl\"]

Specifies the paths to Certificate Revocation Lists.

"},{"location":"listeners/listen-c2s/#c2s-listener-configuration-example","title":"C2S listener configuration example","text":"

The following section configures two C2S listeners.

[[listen.c2s]]\n  port = 5222\n  access = \"c2s\"\n  shaper = \"c2s_shaper\"\n  max_stanza_size = 65536\n  tls.certfile = \"server.pem\"\n  tls.dhfile = \"dh_server.pem\"\n\n[[listen.c2s]]\n  port = 5223\n  access = \"c2s\"\n  shaper = \"c2s_shaper\"\n  max_stanza_size = 65536\n
  • One at port 5222, which accepts a plain TCP connection and allows to use StartTLS for upgrading it to an encrypted one. The files containing the certificate and the DH parameter are also provided.
  • One at port 5223, which accepts only encrypted TLS connections - this is the legacy method as StartTLS is preferred.

Both listeners use c2s and c2s_shaper rules for access management and traffic shaping, respectively.

"},{"location":"listeners/listen-components/","title":"XMPP components: [[listen.service]]","text":"

Interface for external services acting as XMPP components (XEP-0114: Jabber Component Protocol), enabling communication between MongooseIM and external services over the XMPP network. The recommended port number for a component listener is 8888.

According to XEP-0114: Jabber Component Protocol the component's hostname should be given in the opening stream element.

Warning

This interface does not support dynamic domains. Do not use this interface together with dynamic domains.

"},{"location":"listeners/listen-components/#configuration-options","title":"Configuration options","text":"

The following options are supported for each component listener under listen.service subsection:

"},{"location":"listeners/listen-components/#listenserviceaccess","title":"listen.service.access","text":"
  • Syntax: string, rule name or \"all\"
  • Default: \"all\"
  • Example: access = \"component\"

Determines who is allowed to send data to external components. By default, the rule is all, which means that anyone can communicate with the components.

"},{"location":"listeners/listen-components/#listenservicepassword","title":"listen.service.password","text":"
  • Syntax: string
  • Default: no default, this option is mandatory
  • Example: password = \"secret\"

The external component needs to authenticate with this password to connect.

"},{"location":"listeners/listen-components/#listenserviceshaper_rule","title":"listen.service.shaper_rule","text":"
  • Syntax: string, name of the shaper
  • Default: \"none\"
  • Example: shaper = \"component_shaper\"

The traffic shaper used to limit the XMPP traffic to prevent the server from being flooded with incoming data. Contrary to the C2S and S2S shapers, here the shaper name directly references the shaper that needs to be defined in the shaper section.

"},{"location":"listeners/listen-components/#listenservicecheck_from","title":"listen.service.check_from","text":"
  • Syntax: boolean
  • Default: true
  • Example: check_from = false

Specifies whether the server should verify the \"from\" field in stanzas from the component.

"},{"location":"listeners/listen-components/#listenservicehidden_components","title":"listen.service.hidden_components","text":"
  • Syntax: boolean
  • Default: false
  • Example: hidden_components = true

All components connected to an endpoint with this option enabled will be considered \"hidden\".

Hidden components have a special flag enabled in the internal component table. Alone, it doesn't change the server behaviour in any way, but it may be used by other modules and extensions to execute special logic. An example would be mod_disco, which may be configured to filter out hidden components from disco results, so they won't be discoverable by clients. A reason to do so could be reduced traffic - systems with many components could return very long disco responses. Also, some deployments would like to avoid revealing some services; not because it is a security threat (this method does not prevent clients from communicating with hidden components), but rather because they are not meant to interact with clients directly (e.g. helper components for other components).

"},{"location":"listeners/listen-components/#listenserviceconflict_behaviour","title":"listen.service.conflict_behaviour","text":"
  • Syntax: string, one of: \"disconnect\", \"kick_old\"
  • Default: \"disconnect\"
  • Example: conflict_behaviour = \"kick_old\"

By default, when a component tries to connect and a registration conflict occurs, the connection is dropped with the following error:

<stream:error>\n  <conflict xmlns='urn:ietf:params:xml:ns:xmpp-streams'/>\n</stream:error>\n</stream:stream>\n

It makes implementing the reconnection logic difficult, because the old connection would not allow any other connections. By setting this option to kick_old, we drop any old connections registered at the same host before accepting new ones.

"},{"location":"listeners/listen-components/#listenservicemax_fsm_queue","title":"listen.service.max_fsm_queue","text":"
  • Syntax: positive integer
  • Default: not set - no limit
  • Example: max_fsm_queue = 1000

Message queue limit to prevent resource exhaustion; overrides the value set in the general section.

"},{"location":"listeners/listen-components/#custom-extension-to-the-protocol","title":"Custom extension to the protocol","text":"

In order to register a component for all virtual hosts served by the server (see hosts in the general section), the component must add the attribute is_subdomain=\"true\" to the opening stream element. This may be helpful if someone wants to have a single instance of a component serving multiple virtual hosts. The is_subdomain attribute is optional and the default behaviour is as described in XEP-0114: Jabber Component Protocol.

"},{"location":"listeners/listen-components/#service-listener-configuration-example","title":"Service listener configuration example","text":"

The following section configures a service listener, accepting connections from external components. The IP address is limited to loopback to prevent connections from different hosts. All components are allowed to connect, but they need to provide the password. The shaper named fast needs to be defined in the shaper section.

[[listen.service]]\n  port = 8888\n  access = \"all\"\n  shaper_rule = \"fast\"\n  ip_address = \"127.0.0.1\"\n  password = \"secret\"\n
"},{"location":"listeners/listen-http/","title":"HTTP-based services: [[listen.http]]","text":"

Manages all HTTP-based services, such as BOSH (HTTP long-polling), WebSocket, GraphQL and REST. It uses the Cowboy web server. Recommended port number: 5280 for BOSH/WS.

"},{"location":"listeners/listen-http/#configuration-options","title":"Configuration options","text":"

Following configuration option is used to set up an HTTP handler:

"},{"location":"listeners/listen-http/#listenhttphandlers","title":"listen.http.handlers","text":"
  • Syntax: each handler is specified in a subsection starting with [[listen.http.handlers.type]] where type is one of the allowed handler types, handling different connection types:

    • mod_bosh - for BOSH connections,
    • mod_websockets - for WebSocket connections,
    • mongoose_graphql_handler - for GraphQL API,
    • mongoose_admin_api, mongoose_client_api - for REST API.

    These types are described below in more detail. The double-bracket syntax is used because there can be multiple handlers of a given type, so for each type there is a TOML array of one or more tables (subsections).

  • Default: [] - no handlers enabled, all of them need to be specified explicitly.

  • Example: two handlers, one for BOSH and one for WebSockets
      [[listen.http.handlers.mod_bosh]]\n    host = \"_\"\n    path = \"/http-bind\"\n\n  [[listen.http.handlers.mod_websockets]]\n    host = \"_\"\n    path = \"/ws-xmpp\"\n
"},{"location":"listeners/listen-http/#common-handler-options","title":"Common handler options","text":""},{"location":"listeners/listen-http/#listenhttphandlershost","title":"listen.http.handlers.*.host","text":"
  • Syntax: string
  • Default: no default, mandatory option
  • Example: host = \"localhost\"

Host name for this handler or \"_\" for any host.

"},{"location":"listeners/listen-http/#listenhttphandlerspath","title":"listen.http.handlers.*.path","text":"
  • Syntax: string
  • Default: no default, mandatory option
  • Example: path = \"/ws-xmpp\"

Path for this handler.

"},{"location":"listeners/listen-http/#handler-types-bosh-mod_bosh","title":"Handler types: BOSH - mod_bosh","text":"

The recommended configuration is shown in Example 1 below. To handle incoming BOSH traffic you need to configure the mod_bosh module in the modules section as well.

"},{"location":"listeners/listen-http/#handler-types-websockets-mod_websockets","title":"Handler types: WebSockets - mod_websockets","text":"

The recommended configuration is shown in Example 1 below. Websocket connections as defined in RFC 7395. You can pass the following optional parameters:

"},{"location":"listeners/listen-http/#listenhttphandlersmod_websocketstimeout","title":"listen.http.handlers.mod_websockets.timeout","text":"
  • Syntax: non-negative integer or the string \"infinity\"
  • Default: \"infinity\"
  • Example: timeout = 60_000

The time (in milliseconds) after which an inactive user is disconnected.

"},{"location":"listeners/listen-http/#listenhttphandlersmod_websocketsping_rate","title":"listen.http.handlers.mod_websockets.ping_rate","text":"
  • Syntax: positive integer
  • Default: not set - pings disabled
  • Example: ping_rate = 10_000

The time (in milliseconds) between pings sent by server. By setting this option you enable server-side pinging.

"},{"location":"listeners/listen-http/#listenhttphandlersmod_websocketsmax_stanza_size","title":"listen.http.handlers.mod_websockets.max_stanza_size","text":"
  • Syntax: positive integer or the string \"infinity\"
  • Default: \"infinity\"
  • Example: max_stanza_size = 10_000

Maximum allowed incoming stanza size in bytes.

Warning

This limit is checked after the input data parsing, so it does not apply to the input data size itself.

"},{"location":"listeners/listen-http/#listenhttphandlersmod_websocketsc2s_state_timeout","title":"listen.http.handlers.mod_websockets.c2s_state_timeout","text":"

Same as the C2S option

"},{"location":"listeners/listen-http/#listenhttphandlersmod_websocketsbackwards_compatible_session","title":"listen.http.handlers.mod_websockets.backwards_compatible_session","text":"

Same as the C2S option

"},{"location":"listeners/listen-http/#handler-types-graphql-api-mongoose_graphql_handler","title":"Handler types: GraphQL API - mongoose_graphql_handler","text":"

For more information about the API, see the Admin interface and User interface documentation. The following options are supported for this handler:

"},{"location":"listeners/listen-http/#listenhttphandlersmongoose_graphql_handlerschema_endpoint","title":"listen.http.handlers.mongoose_graphql_handler.schema_endpoint","text":"
  • Syntax: string, one of \"admin\", \"domain_admin\", \"user\"
  • Default: no default, this option is mandatory
  • Example: schema_endpoint = \"admin\"

Specifies the schema endpoint:

  • admin - Endpoint with the admin commands. A global admin has permission to execute all commands. See the recommended configuration - Example 2.
  • domain_admin - Endpoint with the admin commands. A domain admin has permission to execute only commands with the owned domain. See the recommended configuration - Example 3.
  • user - Endpoint with the user commands. Used to manage the authorized user. See the recommended configuration - Example 4.
"},{"location":"listeners/listen-http/#listenhttphandlersmongoose_graphql_handlerusername-only-for-admin","title":"listen.http.handlers.mongoose_graphql_handler.username - only for admin","text":"
  • Syntax: string
  • Default: not set
  • Example: username = \"admin\"

When set, enables authentication for the admin API, otherwise it is disabled. Requires setting password.

"},{"location":"listeners/listen-http/#listenhttphandlersmongoose_graphql_handlerpassword-only-for-admin","title":"listen.http.handlers.mongoose_graphql_handler.password - only for admin","text":"
  • Syntax: string
  • Default: not set
  • Example: password = \"secret\"
"},{"location":"listeners/listen-http/#listenhttphandlersmongoose_graphql_handlerallowed_categories","title":"listen.http.handlers.mongoose_graphql_handler.allowed_categories","text":"
  • Syntax: non-empty array of strings. Allowed values: \"checkAuth\", \"account\", \"domain\", \"last\", \"muc\", \"muc_light\", \"session\", \"stanza\", \"roster\", \"vcard\", \"private\", \"metric\", \"stat\", \"gdpr\", \"mnesia\", \"server\", \"inbox\", \"http_upload\", \"offline\", \"token\"
  • Default: all GraphQL categories enabled
  • Example: allowed_categories = [\"domain\", \"last\"]

By default, when the option is not included, all GraphQL categories are enabled, so you don't need to add this option. When this option is added, only listed GraphQL categories will be processed. For others, the error \"category disabled\" will be returned.

"},{"location":"listeners/listen-http/#listenhttphandlersmongoose_graphql_handlersse_idle_timeout","title":"listen.http.handlers.mongoose_graphql_handler.sse_idle_timeout","text":"
  • Syntax: positive integer or the string \"infinity\"
  • Default: 3600000
  • Example: sse_idle_timeout = 3600000

This option specifies the time in milliseconds after which the SSE connection is closed when idle. The default value is 1 hour.

"},{"location":"listeners/listen-http/#handler-types-rest-api-admin-mongoose_admin_api","title":"Handler types: REST API - Admin - mongoose_admin_api","text":"

The recommended configuration is shown in Example 5 below. For more information about the API, see the REST interface documentation. The following options are supported for this handler:

"},{"location":"listeners/listen-http/#listenhttphandlersmongoose_admin_apiusername","title":"listen.http.handlers.mongoose_admin_api.username","text":"
  • Syntax: string
  • Default: not set
  • Example: username = \"admin\"

When set, enables authentication for the admin API, otherwise it is disabled. Requires setting password.

"},{"location":"listeners/listen-http/#listenhttphandlersmongoose_admin_apipassword","title":"listen.http.handlers.mongoose_admin_api.password","text":"
  • Syntax: string
  • Default: not set
  • Example: password = \"secret\"

Required to enable authentication for the admin API.

"},{"location":"listeners/listen-http/#listenhttphandlersmongoose_admin_apihandlers","title":"listen.http.handlers.mongoose_admin_api.handlers","text":"
  • Syntax: array of strings. Allowed values: \"contacts\", \"users\", \"sessions\", \"messages\", \"stanzas\", \"muc_light\", \"muc\", \"inbox\", \"domain\", \"metrics\".
  • Default: all API handler modules enabled
  • Example: handlers = [\"domain\"]

The admin API consists of several handler modules, each of them implementing a subset of the functionality. By default, all modules are enabled, so you don't need to change this option.

"},{"location":"listeners/listen-http/#handler-types-rest-api-client-mongoose_client_api","title":"Handler types: REST API - Client - mongoose_client_api","text":"

The recommended configuration is shown in Example 6 below. Please refer to REST interface documentation for more information. The following options are supported for this handler:

"},{"location":"listeners/listen-http/#listenhttphandlersmongoose_client_apihandlers","title":"listen.http.handlers.mongoose_client_api.handlers","text":"
  • Syntax: array of strings. Allowed values: \"sse\", \"messages\", \"contacts\", \"rooms\", \"rooms_config\", \"rooms_users\", \"rooms_messages\".
  • Default: all API handler modules enabled
  • Example: handlers = [\"messages\", \"sse\"]

The client API consists of several handler modules, each of them implementing a subset of the functionality. By default, all modules are enabled, so you don't need to change this option.

"},{"location":"listeners/listen-http/#listenhttphandlersmongoose_client_apidocs","title":"listen.http.handlers.mongoose_client_api.docs","text":"
  • Syntax: boolean
  • Default: true
  • Example: docs = false

The Swagger documentation of the client API is hosted at the /api-docs path. You can disable the hosted documentation by setting this option to false.

"},{"location":"listeners/listen-http/#transport-options","title":"Transport options","text":"

The options listed below are used to modify the HTTP transport settings.

"},{"location":"listeners/listen-http/#listenhttptransportnum_acceptors","title":"listen.http.transport.num_acceptors","text":"
  • Syntax: positive integer
  • Default: 100
  • Example: transport.num_acceptors = 10

Number of HTTP connection acceptors.

"},{"location":"listeners/listen-http/#listenhttptransportmax_connections","title":"listen.http.transport.max_connections","text":"
  • Syntax: positive integer or the string \"infinity\"
  • Default: 1024
  • Example: transport.max_connections = \"infinity\"

Maximum number of open connections. The default value of 1024 is set by the Ranch library.

"},{"location":"listeners/listen-http/#tls-https-options","title":"TLS (HTTPS) options","text":"

By default, the HTTP listener does not use TLS. To use TLS (HTTPS), you need to add a TOML table (subsection) called tls to the config file with the certfile and keyfile options that specify the location of the certificate and private key files, respectively. If the keyfile is password-protected, password is required as well. If the certificate is signed by an intermediate CA, one will probably want to specify the CA chain with the cacertfile option. The library used for HTTP is the Erlang TLS implementation provided by OTP - see ranch_ssl for details.

The options accepted here are: verify_mode, certfile, cacertfile, ciphers, keyfile, password, versions, dhfile. They have the same semantics as the corresponding c2s options for just_tls.

"},{"location":"listeners/listen-http/#protocol-options","title":"Protocol options","text":"

These are some additional options of the HTTP protocol.

"},{"location":"listeners/listen-http/#listenhttpprotocolcompress","title":"listen.http.protocol.compress","text":"
  • Syntax: boolean
  • Default: false
  • Example: protocol.compress = true

Compresses response bodies automatically when the client supports it.

"},{"location":"listeners/listen-http/#http-listener-configuration-examples","title":"HTTP listener configuration examples","text":"

The examples shown below are included in the provided default configuration file.

"},{"location":"listeners/listen-http/#example-1-bosh-and-ws","title":"Example 1. BOSH and WS","text":"

The following listener accepts BOSH and WebSocket connections and has TLS configured.

[[listen.http]]\n  port = 5285\n  tls.certfile = \"mycert.pem\"\n  tls.keyfile = \"mykey.pem\"\n  tls.password =  \"secret\"\n\n  [[listen.http.handlers.mod_bosh]]\n    host = \"_\"\n    path = \"/http-bind\"\n\n  [[listen.http.handlers.mod_websockets]]\n    host = \"_\"\n    path = \"/ws-xmpp\"\n
"},{"location":"listeners/listen-http/#example-2-admin-graphql-api","title":"Example 2. Admin GraphQL API","text":"

GraphQL API for administration, the listener is bound to 127.0.0.1 for increased security. The number of acceptors and connections is specified (reduced).

[[listen.http]]\n  ip_address = \"127.0.0.1\"\n  port = 5551\n  transport.num_acceptors = 5\n  transport.max_connections = 10\n\n  [[listen.http.handlers.mongoose_graphql_handler]]\n    host = \"localhost\"\n    path = \"/api/graphql\"\n    schema_endpoint = \"admin\"\n    username = \"admin\"\n    password = \"secret\"\n    allowed_categories = [\"server\", \"last\", \"vcard\"]\n
"},{"location":"listeners/listen-http/#example-3-domain-admin-graphql-api","title":"Example 3. Domain Admin GraphQL API","text":"

GraphQL API for the domain admin.

[[listen.http]]\n  ip_address = \"0.0.0.0\"\n  port = 5541\n  transport.num_acceptors = 10\n  transport.max_connections = 1024\n\n  [[listen.http.handlers.mongoose_graphql_handler]]\n    host = \"_\"\n    path = \"/api/graphql\"\n    schema_endpoint = \"domain_admin\"\n
"},{"location":"listeners/listen-http/#example-4-user-graphql-api","title":"Example 4. User GraphQL API","text":"

GraphQL API for the user.

[[listen.http]]\n  ip_address = \"0.0.0.0\"\n  port = 5561\n  transport.num_acceptors = 10\n  transport.max_connections = 1024\n\n  [[listen.http.handlers.mongoose_graphql_handler]]\n    host = \"_\"\n    path = \"/api/graphql\"\n    schema_endpoint = \"user\"\n
"},{"location":"listeners/listen-http/#example-5-admin-rest-api","title":"Example 5. Admin REST API","text":"

REST API for administration, the listener is bound to 127.0.0.1 for increased security. The number of acceptors and connections is specified (reduced). Basic HTTP authentication is used as well.

[[listen.http]]\n  ip_address = \"127.0.0.1\"\n  port = 8088\n  transport.num_acceptors = 5\n  transport.max_connections = 10\n\n  [[listen.http.handlers.mongoose_admin_api]]\n    host = \"localhost\"\n    path = \"/api\"\n    username = \"admin\"\n    password = \"secret\"\n
"},{"location":"listeners/listen-http/#example-6-client-rest-api","title":"Example 6. Client REST API","text":"

REST API for clients.

[[listen.http]]\n  port = 8089\n  transport.max_connections = 1024\n  protocol.compress = true\n\n  [[listen.http.handlers.mongoose_client_api]]\n    host = \"_\"\n    path = \"/api\"\n
"},{"location":"listeners/listen-s2s/","title":"Server to server (S2S): [[listen.s2s]]","text":"

Handles incoming server-to-server (S2S) connections (federation). The recommended port number for an S2S listener is 5269 as registered in the XMPP protocol.

Note

Many S2S options are configured in the s2s section of the configuration file, and they apply to both incoming and outgoing connections.

"},{"location":"listeners/listen-s2s/#configuration-options","title":"Configuration options","text":""},{"location":"listeners/listen-s2s/#listens2sshaper","title":"listen.s2s.shaper","text":"
  • Syntax: string, name of the shaper rule or \"none\"
  • Default: \"none\" - no shaper
  • Example: shaper = \"s2s_shaper\"

Name of the rule that determines what traffic shaper is used to limit the incoming XMPP traffic to prevent the server from being flooded with incoming data. The rule referenced here needs to be defined in the access config section, and it should return the shaper name or the value \"none\".

"},{"location":"listeners/listen-s2s/#tls-options-for-s2s","title":"TLS options for S2S","text":"

S2S connections do not use TLS encryption unless enabled with the use_starttls option in the s2s section. You can specify additional options of the TLS encryption in the tls subsection of the listener configuration. Accepted options are: verify_mode, certfile, cacertfile, dhfile, ciphers and protocol_options. They have the same semantics as the corresponding c2s options for fast_tls.

"},{"location":"listeners/listen-s2s/#s2s-listener-configuration-example","title":"S2S listener configuration example","text":"

The following section configures an S2S listener with some basic settings set up. The s2s_shaper access rule is used, which requires a definition in the access section.

[[listen.s2s]]\n  port = 5269\n  shaper = \"s2s_shaper\"\n  max_stanza_size = 131072\n  tls.dhfile = \"dh_server.pem\"\n
"},{"location":"migrations/3.1.1_3.2.0/","title":"3.1.1 to 3.2.0","text":""},{"location":"migrations/3.1.1_3.2.0/#odbc-renamed-to-rdbms-in-module-names-and-options","title":"odbc renamed to rdbms in module names and options","text":"
  • For MongooseIM users: simply replace all instances of odbc in your config files with rdbms. E.g. {auth_method, odbc}. would now be {auth_method, rdbms}.. It's also important to note that all metrics that previously contained odbc in their names have also been renamed to contain rdbms instead.

Please note that odbc_server has been completely replaced with new outgoing_pools (see one of the next sections of this document) config element.

  • For developers calling MongooseIM modules: most modules, functions and atoms had odbc in their names replaced with rdbms. The only exceptions to this rule were names actually pertaining to the ODBC driver, e.g. mongoose_rdbms_odbc.
"},{"location":"migrations/3.1.1_3.2.0/#ejabberdcfg-renamed-to-mongooseimcfg","title":"ejabberd.cfg renamed to mongooseim.cfg","text":"

Rename the existing config file of MongooseIM from ejabberd.cfg to mongooseim.cfg.

"},{"location":"migrations/3.1.1_3.2.0/#pools-configuration","title":"Pools configuration","text":"

Configuring pools to external services has changed, please see Outgoing Connection doc for more details.

Note

Keep in mind that outgoing_pools is a list of pools, it may turn out that you will have more than one entry in the list when more than a single outgoing pool is needed.

"},{"location":"migrations/3.1.1_3.2.0/#example-old-format","title":"Example - Old format","text":"
{elasticsearch_server, [{host, \"elastic.host.com\"}, {port, 9042}]}.\n{riak_server, [{pool_size, 20}, {address, \"127.0.0.1\"}, {port, 8087}, {riak_pb_socket_opts, []}]}.\n{http_connections, [{conn1, [{server, \"http://server:8080\"}, {pool_size, 50}]} ]}.\n{cassandra_servers, [\n  {default, 100,\n   [\n    {servers,\n     [\n      {\"cassandra_server1.example.com\", 9042},\n      {\"cassandra_server2.example.com\", 9042},\n      {\"cassandra_server3.example.com\", 9042},\n      {\"cassandra_server4.example.com\", 9042}\n     ]\n    },\n    {keyspace, \"big_mongooseim\"}\n   ]\n  }\n ]\n}.\n
"},{"location":"migrations/3.1.1_3.2.0/#example-new-format","title":"Example - New format","text":"

This section provides direct \"translation\" of configuration from \"Old format\" section.

{outgoing_pools, [\n  {elastic, global, default, [], [{host, \"elastic.host.com\"}, {port, 9042}]},\n  {riak, global, default, [{workers, 20}], [{address, \"127.0.0.1\"}, {port, 8087}]},\n  {http, global, conn1, [{workers, 50}], [{server, \"http://server:8080\"}]},\n  {cassandra, global, default, [{workers, 100}], [\n        {servers, [\n          {\"cassandra_server1.example.com\", 9042},\n          {\"cassandra_server2.example.com\", 9042},\n          {\"cassandra_server3.example.com\", 9042},\n          {\"cassandra_server4.example.com\", 9042}\n        ]},\n        {keyspace, \"big_mongooseim\"}\n    ]}\n]}.\n
"},{"location":"migrations/3.1.1_3.2.0/#rdbms-configuration-migration","title":"RDBMS configuration migration","text":"

RDBMS pools are no longer configured by a {pool, odbc, _} tuple, instead using the generic outgoing pools mechanism. The connection configuration is now passed via server option of the pool instead of being configured via a top-level {odbc_server, _} tuple. Similarly, the number of workers is no longer configured by odbc_pool_size, and the default pool no longer set by odbc_pool. A top-level odbc_keepalive_interval is now also specified as an option for a specific pool. For example:

{odbc_pool_size, 10}.\n{pool, odbc, default}.\n{odbc_server_type, mssql}.\n{odbc_server, \"DSN=mongoose-mssql;UID=sa;PWD=mongooseim_secret+ESL123\"}.\n{odbc_keepalive_interval, 10}.\n

will now become:

{rdbms_server_type, mssql}.\n{outgoing_pools, [\n {rdbms, global, default, [{workers, 10}],\n  [{server, \"DSN=mongoose-mssql;UID=sa;PWD=mongooseim_secret+ESL123\"}, {keepalive_interval, 10}]}\n]}.\n

Note that odbc_server_type was only renamed to rdbms_server_type and still remains a top-level configuration value.

"},{"location":"migrations/3.1.1_3.2.0/#sm_backend","title":"sm_backend","text":"

If you had the sm_backend set to redis like below:

{sm_backend, {redis, [{pool_size, 3}, {worker_config, [{host, \"localhost\"}, {port, 6379}]}]}}.\n

The pool needs to be defined inside outgoing_pools like this:

{outgoing_pools, [\n {redis, global, default, [{workers, 3}],\n  [{host, \"localhost\"},\n   {port, 6379}]}\n]}.\n

and the sm_backend configuration needs to be changed to just:

{sm_backend, {redis, []}}.\n
"},{"location":"migrations/3.1.1_3.2.0/#mod_global_distrib","title":"mod_global_distrib","text":"

If you had mod_global_distrib configured in the following way:

{mod_global_distrib, [\n        (...)\n        {redis, [\n              {pool_size, 24},\n              {server, \"172.16.0.3\"}\n             ]}\n       ]}\n

The redis pool needs to be defined inside outgoing_pools:

{outgoing_pools, [\n {redis, global, global_distrib, [{workers, 24}], [{host, \"172.16.0.3\"}]}\n]}.\n
"},{"location":"migrations/3.3.0_3.4.0/","title":"3.3.0 to 3.4.0","text":""},{"location":"migrations/3.3.0_3.4.0/#new-field-in-message-archive-management-muc-entries-sender-id","title":"New field in Message Archive Management MUC entries: Sender ID","text":"

As a part of ensuring GDPR compliance, it is essential to be able to efficiently query MAM MUC data via sender ID (to retrieve user's personal data). Originally, the sender JID could be found only as a part of an encoded XML message element, so finding all items sent by a certain user would be extremely inefficient (or rather: anti-efficient). MongooseIM 3.4.0 uses a modified schema for MAM MUC backends which enables a more efficient extraction.

Below you may find migration instructions specific to your MAM backend.

"},{"location":"migrations/3.3.0_3.4.0/#rdbms","title":"RDBMS","text":""},{"location":"migrations/3.3.0_3.4.0/#step-1","title":"Step 1","text":"

Please execute the following SQL statements on your MIM database:

MySQL

ALTER TABLE mam_muc_message ADD COLUMN sender_id INT UNSIGNED;\nCREATE INDEX i_mam_muc_message_sender_id USING BTREE ON mam_muc_message(sender_id);\n

PostgreSQL

ALTER TABLE mam_muc_message ADD COLUMN sender_id INT;\nCREATE INDEX i_mam_muc_message_sender_id ON mam_muc_message USING BTREE (sender_id);\n

MSSQL

ALTER TABLE [dbo].[mam_muc_message] ADD sender_id bigint;\nCREATE INDEX i_mam_muc_message_sender_id ON mam_muc_message(sender_id);\n
"},{"location":"migrations/3.3.0_3.4.0/#step-2","title":"Step 2","text":"

Now you have a schema that is compatible with MIM 3.4.0 but isn't GDPR-compliant yet because the new column has no meaningful data.

Please pick your favourite scripting/programming language and populate the new column with the help of a dedicated script. You'll need to iterate over the whole mam_muc_message table with the following algorithm:

  1. Provide message column content to the script.
  2. The script returns sender's JID as username@server string. You need to split it to get a separate username and server.
  3. Select ID from mam_server_user by the username and server. If it doesn't exist, insert a new one (id column is automatically incremented).
  4. Update the sender_id column in mam_muc_message with the retrieved ID.
"},{"location":"migrations/3.3.0_3.4.0/#cassandra","title":"Cassandra","text":""},{"location":"migrations/3.3.0_3.4.0/#step-1_1","title":"Step 1","text":"

Please execute the following CQL statements on your MIM database:

USE mongooseim;\nALTER TABLE mam_muc_message ADD from_jid varchar;\nCREATE INDEX ON mam_muc_message (from_jid);\nDESC mam_muc_message;\n
"},{"location":"migrations/3.3.0_3.4.0/#step-2_1","title":"Step 2","text":"

Now you have a schema that is compatible with MIM 3.4.0 but isn't GDPR-compliant yet because the new column has no meaningful data.

Please pick your favourite scripting/programming language and populate the new column with the help of a dedicated script. You'll need to iterate over the whole mam_muc_message table with the following algorithm:

  1. Extract the whole mam_muc_message table. Please make sure to use the paging feature of your Cassandra client, as the MAM tables tend to be very large.
    SELECT * FROM mam_muc_message;\n
  2. To make data extraction faster, MongooseIM stores 2 copies of the message in the table:
    cqlsh:mongooseim> SELECT * FROM mam_muc_message WHERE id = 399582233150625537 ALLOW FILTERING;\n\n room_jid                      | with_nick | id                 | from_jid | message                        | nick_name\n-------------------------------+-----------+--------------------+----------+--------------------------------+-----------\n room-ad1d999b9e@muc.localhost |           | 399582233150625537 |     null | 0x8350000001...998de2fa8426837 |       Sid\n room-ad1d999b9e@muc.localhost |       Sid | 399582233150625537 |     null | 0x8350000001...998de2fa8426837 |       Sid\n
  3. The copy with an empty with_nick column must be updated.
  4. Extract the sender's JID from the message column in the same way as described in the RDBMS migration section. By default cassandra backend uses the eterm format.
  5. Update the from_jid column with the value of the extracted sender's JID:
    cqlsh:mongooseim> UPDATE mam_muc_message SET from_jid = 'username@server' WHERE id = 399582233150625537  AND with_nick = '' AND room_jid = 'room-ad1d999b9e@muc.localhost';\ncqlsh:mongooseim> SELECT * FROM mam_muc_message WHERE id = 399582233150625537 ALLOW FILTERING;\n\n room_jid                      | with_nick | id                 | from_jid        | message                        | nick_name\n-------------------------------+-----------+--------------------+-----------------+--------------------------------+-----------\n room-ad1d999b9e@muc.localhost |           | 399582233150625537 | username@server | 0x8350000001...998de2fa8426837 |       Sid\n room-ad1d999b9e@muc.localhost |       Sid | 399582233150625537 |            null | 0x8350000001...998de2fa8426837 |       Sid\n
"},{"location":"migrations/3.3.0_3.4.0/#riak","title":"Riak","text":"

Changes to Riak schema are backward compatible with the current MongooseIM release. This means that skipping the migration will cause only some of the new features (namely GDPR data retrieval) to not work correctly.

"},{"location":"migrations/3.3.0_3.4.0/#step-1_2","title":"Step 1","text":"

Please update the Riak schema:

# Set the RIAK_HOST to your Riak HTTP endpoint\n# Set the RIAK_MAM_SCHEMA_PATH to point to new schema path, which\n# by default is: RIAK_MAM_SCHEMA_PATH=tools/mam_search_schema.xml\ncurl -v -XPUT $RIAK_HOST/search/schema/mam \\\n    -H 'Content-Type:application/xml' \\\n    --data-binary @${RIAK_MAM_SCHEMA_PATH}\n

After that we need to either reload all Riak nodes (restart them) or manually reload the schema on live nodes. Reloading the schema on live nodes requires access to Erlang Shell of one of the Riak nodes (any of them). The instruction on how to get to Riak's Erlang shell is beyond this guide, but if you manage to get to it, just call:

yz_index:reload(<<\"mam\">>).\n
"},{"location":"migrations/3.3.0_3.4.0/#step-2_2","title":"Step 2","text":"

After the schema is posted and reloaded, all "new" objects will be indexed properly as long as they contain 2 new fields: msg_owner_jid and mam_type. The new MongooseIM code will insert both of them for all new MAM entries, but all existing entries need to have the fields added. In order to do that, we need to create a migration script (just pick your favourite scripting/programming language) that will do the following for each object in each bucket of type mam_yz (the object will be referred as obj):

  • Use this dedicated script to convert the obj.packet_register field value into a so called $SENDER_JID.
  • If the script returns $SENDER_JID correctly:
  • set obj.mam_type = 'muc'
  • set obj.msg_owner_jid = $SENDER_JID
  • If the script returns error code -2
  • set obj.mam_type = 'pm'
  • based on obj_yz_rk formatted as $LOCAL_JID/$REMOTE_JID/$MSG_ID, set obj.msg_owner_jid = $LOCAL_JID
  • Save the modified obj
"},{"location":"migrations/3.3.0_3.4.0/#elasticsearch","title":"ElasticSearch","text":""},{"location":"migrations/3.3.0_3.4.0/#step-1_3","title":"Step 1","text":"

Please update the mapping for muc_messages:

PUT muc_messages/_mapping/muc\n{\n  \"properties\": {\n    \"mam_id\": {\n      \"type\": \"long\"\n    },\n    \"room\": {\n      \"type\": \"keyword\"\n    },\n    \"from_jid\" : {\n      \"type\": \"keyword\"\n    },\n    \"source_jid\": {\n      \"type\": \"keyword\"\n    },\n    \"message\": {\n      \"type\": \"text\",\n      \"index\": false\n    },\n    \"body\": {\n      \"type\": \"text\",\n      \"analyzer\": \"english\"\n    }\n  }\n}\n
"},{"location":"migrations/3.3.0_3.4.0/#step-2_3","title":"Step 2","text":"

Now you have a schema that is compatible with MIM 3.4.0 but isn't GDPR-compliant yet because the new field has no meaningful data.

Please pick your favourite scripting/programming language and populate the new column with the help of a dedicated script. You'll need to iterate over the all muc_messages documents with the following algorithm:

  1. Extract some documents (notice the size parameter) for conversion:
    GET muc_messages/_search/?size=100&q=!_exists_:from_jid\n
  2. Extract the sender's JID from the message field in the same way as described in the RDBMS migration section. Elasticsearch backend uses exclusively the xml format.
  3. Update the from_jid column with the value of the extracted sender's JID:
    POST localhost:9200/muc_messages/muc/%_id%/_update\n{\n  \"doc\": {\n    \"from_jid\" : \"%sender's jid%\"\n  }\n}\n
  4. Repeat all the actions until the full conversion of the database is done.
"},{"location":"migrations/3.5.0_3.6.0/","title":"3.5.0 to 3.6.0","text":""},{"location":"migrations/3.5.0_3.6.0/#push-notifications","title":"Push notifications","text":"

In this version, push notifications work with MongoosePush 2.0.0 and its API v3 by default.

"},{"location":"migrations/3.5.0_3.6.0/#push-notifications-are-send-from-the-servers-jid","title":"Push notifications are sent from the server's JID","text":"

Since this version, MongooseIM sends the PubSub publish request to push notifications node from the server's JID. Previously the publish request was sent from the user's JID. If the push PubSub node was created with pubsub#access_mode set to whitelist and pubsub#publish_model set to publishers, now the server's JID needs to be added to the push node in order to send the push notifications successfully.

It can be done by sending the following request from the push node's owner:

<iq to='pubsub.mypubsub'\n    type='set'\n    id='wy6Hibg='\n    from='alice@wonderland.com/resource'>\n    <pubsub xmlns='http://jabber.org/protocol/pubsub#owner'>\n        <affiliations node='punsub_node_for_my_private_iphone'>\n            <affiliation jid='mychat.com' affiliation='publish-only'/>\n        </affiliations>\n    </pubsub>\n</iq>\n
"},{"location":"migrations/3.5.0_3.6.0/#mod_push-module-is-no-longer-available","title":"mod_push module is no longer available","text":"

mod_push has been deprecated since MongooseIM 2.1.1 and it is no longer present in this release. Please use the push backend for mod_event_pusher, which is the direct equivalent of mod_push.

"},{"location":"migrations/3.5.0_3.6.0/#different-muc-light-room-schema-definition","title":"Different MUC Light room schema definition","text":"

We have introduced a change that enforces defining fields with default values. The previous setup led to problems with the RDBMS backend as separating MUC Light options for the schema from the default values was unintuitive. In a specific case when the default config was a subset of the schema and the client failed to provide these values when a room was created, MUC Light stored the incomplete config in the table. Then the missing config fields could not be supplied by the clients. If you've experienced this issue, a way to fix it is described in the Known issues page.

The current method makes it impossible to make the same mistake, as it disallows field definition without any default value.

"},{"location":"migrations/3.5.0_3.6.0/#what-has-changed-for-administrators","title":"What has changed? - for administrators","text":"
  • It's no longer possible to declare a room config field only with its name.
  • There is no default_config option anymore.
  • Declaring a field name and type without an atom key is no longer supported.
"},{"location":"migrations/3.5.0_3.6.0/#example-1","title":"Example 1","text":"

Old config:

{config_schema, [\n                 \"roomname\",\n                 \"subject\",\n                 \"background\",\n                 \"notification_sound\"\n                ]},\n{default_config, [\n                  {\"roomname\", \"The room\"},\n                  {\"subject\", \"Chit-chat\"}\n                 ]}\n

New config:

{config_schema, [\n                 {\"roomname\", \"The room\"},\n                 {\"subject\", \"Chit-chat\"},\n                 {\"background\", \"\"},\n                 {\"notification_sound\", \"\"}\n                ]}\n
"},{"location":"migrations/3.5.0_3.6.0/#example-2","title":"Example 2","text":"

Old config:

{config_schema, [\n                 \"roomname\",\n                 {\"subject\", binary},\n                 {\"priority\", priority, integer},\n                 {\"owners-height\", owners_height, float}\n                ]},\n{default_config, [\n                  {\"roomname\", \"The room\"},\n                  {\"subject\", \"Chit-chat\"},\n                  {\"priority\", 10}]}\n

New config:

{config_schema, [\n                 {\"roomname\", \"The room\"},\n                 {\"subject\", \"Chit-chat\"},\n                 {\"priority\", 10, priority, integer},\n                 {\"owners-height\", 180.0, owners_height, float}\n                ]}\n
"},{"location":"migrations/3.5.0_3.6.0/#what-has-changed-for-developers","title":"What has changed? - for developers","text":"

The room config schema is currently stored in a completely different data structure, so if you have any custom modules that use it, you'll need to adjust them. Additionally, all definitions and the room config API have been extracted from mod_muc_light.hrl and mod_muc_light_utils.erl into mod_muc_light_room_config.erl module.

For more information, please check the specs for types and functions in the aforementioned file.

"},{"location":"migrations/3.5.0_3.6.0/#what-hasnt-changed","title":"What hasn't changed?","text":"
  • The default room config is still the same, i.e. roomname (default: \"Untitled\") and subject (empty string).
  • The room config representation in databases (both Mnesia and RDBMS) is the same; no need for migration.
"},{"location":"migrations/3.5.0_3.6.0/#offline-storage","title":"Offline storage","text":"

In this version the offline storage entries contain one additional piece of information for internal use. Riak and mnesia backends don't require any changes when upgrading to this version. In the case of the RDBMS backends, a new column needs to be added. Below there are MySQL, PgSQL and MSSQL queries which can be used to add the new column.

MySQL

ALTER TABLE offline_message ADD COLUMN permanent_fields mediumblob;\n

PostgreSQL

ALTER TABLE offline_message ADD COLUMN permanent_fields bytea;\n

MSSQL

ALTER TABLE [dbo].[offline_message] ADD permanent_fields varbinary(max);\n
"},{"location":"migrations/3.5.0_3.6.0/#persistent-cluster-id","title":"Persistent Cluster ID","text":"

In this version, a new cluster ID has been created, to correctly identify the lifetime of a cluster, across restarts and nodes joining and leaving. This is used for example by System Metrics. This cluster ID is persisted in RDBMS, when an RDBMS database is available, but a new table is required:

MySQL

CREATE TABLE mongoose_cluster_id (k varchar(50) PRIMARY KEY, v text);\n

PostgreSQL

CREATE TABLE mongoose_cluster_id (k varchar(50) PRIMARY KEY, v text);\n

MSSQL

CREATE TABLE mongoose_cluster_id (k varchar(50) NOT NULL PRIMARY KEY, v text);\n

"},{"location":"migrations/3.6.0_3.7.0/","title":"3.6.0 to 3.7.0","text":""},{"location":"migrations/3.6.0_3.7.0/#extended-scram-sha-support","title":"Extended SCRAM-SHA Support","text":"

Since this version, SCRAM authentication mechanisms were extended to support additional hashing algorithms. So far only SHA-1 was available for hashing and now SHA-224, SHA-256, SHA-384 and SHA-512 are also supported. This includes the authentication mechanisms and the password format that is stored. Please note that enabling and using this functionality might require adjusting the server setup.

"},{"location":"migrations/3.6.0_3.7.0/#sasl-mechanisms","title":"SASL mechanisms","text":"

The list of allowed SASL mechanisms was changed. We've added new and more secure methods that can be used during stream negotiation.

Please note that if you were using the following in the configurations file

{sasl_mechanisms, [cyrsasl_scram]}

using cyrsasl_scram as sasl_mechanism is now incorrect. You can achieve the same result of allowing the usage of SHA-1 with SCRAM authentication mechanism with:

{sasl_mechanisms, [cyrsasl_scram_sha1]}

You can also specify a list of all supported SCRAM-SHA mechanisms with:

{sasl_mechanisms, [cyrsasl_scram_sha1, cyrsasl_scram_sha224, cyrsasl_scram_sha256, cyrsasl_scram_sha384, cyrsasl_scram_sha512, cyrsasl_scram_sha1_plus, cyrsasl_scram_sha224_plus, cyrsasl_scram_sha256_plus, cyrsasl_scram_sha384_plus, cyrsasl_scram_sha512_plus]}

Before setting up this configuration, please make sure that the client application is capable of authenticating with a selected set of authentication mechanisms. For more details please refer to the authentication section.

"},{"location":"migrations/3.6.0_3.7.0/#scram-password-format","title":"SCRAM password format","text":"

To complement the extensions of the authentication mechanisms, the SCRAM password format was also updated. Please note that SCRAM is now the default password format. While it is still possible to configure the password storage in plaintext format, we highly discourage doing so for security reasons. Changing the default of this option can lead to unexpected behaviours, so if after the upgrade you encounter issues with authenticating the users, please check the config file. If you are missing any of the following configuration lines:

{password_format, scram} or {password_format, plain}

it means that you were using the default plaintext format.

Since the default of the password format has changed, your MongooseIM server thinks that the plaintext passwords are stored as SCRAM hashes. This can lead to users failing to authenticate.

If you are still using the plaintext password format, please consider migrating your password storage to store scram hashes instead. Using the plaintext password format is still possible to support legacy installations and to ease the debugging while developing new features. Should you want to continue using the plaintext password format please add the following in the auth_opts:

{password_format, plain}

Legacy plaintext and SCRAM formats are still supported. Nonetheless, please note that if you were using SCRAM as a password format, this meant that SHA-1 was used as the hashing algorithm. This allowed authenticating with PLAINTEXT and SCRAM-SHA-1.

In the new setup the user will still authenticate with those mechanisms given the possible slight syntax change explained above.

However, mixing of the old password format with the new authentication mechanisms can lead to conflicting situations where:

  1. A user wants to authenticate with e.g. SCRAM-SHA-256.
  2. Their old password format only stores a SHA-1 password hash.
  3. The authentication fails as it is not possible to derive SHA-256 hash from SHA-1.

If you want to use the new password format with a full set of supported SHA hashes, a password change is required to calculate all the new SHA hashes. Otherwise, please make sure that you provide the right sasl_mechanism configuration, where the mechanism you authenticate with is compatible with the password format you store.

For more details related to the new password format, please refer to authentication and SCRAM serialization sections.

"},{"location":"migrations/3.6.0_3.7.0/#message-retraction","title":"Message retraction","text":"

If you are using MAM with RDBMS, please update your database schema with the following queries. This change is necessary as the support for XEP-0424: Message Retraction requires a new column for the origin_id attribute of MAM messages, which allows MAM to identify the messages to retract. Indexes for this column are required for efficient queries. Only the messages stored after this change can be retracted.

MySQL

ALTER TABLE mam_message ADD COLUMN origin_id varchar(250) CHARACTER SET binary;\nCREATE INDEX i_mam_message_username_jid_origin_id USING BTREE ON mam_message (user_id, remote_bare_jid, origin_id);\n\nALTER TABLE mam_muc_message ADD COLUMN origin_id varchar(250) CHARACTER SET binary;\nCREATE INDEX i_mam_muc_message_room_id_sender_id_origin_id USING BTREE ON mam_muc_message (room_id, sender_id, origin_id);\n

PostgreSQL

ALTER TABLE mam_message ADD COLUMN origin_id varchar;\nCREATE INDEX i_mam_message_username_jid_origin_id ON mam_message USING BTREE (user_id, remote_bare_jid, origin_id);\n\nALTER TABLE mam_muc_message ADD COLUMN origin_id varchar;\nCREATE INDEX i_mam_muc_message_room_id_sender_id_origin_id ON mam_muc_message USING BTREE (room_id, sender_id, origin_id);\n

MSSQL

Note

i_mam_message_username_jid_id was missing from the schema, this is now fixed. It is not required by message retraction, but this change is recommended.

ALTER TABLE mam_message ADD origin_id nvarchar(250) NULL;\nCREATE INDEX i_mam_message_username_jid_id ON mam_message (user_id, remote_bare_jid, id);\nCREATE INDEX i_mam_message_username_jid_origin_id ON mam_message (user_id, remote_bare_jid, origin_id);\n\nALTER TABLE mam_muc_message ADD origin_id nvarchar(250) NULL;\nCREATE INDEX i_mam_muc_message_room_id_sender_id_origin_id ON mam_muc_message (room_id, sender_id, origin_id);\n
"},{"location":"migrations/3.6.0_3.7.0/#rdbms-backend-for-multi-user-chats-muc","title":"RDBMS backend for Multi-User Chats (MUC)","text":"

If you're planning to use the new RDBMS backend for MUC, note that the following tables need to be added to the schema:

MySQL

CREATE TABLE muc_rooms(\n    id SERIAL,\n    muc_host VARCHAR(250)   NOT NULL,\n    room_name VARCHAR(250)       NOT NULL,\n    options JSON            NOT NULL,\n    PRIMARY KEY (muc_host, room_name)\n);\n\nCREATE TABLE muc_room_aff(\n    room_id BIGINT          NOT NULL REFERENCES muc_rooms(id),\n    luser VARCHAR(250)      NOT NULL,\n    lserver VARCHAR(250)    NOT NULL,\n    resource VARCHAR(250)   NOT NULL,\n    aff SMALLINT            NOT NULL\n);\n\nCREATE INDEX i_muc_room_aff_id ON muc_room_aff (room_id);\n\nCREATE TABLE muc_registered(\n    muc_host VARCHAR(250)   NOT NULL,\n    luser VARCHAR(250)      NOT NULL,\n    lserver VARCHAR(250)    NOT NULL,\n    nick VARCHAR(250)       NOT NULL,\n    PRIMARY KEY (muc_host, luser, lserver)\n);\n

PostgreSQL

CREATE TABLE muc_rooms(\n    id BIGSERIAL            NOT NULL UNIQUE,\n    muc_host VARCHAR(250)   NOT NULL,\n    room_name VARCHAR(250)       NOT NULL,\n    options JSON            NOT NULL,\n    PRIMARY KEY (muc_host, room_name)\n);\n\nCREATE TABLE muc_room_aff(\n    room_id BIGINT          NOT NULL REFERENCES muc_rooms(id),\n    luser VARCHAR(250)      NOT NULL,\n    lserver VARCHAR(250)    NOT NULL,\n    resource VARCHAR(250)   NOT NULL,\n    aff SMALLINT            NOT NULL\n);\n\nCREATE INDEX i_muc_room_aff_id ON muc_room_aff (room_id);\n\nCREATE TABLE muc_registered(\n    muc_host VARCHAR(250)   NOT NULL,\n    luser VARCHAR(250)      NOT NULL,\n    lserver VARCHAR(250)    NOT NULL,\n    nick VARCHAR(250)       NOT NULL,\n    PRIMARY KEY (muc_host, luser, lserver)\n);\n

MSSQL

CREATE TABLE muc_rooms(\n    id BIGINT IDENTITY(1,1) NOT NULL UNIQUE,\n    muc_host VARCHAR(250)   NOT NULL,\n    room_name VARCHAR(250)       NOT NULL,\n    options VARCHAR(MAX)    NOT NULL,\n    PRIMARY KEY (muc_host, room_name)\n);\n\nCREATE TABLE muc_room_aff(\n    room_id BIGINT          NOT NULL REFERENCES muc_rooms(id),\n    luser VARCHAR(250)      NOT NULL,\n    lserver VARCHAR(250)    NOT NULL,\n    resource VARCHAR(250)   NOT NULL,\n    aff SMALLINT            NOT NULL\n);\n\nCREATE INDEX i_muc_room_aff_id ON muc_room_aff (room_id);\n\nCREATE TABLE muc_registered(\n    muc_host VARCHAR(250)   NOT NULL,\n    luser VARCHAR(250)      NOT NULL,\n    lserver VARCHAR(250)    NOT NULL,\n    nick VARCHAR(250)       NOT NULL,\n    PRIMARY KEY (muc_host, luser, lserver)\n);\n

"},{"location":"migrations/3.7.0_4.0.0/","title":"3.7.0 to 4.0.0","text":""},{"location":"migrations/3.7.0_4.0.0/#toml-configuration-file","title":"TOML configuration file","text":"

Note that a minor 4.0.1 version has been released with small but important changes to take into account if you're migrating to MongooseIM 4.0.

There is a new TOML configuration file: mongooseim.toml. The legacy mongooseim.cfg file is still supported as an alternative, but deprecated.

You are advised to rewrite your configuration file in the TOML format. Until then, you can still make MongooseIM use the old format by setting the MONGOOSEIM_CONFIG_FORMAT environment variable to cfg:

MONGOOSEIM_CONFIG_FORMAT=cfg mongooseimctl start

"},{"location":"migrations/3.7.0_4.0.0/#changes-in-hooks","title":"Changes in hooks","text":"

If you modified the code, e.g. by adding a custom extension module, you might want to update your handlers to the following hooks. You can find them in the mongoose_hooks module.

  • We refactored the MAM (XEP-0313) implementation, replacing the long lists of arguments accepted by the mam_archive_message and mam_muc_archive_message hooks with a more readable key-value structure (a map).
  • The argument list of the failed_to_store_message hook has been changed as a result of code refactoring.
"},{"location":"migrations/3.7.0_4.0.0/#otp-logger-as-the-logging-framework","title":"OTP Logger as the logging framework","text":"

We've transitioned from lager to Logger as our logging framework. No internal changes were introduced, and the default handlers still implement the same behaviour, but the configuration is different, though still done in the same place. To know more, please refer to each framework's documentation.

As an example, for our previous default lager configuration:

 {lager, [\n    %% Make logging more async\n    %% If some very heavy loaded process want to log something, it's better to not block the process.\n    {async_threshold, 2000},\n    {async_threshold_window, 500},\n    %% Kill sink if it has more than 10k messages\n    {killer_hwm, 10000},\n    {killer_reinstall_after, 5000},\n    {log_root, \"log\"},\n    {crash_log, \"crash.log\"},\n    {handlers, [\n        {lager_console_backend, [{level, info}]},\n        {lager_file_backend, [{file, \"ejabberd.log\"}, {level, info}, {size, 2097152}, {date, \"$D0\"}, {count, 5}]}\n    ]}\n  ]}\n

The equivalent Logger configuration is

 {kernel, [\n  {logger_level, warning},\n  {logger, [\n    %% Console logger\n    {handler, default, logger_std_h, #{}},\n    %% Disk logger for errors\n    {handler, disk_log, logger_disk_log_h,\n       #{config => #{\n           file => \"log/mongooseim.log\",\n           type => wrap,\n           max_no_files => 5,\n           max_no_bytes => 2097152,\n           sync_mode_qlen => 2000, % If sync_mode_qlen is set to the same value as drop_mode_qlen,\n           drop_mode_qlen => 2000, % synchronous mode is disabled. That is, the handler always runs\n           flush_qlen => 5000,     % in asynchronous mode, unless dropping or flushing is invoked.\n           overload_kill_enable => true\n         },\n         formatter => {logger_formatter, #{\n           depth => 12,\n           chars_limit => 4096\n         }}\n        }\n    }\n  ]}]}\n

"},{"location":"migrations/4.0.0_4.0.1/","title":"4.0.0 to 4.0.1","text":""},{"location":"migrations/4.0.0_4.0.1/#toml-configuration-file","title":"TOML configuration file","text":"

After the latest MongooseIM 4.0.0 release that announced the new TOML configuration format, we've changed a few configuration formats:

  • Removed the backend option for mod_bosh as \"mnesia\" was the only valid option.
  • Removed the backend option for mod_inbox as \"rdbms\" was the only valid option.
  • Deprecated mod_revproxy, it can now only be configured with the older, .cfg configuration file. Please refer to the older versions of the documentation to see how to do this.
  • For mod_global_distrib:
  • Replaced the bounce option with bounce.enabled for mod_global_distrib. It was a \"boolean with only false being a valid option\" which was very confusing. This was because when someone wanted to have bounce enabled it became a TOML table as opposed to a key. Now there is a switch in the bounce section for this behaviour which keeps the behaviour of having bounce enabled by default.
  • Replaced the tls option with tls.enabled for mod_global_distrib for the same reason. The only issue here is as tls is disabled by default (it needs some options in the tls section to be set), the \"no section - disabled\" approach seems more natural. Just for the consistency, it's changed to be similar to the bounce section in this regard.
"},{"location":"migrations/4.0.0_4.0.1/#mod_http_notification-module-is-no-longer-available","title":"mod_http_notification module is no longer available","text":"

mod_http_notification has been deprecated since MongooseIM 2.1.1 and it is no longer available in this release. Please use the http backend for mod_event_pusher, which is the direct equivalent of mod_http_notification.

"},{"location":"migrations/4.0.0_4.0.1/#metrics","title":"Metrics","text":"

mod_http_notification metric was updated and now is available as mod_event_pusher_http. For more details on how to configure mod_event_pusher with http backend, please see this section.

"},{"location":"migrations/4.0.1_4.1.0/","title":"4.0.1 to 4.1.0","text":""},{"location":"migrations/4.0.1_4.1.0/#http-file-upload","title":"HTTP File Upload","text":"

HTTP File Upload specification older than 0.3.0 is no longer supported, i.e. the one namespaced with urn:xmpp:http:upload. Currently, only the urn:xmpp:http:upload:0 XMLNS is served.

All major, modern client libraries and applications support the 0.3.0+ specification. If you experience any issues with making requests to the HTTP File Upload service, please update your client.

"},{"location":"migrations/4.0.1_4.1.0/#retirement-of-the-old-cfg-format","title":"Retirement of the old *.cfg format","text":"

Since release 4.1.0, we are no longer supporting the *.cfg MongooseIM configuration format. Please use the TOML format instead.

"},{"location":"migrations/4.0.1_4.1.0/#minor-changes-in-the-toml-config-format","title":"Minor changes in the TOML config format","text":"
  • mod_bosh.max_pause instead of maxpause

  • mod_disco.server_info.module: the field is optional, no longer required

  • mod_global_distrib.connections.advertised_endpoints: default not set (false is no longer accepted)

  • mod_global_distrib.connections.tls.enabled: the flag was removed, TLS is enabled by providing the cacertfile and certfile options

  • mod_http_upload.max_file_size: undefined is no longer allowed

  • mod_mam_meta.user_prefs_store: false is no longer allowed

  • mod_muc_light.config_schema: the usage of value and type fields was replaced with one of the following fields: string_value, integer_value or float_value

  • mod_muc_log.css_file: the default value was changed from \"false\" to not set

  • mod_stream_management: minor adjustments of buffer_max and ack_freq options, buffer and ack booleans were added

  • listen.c2s.tls.ciphers, listen.http.tls.ciphers and outgoing_pools.*.*.connection.tls.ciphers: the ciphers should now be formatted as a specification string

  • listen.http.handlers.mod_websockets.ping_rate: none is no longer allowed

"},{"location":"migrations/4.1.0_4.2.0/","title":"4.1.0 to 4.2.0","text":""},{"location":"migrations/4.1.0_4.2.0/#minor-changes-in-the-toml-config-format","title":"Minor changes in the TOML config format","text":"
  • The pgsql_users_number_estimate option was moved to auth.rdbms.users_number_estimate. The new option supports PostgreSQL and MySQL.
"},{"location":"migrations/4.1.0_4.2.0/#db-migrations","title":"DB migrations","text":""},{"location":"migrations/4.1.0_4.2.0/#new-inbox-features","title":"New inbox features","text":"

Inbox now implements new functionality (see inbox), but this required adding new columns to the DB. If you're using inbox, please update the tables as follows:

For Postgres or MySQL:

ALTER TABLE inbox\n  ADD COLUMN archive BOOLEAN DEFAULT false,\n  ADD COLUMN muted_until BIGINT DEFAULT 0;\n
For MSSQL:
ALTER TABLE inbox\n  ADD archive TINYINT DEFAULT 0,\n      muted_until BIGINT DEFAULT 0;\n

"},{"location":"migrations/4.1.0_4.2.0/#archived-groupchat-messages-in-mod_mam","title":"Archived groupchat messages in mod_mam","text":"

The archive_groupchats option is now set to false by default, as documented. Before the change, the private message (PM) archive stored incoming groupchat messages as well, contrary to the documentation. After the upgrade you can manually remove those messages from the database. For example, when the MUC domain is muc.localhost and rdbms_message_format has the default value internal, one can remove such messages with the following query:

DELETE FROM mam_message\n  WHERE direction = 'I' AND remote_bare_jid LIKE 'muc.localhost:%';\n

This can be a heavy operation and it needs to be done with caution.

"},{"location":"migrations/4.1.0_4.2.0/#using-mod_auth_token-with-mysql-and-ms-sql","title":"Using mod_auth_token with MySQL and MS SQL","text":"

The mod_auth_token module supports MySQL and MS SQL now. To use this functionality, you need to create the auth_token table with the query which you can find in priv/mysql.sql and priv/mssql2012.sql, respectively.

"},{"location":"migrations/4.2.0_5.0.0/","title":"4.2.0 to 5.0.0","text":""},{"location":"migrations/4.2.0_5.0.0/#db-migrations","title":"DB migrations","text":"

The migrations scripts for Postgres, MySQL, MSSQL can be found in the priv/migrations directory. Please remember to provide the existing server domain for the server column instead of the localhost.

"},{"location":"migrations/4.2.0_5.0.0/#changes-in-xeps","title":"Changes in XEPs:","text":"
  • mod_last
    • Table last - added server column, updated primary key and indexes.
  • mod_privacy
    • Table privacy_default_list - added server column, updated primary key and indexes.
    • Table privacy_list - added server column, updated primary key and indexes.
  • mod_private
    • Table private_storage - added server column, updated primary key and indexes, removed unused columns.
  • mod_roster
    • Table rosterusers - added server column, updated indexes.
    • Table rostergroups - added server column, updated indexes.
    • Table roster_version - added server column, updated primary key and indexes.
  • mod_muc
    • Table i_muc_light_blocking - updated indexes.
  • mod_inbox
    • Table inbox - updated primary key and indexes.
"},{"location":"migrations/4.2.0_5.0.0/#other-changes","title":"Other changes:","text":"
  • RDBMS auth - modified users table.
  • Added domain_settings table.
  • Added domain_events table.
"},{"location":"migrations/4.2.0_5.0.0/#config-migrations","title":"Config migrations","text":"

Most important changes without which a server might not run.

"},{"location":"migrations/4.2.0_5.0.0/#section-general","title":"Section general","text":"
  • New mandatory option default_server_domain that must be set. This domain is used as a default when one cannot be determined, for example when sending XMPP stream errors to unauthenticated clients.
  • Option hosts is no longer mandatory, but if omitted, at least one host type has to be defined.
  • New option host_types. If omitted, at least one host has to be defined. This is the list of names for the types of hosts that will serve dynamic XMPP domains.

Simply use hosts if you want to stay with static domains and use host_types for dynamic domains.

"},{"location":"migrations/4.2.0_5.0.0/#section-host_config","title":"Section host_config","text":"
  • Option host specifies the XMPP domain that this section refers to.
  • New option host_type specifies the host type that this section refers to. Either this option or host is mandatory.

For every configured host the host_type of the same name is declared automatically. As the host_config section is now used for changing the configuration of the host_type, we don't need the host option any more. But to stay compatible with the old config format we keep the host option as well. Now it is just a synonym for host_type.

"},{"location":"migrations/4.2.0_5.0.0/#hook-migrations","title":"Hook migrations","text":"

The support for dynamic domains has resulted in changes in most Hooks. Mostly these changes concern calling hooks for a host_type.

"},{"location":"migrations/4.2.0_5.0.0/#added","title":"Added","text":"
  • adhoc_local_commands/4
  • adhoc_sm_commands/4
  • does_user_exist/3
  • get_room_affiliations/2
  • mam_get_behaviour/4
  • mam_set_prefs/6
  • disco_muc_features/1
  • remove_domain/2
  • node_cleanup/1
  • gen_mod:get_module_opt_by_subhost
"},{"location":"migrations/4.2.0_5.0.0/#removed","title":"Removed","text":"
  • host_config_update/4
  • local_send_to_resource_hook/5
  • muc_room_pid/3
  • amp_check_packet/4
  • amp_error_action_triggered/2
  • amp_notify_action_triggered/2
  • room_send_packet/3
  • caps_update/6
  • caps_add/6
"},{"location":"migrations/4.2.0_5.0.0/#changed","title":"Changed","text":"
  • auth_failed/2 -> auth_failed/3
  • failed_to_store_message/2 -> failed_to_store_message/1
  • filter_local_packet/2 -> filter_local_packet/1
  • get_key/3 -> get_key/2
  • register_command/2 -> register_command/1
  • register_subhost/3 -> register_subhost/2
  • resend_offline_messages_hook/3 -> resend_offline_messages_hook/2
  • rest_user_send_packet/5 -> rest_user_send_packet/4
  • set_vcard/4 -> set_vcard/3
  • unregister_command/2 -> unregister_command/1
  • unregister_subhost/2 -> unregister_subhost/1
  • user_ping_timeout/3 -> user_ping_timeout/2
  • user_send_packet/5 -> user_send_packet/4
  • xmpp_stanza_dropped/5 -> xmpp_stanza_dropped/4
  • c2s_broadcast_recipients/6 -> c2s_broadcast_recipients/4
  • c2s_filter_packet/6 -> c2s_filter_packet/4
  • c2s_presence_in/5 -> c2s_presence_in/4
  • check_bl_c2s/2 -> check_bl_c2s/1
  • session_opening_allowed_for_user/3 -> session_opening_allowed_for_user/2
  • privacy_check_packet/6 -> privacy_check_packet/5
  • privacy_get_user_list/3 -> privacy_get_user_list/2
  • privacy_updated_list/4 -> privacy_updated_list/3
  • offline_groupchat_message_hook/5 -> offline_groupchat_message_hook/4
  • offline_message_hook/5 -> offline_message_hook/4
  • set_presence_hook/5 -> set_presence_hook/3
  • sm_broadcast/6 -> sm_broadcast/5
  • sm_filter_offline_message/5 -> sm_filter_offline_message/4
  • sm_remove_connection_hook/6 -> sm_remove_connection_hook/5
  • unset_presence_hook/5 -> unset_presence_hook/3
  • xmpp_bounce_message/2 -> xmpp_bounce_message/1
  • roster_get/3 -> roster_get/2
  • roster_get_jid_info/4 -> roster_get_jid_info/3
  • roster_get_versioning_feature/2 -> roster_get_versioning_feature/1
  • roster_groups/2 -> roster_groups/1
  • roster_in_subscription/6 -> roster_in_subscription/5
  • roster_out_subscription/5 -> roster_out_subscription/4
  • roster_process_item/2 -> roster_process_item/3
  • roster_push/4 -> roster_push/3
  • roster_set/5 -> roster_set/4
  • is_muc_room_owner/4 -> is_muc_room_owner/3
  • can_access_identity/4 -> can_access_identity/3
  • can_access_room/4 -> can_access_room/3
  • mam_archive_id/3 -> mam_archive_id/2
  • mam_archive_size/4 -> mam_archive_size/3
  • mam_get_behaviour/5 -> mam_get_behaviour/4
  • mam_set_prefs/7 -> mam_set_prefs/6
  • mam_remove_archive/4 -> mam_remove_archive/3
  • mam_lookup_messages/3 -> mam_lookup_messages/2
  • mam_archive_message/3 -> mam_archive_message/2
  • mam_muc_archive_id/3 -> mam_muc_archive_id/2
  • mam_muc_archive_size/4 -> mam_muc_archive_size/3
  • mam_muc_get_behaviour/5 -> mam_muc_get_behaviour/4
  • mam_muc_set_prefs/7 -> mam_muc_set_prefs/6
  • mam_muc_remove_archive/4 -> mam_muc_remove_archive/3
  • mam_muc_lookup_messages/3 -> mam_muc_lookup_messages/2
  • mam_muc_archive_message/3 -> mam_muc_archive_message/2
  • mam_muc_flush_messages/3 -> mam_muc_flush_messages/2
  • get_mam_pm_gdpr_data/3 -> get_mam_pm_gdpr_data/2
  • get_mam_muc_gdpr_data/3 -> get_mam_muc_gdpr_data/2
  • get_personal_data/3 -> get_personal_data/2
  • find_s2s_bridge/3 -> find_s2s_bridge/2
  • s2s_allow_host/3 -> s2s_allow_host/2
  • s2s_connect_hook/3 -> s2s_connect_hook/2
  • s2s_receive_packet/2 -> s2s_receive_packet/1
  • disco_local_identity/6 -> disco_local_identity/1
  • disco_sm_identity/6 -> disco_sm_identity/1
  • disco_local_items/6 -> disco_local_items/1
  • disco_sm_items/6 -> disco_sm_items/1
  • disco_local_features/6 -> disco_local_features/1
  • disco_sm_features/6 -> disco_sm_features/1
  • disco_info/5 -> disco_info/1
  • amp_check_condition/4 -> amp_check_condition/3
  • amp_determine_strategy/6 -> amp_determine_strategy/5
  • amp_verify_support/3 -> amp_verify_support/2
  • forget_room/4 -> forget_room/3
  • invitation_sent/7 -> invitation_sent/6
  • join_room/6 -> join_room/5
  • leave_room/6 -> leave_room/5
  • room_packet/6 -> room_packet/5
  • caps_recognised/5 -> caps_recognised/4
  • pubsub_create_node/6 -> pubsub_create_node/5
  • pubsub_delete_node/5 -> pubsub_delete_node/4
  • pubsub_publish_item/7 -> pubsub_publish_item/6
  • mod_global_distrib_known_recipient/5 -> mod_global_distrib_known_recipient/4
"},{"location":"migrations/4.2.0_5.0.0/#metrics-rest-api-obsolete","title":"Metrics REST API (obsolete)","text":"

The API is still considered obsolete so if you are using it, please consider using WombatOAM or metrics reporters as described in Logging and monitoring.

In each endpoint, host has been changed to host_type. This is because the metrics are now collected per host type rather than host.

"},{"location":"migrations/4.2.0_5.0.0/#users-cache","title":"Users cache","text":"

MongooseIM used to feature a cache to check whether a user exists, that was unavoidably enabled, and had no eviction policy, that is, the cache could continue growing forever. Now, MIM features a module called mod_cache_users that implements a configurable cache policy, that can be enabled, disabled, and parametrised, per host_type. This might not be enabled by default in your configuration, so we recommend you verify your configuration and enable it if needed.

"},{"location":"migrations/5.0.0_5.1.0/","title":"5.0.0 to 5.1.0","text":""},{"location":"migrations/5.0.0_5.1.0/#configuration-changes","title":"Configuration changes","text":"

The configuration format has slightly changed and you might need to amend mongooseim.toml.

"},{"location":"migrations/5.0.0_5.1.0/#section-listen","title":"Section listen","text":"

The options tls.verify_peer and tls.verify_mode are replaced with just tls.verify_mode. You need to amend the affected options for each listener:

  • verify_peer = false (the old default for C2S and HTTP listeners) should be replaced with verify_mode = \"none\".
  • verify_peer = true should be replaced with verify_mode = \"peer\" or just removed, as this is the new default.

There is a new, simplified configuration format for mongoose_client_api. You need to change the listen section unless you have disabled the client API in your configuration file. Consult the option description and the example configuration for details.

"},{"location":"migrations/5.0.0_5.1.0/#section-acl","title":"Section acl","text":"

The implicit check for user's domain in patterns is now configurable and the default behaviour (previously undocumented) is more consistent - the check is always performed unless disabled with match = \"all\". See the description of current_domain for more details.

"},{"location":"migrations/5.0.0_5.1.0/#section-auth","title":"Section auth","text":"
  • Each authentication method needs a TOML section, e.g. if you have the rdbms method enabled, you need to have the [auth.rdbms] section in the configuration file, even if it is empty. The methods option is not required anymore and especially if you are using only one method, you can remove it.
  • The auth.scram_iterations option was moved to auth.password.scram_iterations.

See the auth configuration for details.

"},{"location":"migrations/5.0.0_5.1.0/#section-outgoing_pools","title":"Section outgoing_pools","text":"

The option tls.verify_peer is replaced with tls.verify_mode. You need to change this option for each outgoing connection pool:

  • verify_peer = false (the old default for all pools except Riak) should be replaced with verify_mode = \"none\".
  • verify_peer = true should be replaced with verify_mode = \"peer\" or just removed, as this is the new default.

A few options of the outgoing connection pools were changed for consistency:

  • Cassandra servers: ip_address was renamed to host,
  • RabbitMQ: the amqp_ option prefix was removed,
  • LDAP: rootdn was renamed to root_dn; encrypt was removed (the tls option should be used instead).
"},{"location":"migrations/5.0.0_5.1.0/#section-s2s","title":"Section s2s","text":"
  • All options can be set globally or inside host_config.
  • The host_config.s2s section overrides the whole global section now. Previously only the specified options were overridden.
  • The domain_certfile option has been moved to the general section because it affects c2s connections as well.
"},{"location":"migrations/5.0.0_5.1.0/#section-host_config","title":"Section host_config","text":"

The rules for overriding global options in the host_config section have been simplified. The auth section and the s2s.address and s2s.host_policy options now completely override the corresponding general settings instead of being merged with them.

"},{"location":"migrations/5.0.0_5.1.0/#extension-modules","title":"Extension modules","text":"
  • mod_auth_token has a new configuration format - if you are using this module, amend the validity_period option.
  • mod_event_pusher has an updated configuration format - the backend subsection is removed and the http backend has a new handlers option. Adjust your configuration according to mod_event_pusher documentation.
  • mod_mam_meta does not have the rdbms_message_format and simple options anymore. Use db_jid_format and db_message_format instead.
  • mod_shared_roster_ldap all options have their ldap_ prefix dropped.
  • mod_vcard LDAP options are moved into an LDAP subsection.
"},{"location":"migrations/5.0.0_5.1.0/#async-workers","title":"Async workers","text":"

The async_writer flag of MAM is now a section on its own, that absorbs previous flags related to it: flush_interval, max_batch_size and pool_size now become subelements of the async_writer section, with one more parameter, enabled. Below is an example:

[modules.mod_mam_meta]\n  flush_interval = 1000\n  max_batch_size = 100\n  muc.async_writer = false\n
now becomes
[modules.mod_mam_meta]\n  async_writer.flush_interval = 1000\n  async_writer.batch_size = 100\n  muc.async_writer.enabled = false\n

"},{"location":"migrations/5.0.0_5.1.0/#smart-markers","title":"Smart markers","text":"

There's an experimental and undocumented module called mod_smart_markers, that had a default table in the RDBMS schema, which you probably never used (or shouldn't have, as it was undocumented). If you rely on this table, the column from_jid has been split into from_luser and lserver, in order to support the remove_domain callback for the dynamic domains functionality. You might need to migrate it, or simply drop the previously defined table and recreate the new one.

"},{"location":"migrations/5.0.0_5.1.0/#inbox","title":"Inbox","text":"

The archive functionality recently introduced has been extended to support many more boxes. IQ queries can remain as they were, but a new flag called box is now introduced, and if provided, it takes preference over the old archive flag. The database requires a migration, as the archive is now a column storing the proper name of the box, see the migrations for Postgres, MySQL and MSSQL in the priv/migrations directory.

"},{"location":"migrations/5.0.0_5.1.0/#removal-of-deprecated-modules","title":"Removal of deprecated modules","text":"
  • mod_revproxy - removed from the code base as it was unsupported since 4.2.0.
  • mod_aws_sns - its functionality is fully covered by mod_event_pusher.
"},{"location":"migrations/5.0.0_5.1.0/#internal-module-configuration-rework","title":"Internal module configuration rework","text":"

If you are using your own extension modules (or services), you need to update the code. The most important change is that configuration options were stored in proplists before, and now they are stored in maps, so e.g. the start/2 function of your module should expect a map as the second argument.

"},{"location":"migrations/5.1.0_6.0.0/","title":"5.1.0 to 6.0.0","text":""},{"location":"migrations/5.1.0_6.0.0/#module-configuration","title":"Module configuration","text":"
  • The mod_mam_meta module is now named mod_mam for simplicity, so if you are using this module, you need to update the module name in mongooseim.toml.
  • mod_commands, mod_inbox_commands, mod_muc_commands and mod_muc_light_commands are removed. Their functionality is now fully covered by mongoose_admin_api. You need to delete these modules from mongooseim.toml.
"},{"location":"migrations/5.1.0_6.0.0/#metrics","title":"Metrics","text":"

The mod_mam backend module is now named mod_mam_pm for consistency with mod_mam_muc. As a result, the backend metrics have updated names, i.e. each [backends, mod_mam, Metric] name is changed to [backends, mod_mam_pm, Metric], where Metric can be lookup or archive.

"},{"location":"migrations/5.1.0_6.0.0/#rest-api","title":"REST API","text":"

The whole REST API has been unified and simplified. There are now only two REST API handlers that you can configure in the listen section of mongooseim.toml:

  • mongoose_admin_api handles the administrative API,
  • mongoose_client_api handles the client-facing API.

You need to remove the references to the obsolete handlers (mongoose_api_client, mongoose_api_admin, mongoose_api, mongoose_domain_handler) from your configuration file.

Additionally, all the backend administration endpoints for mod_muc_light require now XMPPMUCHost (MUC subdomain) instead of XMPPHost (domain) and roomID instead of roomName.

For some endpoints, the response messages may be slightly different because of the unification with other APIs.

"},{"location":"migrations/5.1.0_6.0.0/#command-line-interface","title":"Command Line Interface","text":"

For some commands, the response messages may be slightly different because of the unification with other APIs.

"},{"location":"migrations/5.1.0_6.0.0/#dynamic-domains","title":"Dynamic domains","text":"

Removing a domain was a potentially troublesome operation: if the removal was to fail midway through the process, retrials wouldn't be accepted. This is fixed now, by first disabling and marking a domain for removal, then running all the handlers, and only on full success will the domain be removed. So if any failure is notified, the whole operation can be retried again.

The database requires a migration, as the status of a domain takes now more than the two values a boolean allows, moreover the table for data of the domain admin has been added. See the migrations for Postgres, MySQL and MSSQL in the priv/migrations directory.

"},{"location":"migrations/5.1.0_6.0.0/#hooks","title":"Hooks","text":"

Support for ejabberd_hooks has been removed. Now handlers should be compliant with gen_hook. If you have some custom modules (e.g. that implement some XMPP extensions) and use hooks mechanism, please refactor your handlers to be compliant with it. For more details refer to Hooks and Handlers chapter.

"},{"location":"migrations/6.0.0_6.1.0/","title":"6.0.0 to 6.1.0","text":""},{"location":"migrations/6.0.0_6.1.0/#listener-configuration","title":"Listener configuration","text":"

With the new implementation of the client-to-server (C2S) state machine, mongoose_c2s, there are some changes in the listener options:

  • The zlib option for supporting stream compression, which was present in the default configuration file, is removed, and you need to delete it from your listener configuration unless you have already done so. The extension is obsolete due to a security vulnerability — the CRIME vulnerability was discovered a long time ago.
  • Support for [listen.http.handlers.mod_websockets.service] has been removed, as the component connection over WebSockets did not correspond to any XEP/RFC, nor was it properly described anywhere in the MIM documentation. It was present in the default configuration file, and you need to delete it from your listener configuration unless you have already done so.
  • The max_fsm_queue option is no longer supported for C2S listeners. It is incompatible with the new gen_statem state machine, and if you need to limit incoming traffic, you should use traffic shapers instead. You need to remove this option from your C2S configuration if you are using it.
  • The default value of the backlog option for all XMPP listeners has been increased from 100 to 1024 for performance reasons.
  • You might be interested in the new C2S listener options: max_connections, c2s_state_timeout, reuse_port and backwards_compatible_session. The first two options can be set for websockets as well.
"},{"location":"migrations/6.0.0_6.1.0/#module-configuration","title":"Module configuration","text":"

The mongoose_c2s module, which provides the core XMPP features, is now separated from modules which used to have their parts hardcoded into the old C2S implementation:

  • Presence handling has been exported to a separate module mod_presence, which is enabled in the default configuration file. Enable mod_presence in your configuration file unless you are sure that you don't need server-side presence handling, in which case you could gain some performance by not using this module.
  • Stream management is now handled completely by mod_stream_management, and if you don't need it, you can now gain more performance than before by disabling it.
  • Client state indication is now handled completely by mod_csi, and if you don't need it, you can now gain more performance than before by disabling it.
"},{"location":"migrations/6.0.0_6.1.0/#database-migration","title":"Database migration","text":"

There is no database migration required when upgrading from version 6.0.0 to 6.1.0.

"},{"location":"migrations/6.0.0_6.1.0/#metrics","title":"Metrics","text":"

The c2s_unauthenticated_iq metric has been removed.

Since we don't know whether a compressed/encrypted packet contains a single stanza or a batch, calculation of histogram metrics for compressed and encrypted streams is inaccurate. The following histogram metrics have been removed: * global.data.xmpp.received.compressed_size - obsolete, stream compression support is removed. * global.data.xmpp.sent.compressed_size - obsolete, stream compression support is removed. * global.data.xmpp.received.encrypted_size - impractical, has no value but consumes calculation resources. * global.data.xmpp.sent.encrypted_size - impractical, has no value but consumes calculation resources.

A set of global.data.xmpp.received.** and global.data.xmpp.sent.** spiral data metrics has been introduced instead.

"},{"location":"migrations/6.0.0_6.1.0/#hooks","title":"Hooks","text":"

Multiple hooks have been added, removed or changed because of the introduction of mongoose_c2s - the most important change is the increased granularity of the user_send_* and user_receive_* hooks. If you have some custom modules (e.g. that implement some XMPP extensions) using the hooks mechanism, please refactor your handlers to be compliant with the new hooks. Refer to Hooks Description and Message routing for more details.

"},{"location":"migrations/6.0.0_6.1.0/#upgrade-procedure","title":"Upgrade procedure","text":"

As long as you are not using Mnesia for persistent storage (it is not recommended to do so), the safest option would be to prepare a new cluster with version 6.1.0, and switch the traffic to it on a load balancer. The only downside is that clients connected to different clusters would see each other as offline. If you are limited to one cluster, it is recommended to do a split-cluster rolling upgrade by removing each node from the cluster before stopping and upgrading it, and gradually forming a new separate cluster from the upgraded nodes. This means that for each newly started node except the first one, you should join one of the previously started nodes.

"},{"location":"migrations/6.0.0_6.1.0/#rolling-upgrade-issues","title":"Rolling upgrade issues","text":"

If you want to perform a typical rolling upgrade instead, there are a few potential issues caused by the introduction of mongoose_c2s. When a node is stopped, upgraded and started again, it reconnects to the cluster. When a stanza is routed between users connected to different nodes of the cluster, an internal message is sent between the nodes. In version 6.1.0 that message has a different format, and routing a stanza between nodes with versions 6.0.0 and 6.1.0 would fail, resulting in a warning message for each stanza. This means that after upgrading the first node you might get a huge amount of warning messages on all nodes, causing a performance drop. What is more, the sender's node would still assume that the recipient is online, and some actions (e.g. responding with the service-unavailable error) would be omitted.

Changing the log level

You can set the log level to error during the upgrade to silence the excess warnings. Before the upgrade, set the log level on all nodes:

mongooseimctl server setLoglevel --level ERROR\n
Before starting the upgraded node, set the loglevel option to error in the configuration file. After the whole upgrade procedure, use mongooseimctl to change the log level back to the previous value (warning by default). Change the values in the configuration files as well to make the setting permanent.

"},{"location":"migrations/6.1.0_6.2.0/","title":"6.1.0 to 6.2.0","text":""},{"location":"migrations/6.1.0_6.2.0/#internal-databases","title":"Internal databases","text":"

So far MongooseIM has been using the internal Mnesia database to replicate the in-memory data between cluster nodes. Now there is an option to use CETS instead. Mnesia is still used by default, so you don't need to change your configuration file. If you want to switch to CETS, see the tutorial and the reference for internal_databases.

"},{"location":"migrations/6.1.0_6.2.0/#database-migration","title":"Database migration","text":"

There is a new table discovery_nodes in the database, which is used by CETS for dynamic discovery of cluster nodes. See the migrations for Postgres, MySQL and MSSQL in the priv/migrations directory. Although the new table is only needed by CETS, we recommend applying the migration anyway to keep the database in sync with the latest schema.

"},{"location":"migrations/6.1.0_6.2.0/#validation-of-tls-options","title":"Validation of TLS options","text":"

Erlang/OTP 26 has more strict checking of the TLS options, as described in release highlights. MongooseIM follows the same rules now, preventing runtime crashes if TLS is misconfigured.

By default verify_mode is set to \"peer\" for each tls section in the configuration, and this requires cacertfile - otherwise the server will refuse to start. This was already documented, but not enforced. The option \"selfsigned_peer\" also requires cacertfile now.

This change affects the following configuration sections:

  • Listeners. Currently, it only affects http and c2s with tls.module set to \"just_tls\", but we recommend fixing it for all listeners already, because in future releases all listeners would have this validation.
  • Outgoing connections.

For each of the affected sections, if there is any tls option present, make sure that either tls.cacertfile is provided or tls.verify_mode is set to \"none\".

"},{"location":"migrations/6.1.0_6.2.0/#transition-to-new-cli-commands","title":"Transition to New CLI Commands","text":"

Legacy CLI commands previously marked as deprecated have now been removed. The users are encouraged to explore the new GraphQL-based CLI. It is recommended to transition to the new CLI commands prior to the next system upgrade. The configuration options general.mongooseimctl_access_commands and services.service_admin_extra related to the legacy CLI were also removed. You need to remove them from your configuration file unless you have already done so.

"},{"location":"migrations/6.1.0_6.2.0/#removed-support-for-riak","title":"Removed support for Riak","text":"

The deprecated and obsolete Riak database is not supported anymore, and you cannot configure it in the outgoing_pools section.

"},{"location":"migrations/6.2.0_6.2.1/","title":"6.2.0 to 6.2.1","text":""},{"location":"migrations/6.2.0_6.2.1/#database-migration","title":"Database migration","text":"

The migration scripts for PostgreSQL, MySQL and MS SQL are in the priv/migrations directory. They are required due to the following changes:

"},{"location":"migrations/6.2.0_6.2.1/#mam-message-improvements","title":"MAM message improvements","text":"

There is a new column in the mam_message table, which is used to support including or excluding groupchat results in a user archive (mod_mam). Please be aware, that the filtering process will only be effective for new messages and will not apply to those messages that have already been stored in the database.

"},{"location":"migrations/6.2.0_6.2.1/#roster","title":"Roster","text":"

mod_roster was internally refactored to modernise and improve the performance of the code, but as a side-effect, some database migrations need to be carried out.

"},{"location":"migrations/6.2.0_6.2.1/#cets-node-discovery","title":"CETS node discovery","text":"

The discovery_nodes table used by the CETS internal database has been updated - now each node name can appear only once, while in the past a node could be a part of multiple clusters. Manual intervention might be needed if there are nodes that belong to more than one cluster.

"},{"location":"migrations/6.2.0_6.2.1/#entity-capabilities","title":"Entity capabilities","text":"

mod_caps has a new RDBMS backend, making it possible to use it with CETS. As a result, a new table caps is added to the DB schema.

"},{"location":"migrations/6.2.0_6.2.1/#configuration-changes-outgoing-pools","title":"Configuration changes: outgoing pools","text":"

The outgoing connections option host is now named host_type, see outgoing pools for more information.

The option single_host for the scope has been deprecated, in favour of configuring the specified pools within the host_config section.

"},{"location":"migrations/6.2.0_6.2.1/#functional-changes-presences","title":"Functional changes: presences","text":"

mod_presence was internally refactored to modernise and improve the performance of the code, but as a side-effect, code for XEP-0018 was removed. Note that this XEP was not advertised and as a matter of fact was deprecated already in 2003, so if your client was depending on it, it is high time to update.

"},{"location":"migrations/6.2.0_6.2.1/#upgrade-procedure","title":"Upgrade procedure","text":"

The standard migration procedure is to stop the cluster, apply the DB migrations, and start the new version of the cluster.

Should you require no downtime, you could apply the DB migration first, and then perform the rolling upgrade procedure - either manually or using helm or kubectl. If you are using CETS, the restarted nodes will stay disconnected from the ones still running the previous version, causing transient connectivity issues between the end users connected to different parts of the cluster. This is due to changes in the internal CETS API.

"},{"location":"migrations/6.2.1_x.x.x/","title":"6.2.1 to x.x.x","text":""},{"location":"migrations/6.2.1_x.x.x/#hooks","title":"Hooks","text":"

Hook names have been unified by removing the _hook prefix from the few hooks which used it, e.g. offline_message_hook is now called offline_message. This change affects the hook metric names as well.

"},{"location":"migrations/jid-from-mam-muc-script/","title":"MAM MUC migration helper","text":""},{"location":"migrations/jid-from-mam-muc-script/#the-purpose-of-sender-jid-from-mam-messageescript","title":"The purpose of sender-jid-from-mam-message.escript","text":"

This script may be used as a part of migration from MongooseIM 3.3.0 (or older). It is able to extract a JID of a groupchat message sender from an XML payload. This piece of information is essential for GDPR commands (retrieve data and remove user) to work properly, as without it the operations on MAM MUC data in DB would be extremely inefficient.

Please consult \"3.3.0 to...\" migration guide for details. DB-specific sections describe where the payloads are stored and what you should do with the extracted JID.

"},{"location":"migrations/jid-from-mam-muc-script/#requirements","title":"Requirements","text":"

This script may be executed in any *nix environment which has OTP 19.0 (or newer) installed and the escript executable available in PATH.

It doesn't depend on any MongooseIM code or library, so it may be used as a standalone file.

"},{"location":"migrations/jid-from-mam-muc-script/#how-to-use","title":"How to use?","text":"

sender-jid-from-mam-message.escript (eterm | xml)

The only parameter required by the script is the input format.

You should use eterm if (in MongooseIM config file):

  • You haven't set db_message_format option for MAM at all.
  • db_message_format is set to mam_message_compressed_eterm or mam_message_eterm

You should use the xml option if:

  • db_message_format is set to mam_message_xml.

Once started, the script will run in an infinite loop (until killed or interrupted), expecting a stream of inputs. For every provided payload, a JID will be returned immediately. All communication with the script is done via stdio.

"},{"location":"migrations/jid-from-mam-muc-script/#input-format","title":"Input format","text":"

For both eterm and xml mode, the script expects an input in a very similar format. The high-level overview is:

LENGTH\\nPAYLOAD\n
  • LENGTH is the PAYLOAD length in bytes; if the data retrieved from a DBMS is a Unicode string, LENGTH is equal to the number of bytes used to encode this string
  • PAYLOAD is a sequence of bytes; if a DBMS returns binary data encoded as hex, then it has to be decoded to raw bytes
  • LENGTH and PAYLOAD are separated with a newline character (ASCII code 10 / 0x0a)
"},{"location":"migrations/jid-from-mam-muc-script/#output-format","title":"Output format","text":"

The script output format is very similar to the input:

LENGTH\\nJID\n
  • LENGTH is the number of bytes in a JID
  • JID is a sequence of bytes, which encodes a Unicode string
  • LENGTH and JID are separated with a newline character (ASCII code 10 / 0x0a)

In case of an error (that is not a critical error, like I/O failure), script will print -N\\n (where N is an error code) and will continue to work. Technically it's -N for LENGTH, followed by a newline character and no PAYLOAD part (or 0-length PAYLOAD if you like). The following error codes are supported: * -1\\n - Unknown error. Something went wrong with the JID extraction (most likely malformed input). * -2\\n - Invalid message type. The message / stanza has been decoded successfully, but it's not a groupchat message.

"},{"location":"migrations/jid-from-mam-muc-script/#examples","title":"Examples","text":"

tools/migration folder contains two files: sender-jid-from-mam-message.example.eterm and sender-jid-from-mam-message.example.xml. They are input samples for the script and may be used as a reference for the script usage.

You can test them by running:

  • tools/migration/sender-jid-from-mam-message.escript eterm < sender-jid-from-mam-message.example.eterm > out
  • tools/migration/sender-jid-from-mam-message.escript xml < sender-jid-from-mam-message.example.xml > out

In both cases the out file should have the following content:

37\ng\u017ceg\u017c\u00f3\u0142ka@brz\u0119czyszczykiewicz.pl\n
"},{"location":"migrations/jid-from-mam-muc-script/#debug","title":"Debug","text":"

If an environment variable DEBUG is set to 1, the script will store error messages in a /tmp/script-debug file.

"},{"location":"modules/mod_adhoc/","title":"mod_adhoc","text":""},{"location":"modules/mod_adhoc/#module-description","title":"Module Description","text":"

This module implements XEP-0050: Ad-Hoc Commands. It allows XMPP entities to remotely execute various commands using forms.

"},{"location":"modules/mod_adhoc/#options","title":"Options","text":""},{"location":"modules/mod_adhoc/#modulesmod_adhociqdisctype","title":"modules.mod_adhoc.iqdisc.type","text":"
  • Syntax: string, one of \"one_queue\", \"no_queue\", \"queues\", \"parallel\"
  • Default: \"one_queue\"

Strategy to handle incoming stanzas. For details, please refer to IQ processing policies.

"},{"location":"modules/mod_adhoc/#modulesmod_adhocreport_commands_node","title":"modules.mod_adhoc.report_commands_node","text":"
  • Syntax: boolean
  • Default: false
  • Example: report_commands_node = true

Determines whether the Ad-Hoc Commands should be announced upon Service Discovery.

"},{"location":"modules/mod_adhoc/#example-configuration","title":"Example configuration","text":"
[modules.mod_adhoc]\n  report_commands_node = true\n
"},{"location":"modules/mod_amp/","title":"mod_amp","text":""},{"location":"modules/mod_amp/#module-description","title":"Module Description","text":"

This module enables support for a subset of the functionality described under XEP-0079: Advanced Message Processing. It currently does not provide features related to timed delivery, i.e. the expire-at condition.

The error and notify actions are supported, while alert and drop are not. See more below, under XEP Support.

"},{"location":"modules/mod_amp/#options","title":"Options","text":"

None.

"},{"location":"modules/mod_amp/#example-configuration","title":"Example Configuration","text":"
[modules.mod_amp]\n
"},{"location":"modules/mod_amp/#xep-support","title":"XEP Support","text":"

What follows is a short description of which parts of the XEP-0079 specification mod_amp supports.

2.1.1 Service Discovery

  • Both the service discovery information response (Ex.1, 2) and the request/response for individual actions and conditions (Ex.3, 4) are supported.

2.1.2 Specifying Semantics

  • \"Per-hop\" rule semantics are not supported, i.e. ignored.

2.2 Server Processing

  • 2.2.1 Validating Semantics: Performed as in the XEP. The first message to fail validation determines the error message.
  • 2.2.2 supported to spec.
  • 2.2.3 supported to spec.
  • 2.2.4 supported for actions: error and notify.
  • 2.2.5 supported for events: error and notify.

3.3 Defined Conditions

  • 3.3.1 deliver: supported for values: direct, stored, and none. The stored condition works with mod_mam and mod_offline.

    Note

    If both mod_mam and mod_offline are enabled, some delivery conditions may not work correctly.

  • 3.3.2 expire-at: not supported

  • 3.3.3 match-resource: supported

3.4 Defined Actions

  • 3.4.1 alert: not supported
  • 3.4.2 drop: not supported
  • 3.4.3 error: supported
  • 3.4.4 notify: supported. Notifications for the stored and direct conditions are sent as soon as the message has been stored or sent to the recipient.

6. Error Handling

  • 6.2.1 Unsupported Action: supported
  • 6.2.2 Unsupported Condition: supported
  • 6.2.3 Not Acceptable: supported
  • 6.2.4 Service Unavailable is not supported, as it pertains to \"per-hop\" rule processing
  • 6.2.5 Undefined Condition: supported

8. Stream Feature

  • supported

9. Security Considerations

  • Currently, the security measures described in this section have not been implemented. It follows that mod_amp, in its current state, should only be enabled for servers/domains where user presence leaks are not a threat, i.e services where all users can see each other's presence by default.
"},{"location":"modules/mod_amp/#modifications","title":"Modifications","text":"

The following behaviour differs from or extends the guidelines provided in the XEP.

  • The action for the deliver condition with value stored is deferred until the message is stored by mod_mam or mod_offline.
  • The action for the deliver condition with value direct is deferred until the message is sent to the recipient's socket.
"},{"location":"modules/mod_amp/#server-processing-details","title":"Server Processing Details","text":"

When a message with AMP rules is being processed by the server, several system events may occur. For a given event, the rules are processed and each of them can get the matched or undecided status or, if the conditions are not met, it gets no status. If any rules get the matched status, the action for the first of them is performed. After that, the rule list is filtered so that only the undecided ones are left in the message, as they may be matched later.

The following system events are defined:

  • initial check - always occurs first, when the message enters the system.
  • mod_mam failed - mod_mam is enabled but fails to store the message.
  • mod_offline failed - the recipient is offline and mod_offline is enabled but fails to store the message.
  • archived - either mod_mam or mod_offline has successfully stored the message.
  • delivery failed - the message was about to be delivered, but it could not be sent.
  • delivered - the message has been sent to the recipient. Mutually exclusive with delivery failed.

Rule status is determined for each system event in the following way:

  • initial check

    • If the recipient is online, rules for the direct and none values of the deliver condition become undecided, except rules for the direct value with action error or drop, which become matched. If mod_mam is enabled, rules for the stored value of the deliver condition become undecided.
      • If the recipient has a session for the target resource, rules for the exact and any values of the match-resource condition become matched.
      • Otherwise, rules for the other and any values of the match-resource condition become matched.
    • If the recipient is offline:
      • If mod_mam or mod_offline is enabled, rules for the stored and none values of the deliver conditions become undecided, except rules for the stored value with action error or drop, which become matched.
      • If both mod_mam and mod_offline are disabled, rules for the none delivery condition become matched.
  • mod_mam failed

    • If the recipient is online, rules for direct and none values of the deliver condition become undecided.
    • If the recipient is offline, rules for the none value of the deliver condition become matched.
  • mod_offline failed

    • Rules for the none value of the deliver condition become matched.
  • archived

    • If the recipient is online, rules for direct and stored values of the deliver condition become undecided.
    • If the recipient is offline, rules for the stored value of the deliver condition become matched.
  • delivery failed

    • Rules for the none and stored value of the deliver condition become matched.
  • delivered

    • Rules for the direct value of the deliver condition become matched.
"},{"location":"modules/mod_auth_token/","title":"mod_auth_token","text":""},{"location":"modules/mod_auth_token/#module-description","title":"Module Description","text":"

This module implements handling of tokens in an OAuth-like authentication scheme. It provides services necessary to:

  • deserialize/serialize binary tokens received and issued by the server,
  • validate incoming binary tokens, i.e.:
    • check integrity using Message Authentication Codes (MAC) with server-side stored user keys,
    • check validity against the configured validity duration times,
    • check revocation status,
  • handle token requests from logged in users.

The module itself does not implement protocol related details - these are implemented in cyrsasl.erl. Generation of keys necessary to sign binary tokens is delegated to module mod_keystore.erl.

"},{"location":"modules/mod_auth_token/#options","title":"Options","text":""},{"location":"modules/mod_auth_token/#modulesmod_auth_tokenbackend","title":"modules.mod_auth_token.backend","text":"
  • Syntax: non-empty string
  • Default: \"rdbms\"
  • Example: backend = \"rdbms\"

Token storage backend. Currently only \"rdbms\" is supported.

"},{"location":"modules/mod_auth_token/#modulesmod_auth_tokeniqdisctype","title":"modules.mod_auth_token.iqdisc.type","text":"
  • Syntax: string, one of \"one_queue\", \"no_queue\", \"queues\", \"parallel\"
  • Default: \"no_queue\"

Strategy to handle incoming IQ stanzas. For details, please refer to IQ processing policies.

"},{"location":"modules/mod_auth_token/#modulesmod_auth_tokenvalidity_period","title":"modules.mod_auth_token.validity_period","text":"
  • Syntax: TOML table. Each key is either access or refresh. Each value is a nested TOML table with the following mandatory keys: value (non-negative integer) and unit (\"days\", \"hours\", \"minutes\" or \"seconds\").
  • Default: {access = {value = 1, unit = \"hours\"}, refresh = {value = 25, unit = \"days\"}}
  • Example: validity_period.access = {value = 30, unit = \"minutes\"}

Validity periods of access and refresh tokens can be defined independently - specifying one of them does not change the default value for the other one. Validity period configuration for provision tokens happens outside the module since the server does not generate provision tokens - it only validates them.

"},{"location":"modules/mod_auth_token/#required-keys","title":"Required keys","text":"

To read more about the keys MongooseIM makes use of, please refer to mod_keystore documentation.

"},{"location":"modules/mod_auth_token/#token-types","title":"Token types","text":"

Three token types are supported:

  • access tokens: These are short-lived tokens whose grants aren't tracked by the server (i.e. there's no need to store anything in a database). Access tokens can be used as a payload for the X-OAUTH authentication mechanism and grant access to the system. Access tokens can't be revoked. An access token is valid only until its expiry date is reached.

  • refresh tokens: These are longer lived tokens which are tracked by the server and therefore require persistent storage in a relational database. Refresh tokens can be used as a payload for the X-OAUTH authentication mechanism and to grant access to the system. Also they can result in a new set of tokens being returned upon successful authentication. They can be revoked - if a refresh token hasn't been revoked, it is valid until it has expired. On revocation, it immediately becomes invalid. As the server stores information about granted tokens, it can also persistently mark them as revoked.

  • provision tokens: These tokens are generated by a service external to the server. They grant the owner a permission to create an account. A provision token may contain information which the server can use to provision the VCard for the newly created account. Using a provision token to create an account (and inject VCard data) is done similarly to other token types, i.e. by passing it as payload for the X-OAUTH mechanism. The XMPP server has no way of tracking and revoking provision tokens, as they come from an outside source.

"},{"location":"modules/mod_auth_token/#token-serialization-format","title":"Token serialization format","text":"

All tokens (access, refresh, provision) are to be exchanged as Base64 encoded binary data. Serialization format of the token before encoding with Base64 is dependent on its type:

'access' \\0 <BARE_JID> \\0 <EXPIRES_AT> \\0 <MAC>\n\n'refresh' \\0 <BARE_JID> \\0 <EXPIRES_AT> \\0 <SEQUENCE_NO> \\0 <MAC>\n\n'provision' \\0 <BARE_JID> \\0 <EXPIRES_AT> \\0 <VCARD> \\0 <MAC>\n

For example (these tokens are randomly generated, hence the field values don't make much sense - line breaks are inserted only for the sake of formatting, <vCard/> inner XML is snipped):

'access' \\0 Q8@localhost \\0 64875466454\n    \\0 0acd0a66d06934791d046060cf9f1ad3c2abb3274cc7e7d7b2bc7e2ac4453ed774b6c6813b40ebec2bbc3774d59d4087\n\n'refresh' \\0 qp@localhost \\0 64875466457 \\0 6\n    \\0 8f57cb019cd6dc6e7779be165b9558611baf71ee4a40d03e77b78b069f482f96c9d23b1ac1ef69f64c1a1db3d36a96ad\n\n'provision' \\0 Xmi4@localhost \\0 64875466458 \\0 <vCard>...</vCard>\n    \\0 86cd344c98b345390c1961e12cd4005659b4b0b3c7ec475bde9acc9d47eec27e8ddc67003696af582747fb52e578a715\n
"},{"location":"modules/mod_auth_token/#requesting-access-or-refresh-tokens-when-logged-in","title":"Requesting access or refresh tokens when logged in","text":"
<iq type='get' to='john@localhost' id='123'>\n    <query xmlns='erlang-solutions.com:xmpp:token-auth:0'/>\n</iq>\n

To request access and refresh tokens for the first time, a client should send an IQ stanza after successfully authenticating using some other method.

"},{"location":"modules/mod_auth_token/#token-response-format","title":"Token response format","text":"

Requested tokens are returned by the server wrapped in an IQ stanza with the following fields:

  • id: value taken from the request IQ stanza
  • type: result
  • from: bare user JID
  • to: full user JID

Example response (encoded tokens have been truncated in this example):

<iq  id='123' type='result' from='john@localhost' to='john@localhost/res1'>\n    <items xmlns='erlang-solutions.com:xmpp:token-auth:0'>\n        <access_token>cmVmcmVzaAGQ1Mzk1MmZlYzhkYjhlOTQzM2UxMw==</access_token>\n        <refresh_token>cmVmcmVzaAGQ1Mzk1MmZlYzhkYjhlOTQzM2UxMw==</refresh_token>\n    </items>\n</iq>\n

Once a client has obtained a token, they may start authenticating using the X-OAUTH SASL mechanism when reaching the authentication phase of an XMPP connection initiation.

"},{"location":"modules/mod_auth_token/#login-with-access-or-refresh-token","title":"Login with access or refresh token","text":"

In order to log into the XMPP server using a previously requested token, a client should send the following stanza:

<auth xmlns=\"urn:ietf:params:xml:ns:xmpp-sasl\" mechanism=\"X-OAUTH\">\ncmVmcmVzaAGQ1Mzk1MmZlYzhkYjhlOTQzM2UxMw== \n</auth>\n

The Base64 encoded content is a token obtained prior to authentication. Authentication will succeed unless the token used is expired or revoked, or the keys required for MAC verification could not be found by the server.

When using a refresh token to authenticate with the server, the server will respond with a new access token:

<success xmlns=\"urn:ietf:params:xml:ns:xmpp-sasl\">\ncmVmcmVzaAGQ1Mzk1MmZlYzhkYjhlOTQzM2UxMw==\n</success>\n

The above response is to be expected unless the refresh token used is expired or there were some problems processing the key on the server side.

"},{"location":"modules/mod_auth_token/#token-revocation-using-command-line-tool","title":"Token revocation using command line tool","text":"

Refresh tokens issued by the server can be used to:

  • log in a user: as an authentication token,
  • request a new access token with refreshed expiry date.

An administrator may revoke a refresh token:

mongooseimctl token revokeToken --user owner@xmpphost\n

A client can no longer use a revoked token either for authentication or requesting new access tokens. After a client's token has been revoked, in order to obtain a new refresh token a client has to log in using some other method.

Caveat: as of now, the user's session is not terminated automatically on token revocation. Therefore, the user might request a new set of tokens for as long as the session is active, even though their previous token was just revoked (possibly due to a breach / token leak). Moreover, an access token still kept on a compromised device can be used to establish a new session for as long as it's valid - access tokens can't be revoked. To prevent the user from requesting new tokens, an operator can use the mod_admin extension to terminate the user's connection. Access token validity can't be sidestepped right now.

"},{"location":"modules/mod_auth_token/#example-configuration","title":"Example configuration","text":"
[modules.mod_auth_token]\n  validity_period.access = {value = 13, unit = \"minutes\"}\n  validity_period.refresh = {value = 13, unit = \"days\"}\n
"},{"location":"modules/mod_bind2/","title":"mod_bind2","text":""},{"location":"modules/mod_bind2/#module-description","title":"Module Description","text":"

Implements XEP-0386: Bind 2.

"},{"location":"modules/mod_blocking/","title":"mod_blocking","text":""},{"location":"modules/mod_blocking/#module-description","title":"Module Description","text":"

This module implements XEP-0191: Blocking command. The extension allows blocking the whole communication with a user (or a group of users) with a single command. The protocol is much simpler than privacy lists.

"},{"location":"modules/mod_blocking/#options","title":"Options","text":"

None.

"},{"location":"modules/mod_blocking/#example-configuration","title":"Example Configuration","text":"
[modules.mod_blocking]\n

The module is not configurable because internally it is an interface to privacy lists, so settings like storage backend apply to it automatically.

Issuing a blocking command creates a privacy list named \"blocking\" (if it didn't exist), adds to it items being blocked and sets this list as the default. Unblocking contacts removes them from \"blocking\" privacy list.

If the user has other online resources which use privacy lists it may result in a different behaviour per resource; this is normal, and provided for in XEP.

Similar to privacy lists, a blocked contact sees the user as offline no matter what their real status is.

If the contact being blocked is subscribed to the user's presence, they receive an \"unavailable\" presence; when unblocked, they receive the current status of the user.

"},{"location":"modules/mod_bosh/","title":"mod_bosh","text":""},{"location":"modules/mod_bosh/#module-description","title":"Module Description","text":"

This module implements XEP-0206: XMPP Over BOSH (using XEP-0124: Bidirectional-streams Over Synchronous HTTP (BOSH)), allowing clients to connect to MongooseIM over regular HTTP long-lived connections.

If you want to use BOSH, you must enable it both in the listen section of mongooseim.toml (Listener Modules) and as a module.

"},{"location":"modules/mod_bosh/#options","title":"Options","text":""},{"location":"modules/mod_bosh/#modulesmod_boshbackend","title":"modules.mod_bosh.backend","text":"
  • Syntax: string: \"mnesia\" or \"cets\"
  • Default: \"mnesia\"
  • Example: backend = \"mnesia\"

Backend to use for storing BOSH connections.

Warning

The corresponding internal database has to be enabled.

"},{"location":"modules/mod_bosh/#modulesmod_boshinactivity","title":"modules.mod_bosh.inactivity","text":"
  • Syntax: positive integer or the string \"infinity\"
  • Default: 30
  • Example: inactivity = 30

Maximum allowed inactivity time (in seconds) for a BOSH connection. Please note that a long-polling request is not considered inactivity.

"},{"location":"modules/mod_bosh/#modulesmod_boshmax_wait","title":"modules.mod_bosh.max_wait","text":"
  • Syntax: positive integer or the string \"infinity\"
  • Default: \"infinity\"
  • Example: max_wait = 30

This is the longest time (in seconds) that the connection manager will wait before responding to any request during the session.

"},{"location":"modules/mod_bosh/#modulesmod_boshserver_acks","title":"modules.mod_bosh.server_acks","text":"
  • Syntax: boolean
  • Default: false
  • Example: server_acks = true

Enables/disables acks sent by server.

"},{"location":"modules/mod_bosh/#modulesmod_boshmax_pause","title":"modules.mod_bosh.max_pause","text":"
  • Syntax: positive integer
  • Default: 120
  • Example: max_pause = 30

Maximum allowed pause in seconds (e.g. to switch between pages and then resume the connection) that the client may request.

"},{"location":"modules/mod_bosh/#example-configuration","title":"Example Configuration","text":"

In the listener section:

[[listen.http]]\n  port = 5280\n  transport.num_acceptors = 10\n  transport.max_connections = 1024\n\n  [[listen.http.handlers.mod_bosh]]\n    host = \"_\"\n    path = \"/http-bind\"\n

In the module section:

[modules.mod_bosh]\n  inactivity = 20\n  max_wait = \"infinity\"\n  server_acks = true\n  max_pause = 120\n
"},{"location":"modules/mod_cache_users/","title":"mod_cache_users","text":""},{"location":"modules/mod_cache_users/#module-description","title":"Module Description","text":"

mod_cache_users is a module that caches whether a user exists, and possibly stores metadata assigned to them. This is useful, for example, to decide if a message should be stored in MAM or Inbox \u2014 the receiver might not exist, so no message should be stored in their archive or their inbox.

This cache uses segmented cache under the hood, for more details, read the library documentation.

"},{"location":"modules/mod_cache_users/#options","title":"Options","text":""},{"location":"modules/mod_cache_users/#modulesmod_cache_usersstrategy","title":"modules.mod_cache_users.strategy","text":"
  • Syntax: string, one of fifo or lru
  • Default: fifo
  • Example: strategy = \"lru\"

Eviction strategy for the cache. FIFO is simply a queue, that ensures records will eventually be evicted and require reloading; LRU ensures queried records keep moving to the front of the queue, possibly keeping them alive forever.

"},{"location":"modules/mod_cache_users/#modulesmod_cache_userstime_to_live","title":"modules.mod_cache_users.time_to_live","text":"
  • Syntax: integer, in minutes, or the string \"infinity\"
  • Default: 8 * 60 (8h)
  • Example: time_to_live = 480

Time between rotations, that is, the time a single table will live. A record that is inserted in the first table will live as long as this ttl multiplied by the number of tables.

"},{"location":"modules/mod_cache_users/#modulesmod_cache_usersnumber_of_segments","title":"modules.mod_cache_users.number_of_segments","text":"
  • Syntax: integer
  • Default: 3
  • Example: number_of_segments = 3

Number of segments the cache has. The more segments there are, the more fine-grained the cache can be, but the slower queries will be: querying the cache checks the tables in order until a match is found.

"},{"location":"modules/mod_cache_users/#example-configuration","title":"Example configuration","text":"
[modules.mod_cache_users]\n  strategy = \"lru\"\n  time_to_live = 60\n  number_of_segments = 1\n
"},{"location":"modules/mod_caps/","title":"mod_caps","text":""},{"location":"modules/mod_caps/#module-description","title":"Module description","text":"

This module provides a presence-based mechanism for exchanging information about entity capabilities as defined in XEP-0115: Entity Capabilities. Additionally, it filters out PEP messages that the recipient declared (in announced caps) not being capable of handling. It is not this module's responsibility to intercept and answer disco requests routed between clients.

"},{"location":"modules/mod_caps/#options","title":"Options","text":"

This module expects two optional arguments that apply to cache tab:

"},{"location":"modules/mod_caps/#modulesmod_capscache_size","title":"modules.mod_caps.cache_size","text":"
  • Syntax: positive integer
  • Default: 1000
  • Example: cache_size = 2000

The size of a cache_tab (the amount of entries) holding the information about capabilities of each user.

"},{"location":"modules/mod_caps/#modulesmod_capscache_life_time","title":"modules.mod_caps.cache_life_time","text":"
  • Syntax: positive integer
  • Default: 86_400 (24 hours)
  • Example: cache_life_time = 10_000

Time (in seconds) after which entries will be removed.

"},{"location":"modules/mod_caps/#modulesmod_capsbackend","title":"modules.mod_caps.backend","text":"
  • Syntax: string, one of \"mnesia\", \"rdbms\".
  • Default: \"mnesia\"
  • Example: backend = \"mnesia\"
"},{"location":"modules/mod_caps/#example-configuration","title":"Example Configuration","text":"
[modules.mod_caps]\n  cache_size = 2000\n  cache_life_time = 10_000\n
"},{"location":"modules/mod_carboncopy/","title":"mod_carboncopy","text":""},{"location":"modules/mod_carboncopy/#module-description","title":"Module Description","text":""},{"location":"modules/mod_carboncopy/#discovering-support","title":"Discovering Support","text":"

The server uses a disco query to inform if carbons are enabled.

"},{"location":"modules/mod_carboncopy/#enabling-and-disabling-carbons-from-the-client","title":"Enabling and disabling Carbons from the client","text":"

Carbons are not enabled by default. Every client app has to enable carbons to get messages sent to other clients of the user. Carbons are enabled and disabled with an iq stanza with a child element - <enable xmlns='urn:xmpp:carbons:2'/> or <disable xmlns='urn:xmpp:carbons:2'/>.

"},{"location":"modules/mod_carboncopy/#receiving-messages-to-a-bare-jid","title":"Receiving messages to a bare JID","text":"

Each message to a bare JID is forked and sent to all carbon enabled resources of the recipient, and not just to the highest priority resource. Sending multiple copies to the same resource is avoided.

"},{"location":"modules/mod_carboncopy/#receiving-messages-to-full-jid","title":"Receiving messages to full JID","text":"

Each directed message to a full JID is also forwarded to all carbon enabled resources of the recipient. The message is wrapped in the <forwarded xmlns='urn:xmpp:forward:0'></forwarded> tag and directed towards each carbon enabled resource.

"},{"location":"modules/mod_carboncopy/#sending-messages","title":"Sending Messages","text":"

Just as when receiving messages to a full JID, each sent message is forwarded to all carbon enabled resources of recipient. The message is wrapped in the <forwarded xmlns='urn:xmpp:forward:0'></forwarded> tag and is directed towards each carbon enabled resource.

"},{"location":"modules/mod_carboncopy/#private-messages","title":"Private Messages","text":"

Private messages are tagged <private/> and are not forwarded to any carbon enabled resource of the sender and recipient if the to attribute contains a full JID. However, if the message is sent to a bare JID, it is forked to all highest priority resources. This is not done through mod_carboncopy but is an expected outcome.

"},{"location":"modules/mod_carboncopy/#multiple-enabledisable-requests","title":"Multiple enable/disable requests","text":"

Multiple enable/disable requests are not treated as an error even if they come from the same resource.

"},{"location":"modules/mod_carboncopy/#behavior-with-other-modules","title":"Behavior with other modules","text":"
  • mod_offline: Offline messages are delivered as they are. Since only one resource can connect at a time and there is a finite time delay between logins from two resources, mod_carboncopy has no role to play and only one resource can receive offline messages. Other resources can retrieve old messages from the archive.
  • mod_mam: mod_mam covers only direct messages from one user to another. All the forked messages for a message sent with a bare JID are ignored by mod_mam. Similarly, all the carbon messages are also ignored by mod_mam.
"},{"location":"modules/mod_carboncopy/#retrieving-archive-from-multiple-resources","title":"Retrieving archive from multiple resources","text":"

A resource can retrieve archives of messages sent to a specific resource of a friend which will not contain any carbon messages. It will only contain messages directed towards that resource or messages sent with a bare jid when that resource was at the highest priority. A request to mod_mam with a bare JID of the chosen user will retrieve all messages to them from any resource. There are no instances of copies of same messages being sent by mod_mam. This is because mod_mam does not archive carbon messages.

"},{"location":"modules/mod_carboncopy/#testing-with-a-client","title":"Testing with a client","text":"

The module and its behavior have been tested with mod_offline and mod_mam using a desktop client made in Java using the Smack library. The standard Smack library for carbons is able to unpack and read the carbon messages. Also, the standard library supports checking for carbon support by the server using disco and sending enable and disable requests for carbon messages. A client needs to synchronize with mod_offline and mod_mam. Once a client is online and enables carbons, it will not receive all the messages. mod_mam does not capture any carbon messages so it does not send any duplicates during any archive request. Only the simple chat messages are archived and they can be accessed by using the bare JID of the user for whom the archive is requested. For an Erlang-based test suite, please see this.

"},{"location":"modules/mod_carboncopy/#options","title":"Options","text":""},{"location":"modules/mod_carboncopy/#modulesmod_carboncopyiqdisctype","title":"modules.mod_carboncopy.iqdisc.type","text":"
  • Syntax: string, one of \"one_queue\", \"no_queue\", \"queues\", \"parallel\"
  • Default: no_queue

Strategy to handle incoming stanzas. For details, please refer to IQ processing policies.

"},{"location":"modules/mod_carboncopy/#example-configuration","title":"Example Configuration","text":"
[modules.mod_carboncopy]\n  iqdisc.type = \"no_queue\"\n
"},{"location":"modules/mod_csi/","title":"mod_csi","text":""},{"location":"modules/mod_csi/#module-description","title":"Module Description","text":"

Enables XEP-0352: Client State Indication functionality.

The XEP doesn't require any specific server behaviour in response to CSI stanzas, there are only some suggestions. The implementation in MongooseIM will simply buffer all packets (up to a configured limit) when the session is \"inactive\" and will flush the buffer when it becomes \"active\" again.

"},{"location":"modules/mod_csi/#options","title":"Options","text":""},{"location":"modules/mod_csi/#modulesmod_csibuffer_max","title":"modules.mod_csi.buffer_max","text":"
  • Syntax: non-negative integer or the string \"infinity\"
  • Default: 20
  • Example: buffer_max = 40

Buffer size for messages queued when session was inactive.

"},{"location":"modules/mod_csi/#example-configuration","title":"Example Configuration","text":"
[modules.mod_csi]\n  buffer_max = 40\n
"},{"location":"modules/mod_csi/#metrics","title":"Metrics","text":"

If you'd like to learn more about metrics in MongooseIM, please visit MongooseIM metrics page.

Name Type Description (when it gets incremented) [Host, modCSIInactive] spiral A client becomes inactive. [Host, modCSIActive] spiral A client becomes active."},{"location":"modules/mod_disco/","title":"mod_disco","text":""},{"location":"modules/mod_disco/#module-description","title":"Module Description","text":"

Implements XEP-0030: Service Discovery. The module itself provides only the essential disco interface, the actual capabilities announced by Service Discovery are gathered via executing a fold-type hook.

"},{"location":"modules/mod_disco/#options","title":"Options","text":""},{"location":"modules/mod_disco/#modulesmod_discoiqdisctype","title":"modules.mod_disco.iqdisc.type","text":"
  • Syntax: string, one of \"one_queue\", \"no_queue\", \"queues\", \"parallel\"
  • Default: \"one_queue\"

Strategy to handle incoming stanzas. For details, please refer to IQ processing policies.

"},{"location":"modules/mod_disco/#modulesmod_discoextra_domains","title":"modules.mod_disco.extra_domains","text":"
  • Syntax: array of strings, valid domain names
  • Default: no extra domains
  • Example: extra_domains = [\"custom_domain\"]

Adds domains that are not registered with other means to a local item announcement (response to http://jabber.org/protocol/disco#items IQ get). Please note that mod_disco doesn't verify these domains, so if no handlers are registered later for them, a client will receive a service-unavailable error for every stanza sent to one of these hosts.

"},{"location":"modules/mod_disco/#modulesmod_discoserver_info","title":"modules.mod_disco.server_info","text":"
  • Syntax: array of tables described below
  • Default: no additional server info
  • Example:
    server_info = [\n                {name = \"abuse-address\", urls = [\"admin@example.com\"]}\n              ]\n
    Adds extra disco information to all or chosen modules. New fields will be added in a manner compliant with XEP-0157: Contact Addresses for XMPP Services.

Keys and their values for each entry:

  • name - required, a non-empty string with the name of the field
  • urls - required, an array of valid addresses
  • modules - optional, an array of module names for which the additional server information is to be returned. By default the server information is returned for all modules.
"},{"location":"modules/mod_disco/#modulesmod_discousers_can_see_hidden_services","title":"modules.mod_disco.users_can_see_hidden_services","text":"
  • Syntax: boolean
  • Default: true
  • Example: users_can_see_hidden_services = false

MongooseIM node with this option set to false will exclude \"hidden components\" from disco results sent to clients (identified by bare or full JID). Other entities, with empty username part in their JIDs (e.g. component.example.com), will still receive full disco results.

"},{"location":"modules/mod_disco/#example-configuration","title":"Example Configuration","text":"
[modules.mod_disco]\n  iqdisc.type = \"one_queue\"\n  extra_domains = [\"some_domain\", \"another_domain\"]\n  server_info = [\n    {name = \"abuse-address\", urls = [\"admin@example.com\"]},\n    {name = \"friendly-spirits\", urls = [\"spirit1@localhost\", \"spirit2@localhost\"], modules = [\"mod_muc\", \"mod_disco\"]}\n  ]\n  users_can_see_hidden_services = true\n
"},{"location":"modules/mod_domain_isolation/","title":"mod_domain_isolation","text":""},{"location":"modules/mod_domain_isolation/#module-description","title":"Module Description","text":"

This module limits message passing between domains. When it is enabled, users won't be able to contact each other if they are registered in different domains.

"},{"location":"modules/mod_domain_isolation/#options","title":"Options","text":"

This module has no configuration. Putting the following entry in the config file is enough.

"},{"location":"modules/mod_domain_isolation/#example-configuration","title":"Example configuration","text":"
[modules.mod_domain_isolation]\n
"},{"location":"modules/mod_event_pusher/","title":"mod_event_pusher","text":""},{"location":"modules/mod_event_pusher/#module-description","title":"Module Description","text":"

This module is a generic interface for event-pushing backends. It defines a single callback, push_event/2 that forwards the event to all registered backends. Each backend decides how and if to handle the event in its push_event/2 implementation.

Currently supported backends include http, push, rabbit and sns. Refer to their specific documentation to learn more about their functions and configuration options.

"},{"location":"modules/mod_event_pusher/#how-it-works","title":"How it works","text":"

The events are standardized as records that can be found in the mod_event_pusher_events.hrl file. Common events like user presence changes (offline and online), chat and groupchat messages (incoming and outgoing) are already handled in the mod_event_pusher_hook_translator module, which is a proxy between various hooks and the push_event/2 handler.

Warning

This module does not support dynamic domains.

"},{"location":"modules/mod_event_pusher/#configuration","title":"Configuration","text":"

Each backend is configured in a corresponding subsection. The example below enables all backends. The [modules.mod_event_pusher] section itself is omitted - this is allowed in TOML, because the presence of a subsection implies that the corresponding parent section is also present.

Note

Some backends require configuring connection pools in the outgoing_pools section. See the detailed documentation for each backend.

[modules.mod_event_pusher.sns]\n  presence_updates_topic = \"user_presence_updated\"\n  pm_messages_topic = \"user_message_sent\"\n  muc_messages_topic = \"user_messagegroup_sent\"\n  sns_host = \"eu-west-1.amazonaws.com\"\n  region = \"eu-west-1\"\n  access_key_id = \"AKIAIOSFODNN7EXAMPLE\"\n  secret_access_key = \"wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY\"\n  account_id = \"123456789012\"\n\n[modules.mod_event_pusher.push]\n  wpool.workers = 200\n\n[modules.mod_event_pusher.http]\n  handlers = [{path = \"/notifications\"}]\n\n[modules.mod_event_pusher.rabbit]\n
"},{"location":"modules/mod_event_pusher_http/","title":"HTTP backend","text":""},{"location":"modules/mod_event_pusher_http/#module-description","title":"Module description","text":"

This module is a backend of mod_event_pusher that enables forwarding certain events (messages, presence, etc.) via HTTP to external services such as push (by mobile, email or SMS), big data, or analytics services.

"},{"location":"modules/mod_event_pusher_http/#how-it-works","title":"How it works","text":"

The module hooks on all packets sent by connected users. When the hook is triggered, the module:

  • runs a callback module's should_make_req/6 function to see if a notification should be sent
  • runs a callback module's prepare_headers/7 to get http headers to be used
  • runs a callback module's prepare_body/7
  • sends a POST request composed of {Host::binary(), Sender::binary(), Receiver::binary(), Message::binary()} to the http notification server

You can configure multiple handlers e.g. for sending various types of messages to different HTTP servers.

"},{"location":"modules/mod_event_pusher_http/#prerequisites","title":"Prerequisites","text":"

This module uses a connection pool created by mongoose_http_client. It must be defined in the outgoing_pools settings.

"},{"location":"modules/mod_event_pusher_http/#options","title":"Options","text":""},{"location":"modules/mod_event_pusher_http/#modulesmod_event_pusherhttphandlers","title":"modules.mod_event_pusher.http.handlers","text":"
  • Syntax: array of TOML tables with the keys described below
  • Default: empty list

A list of handler definitions. All handlers are applied for each event.

"},{"location":"modules/mod_event_pusher_http/#modulesmod_event_pusherhttphandlerspool_name","title":"modules.mod_event_pusher.http.handlers.pool_name","text":"
  • Syntax: non-empty string
  • Default: \"http_pool\"
  • Example: pool_name = \"http_pool\"

Name of the pool to use to connect to the HTTP server (as defined in outgoing_pools).

"},{"location":"modules/mod_event_pusher_http/#modulesmod_event_pusherhttphandlerspath","title":"modules.mod_event_pusher.http.handlers.path","text":"
  • Syntax: string
  • Default: \"\"
  • Example: path = \"/notifications\"

Path part of an URL to which a request should be sent (will be appended to the pool's prefix path).

"},{"location":"modules/mod_event_pusher_http/#modulesmod_event_pusherhttphandlerscallback_module","title":"modules.mod_event_pusher.http.handlers.callback_module","text":"
  • Syntax: string
  • Default: \"mod_event_pusher_http_defaults\"
  • Example: callback_module = \"mod_event_pusher_http_notifications\"

Name of a module which should be used to check whether a notification should be sent. The default callback module, mod_event_pusher_http_defaults, sends notifications for all non-empty chat messages. You can use this module as a starting point for developing a custom one.

"},{"location":"modules/mod_event_pusher_http/#example-configuration","title":"Example configuration","text":"
[outgoing_pools.http.http_pool]\n  scope = \"global\"\n  workers = 50\n\n  [outgoing_pools.http.http_pool.connection]\n    host = \"http://localhost:8000\"\n    path_prefix = \"/webservice\"\n    request_timeout = 2000\n\n[modules.mod_event_pusher.http]\n  handlers = [{pool_name = \"http_pool\", path = \"/notifications\"}]\n

Notifications will be POSTed to http://localhost:8000/webservice/notifications. TOML also allows to specify the handler in its own subsection.

[[modules.mod_event_pusher.http.handlers]]\n  pool_name = \"http_pool\"\n  path = \"/notifications\"\n

This alternative syntax is useful for specifying multiple handlers with options:

[[modules.mod_event_pusher.http.handlers]]\n  pool_name = \"http_pool\"\n  path = \"/notifications\"\n  callback_module = \"mod_event_pusher_http_notifications\"\n\n[[modules.mod_event_pusher.http.handlers]]\n  pool_name = \"http_pool\"\n  path = \"/alerts\"\n  callback_module = \"mod_event_pusher_http_alerts\"\n

Here, some notifications will be POSTed to http://localhost:8000/webservice/notifications and some to http://localhost:8000/webservice/alerts, depending on implementation of should_make_req/6 in the two callback modules.

"},{"location":"modules/mod_event_pusher_http/#default-payload-format","title":"Default payload format","text":"

The default HTTP event pusher sends a POST request with Content-Type application/x-www-form-urlencoded. The form has the following fields:

  • author: name of the user who authored the message
  • server: name of the server from where the message originates
  • receiver: name of the user who the message is for
  • message: content of <body> element of the message

The contents of the author, server and receiver fields are processed by stringprep. As a result, these values are all lower case.

"},{"location":"modules/mod_event_pusher_http/#example","title":"Example","text":"

Below is an example of what the body of an HTTP POST request can look like:

\"author=alice&server=localhost&receiver=bob&message=Hi, Bob!\"\n

"},{"location":"modules/mod_event_pusher_http/#metrics","title":"Metrics","text":"

If you'd like to learn more about metrics in MongooseIM, please visit MongooseIM metrics page.

Name Type Description (when it gets incremented) [Host, mod_event_pusher_http, sent] spiral An HTTP notification is sent successfully. [Host, mod_event_pusher_http, failed] spiral An HTTP notification failed. [Host, mod_event_pusher_http, response_time] histogram Does not include timings of failed requests."},{"location":"modules/mod_event_pusher_push/","title":"Push backend","text":""},{"location":"modules/mod_event_pusher_push/#module-description","title":"Module Description","text":"

This module is a backend for mod_event_pusher that implements XEP-0357: Push Notifications. It provides push notification data to the service that delivers actual notifications to a client device.

We've prepared a detailed tutorial for a proper push notifications setup on both client and server side.

Please make sure that clients provide all form fields required by the specified PubSub node. Some publish errors may result in disabling push notifications for the specific device until it attempts to enable them again.

This module is very easy to enable, just paste the following to your MongooseIM configuration file:

[modules.mod_event_pusher]\n  push.wpool.workers = 100\n

And that's basically it. You have just enabled the push notification support with 100 asynchronous workers that will handle all push notification related work.

"},{"location":"modules/mod_event_pusher_push/#options","title":"Options","text":""},{"location":"modules/mod_event_pusher_push/#modulesmod_event_pusherpushiqdisctype","title":"modules.mod_event_pusher.push.iqdisc.type","text":"
  • Syntax: string, one of \"one_queue\", \"no_queue\", \"queues\", \"parallel\"
  • Default: \"one_queue\"

Strategy to handle incoming stanzas. For details, please refer to IQ processing policies.

"},{"location":"modules/mod_event_pusher_push/#modulesmod_event_pusherpushbackend","title":"modules.mod_event_pusher.push.backend","text":"
  • Syntax: string, one of \"mnesia\", \"rdbms\"
  • Default: \"mnesia\"
  • Example: backend = \"rdbms\"

Backend to use for storing the registrations.

"},{"location":"modules/mod_event_pusher_push/#modulesmod_event_pusherpushwpool","title":"modules.mod_event_pusher.push.wpool","text":"
  • Syntax: TOML table with worker pool options
  • Default: see description
  • Example: wpool.workers = 200

Pool options that will be passed to the worker_pool library that handles all the requests. The options allowed here are the same as for the outgoing connection pools. The only difference is that the default strategy is \"available_worker\".

"},{"location":"modules/mod_event_pusher_push/#modulesmod_event_pusherpushplugin_module","title":"modules.mod_event_pusher.push.plugin_module","text":"
  • Syntax: non-empty string
  • Default: \"mod_event_pusher_push_plugin_defaults\"
  • Example: plugin_module = \"mod_event_pusher_push_plugin_defaults\"

The module implementing mod_event_pusher_push_plugin behaviour, used for dynamic configuration of push notifications. See the relevant section for more details.

"},{"location":"modules/mod_event_pusher_push/#modulesmod_event_pusherpushvirtual_pubsub_hosts","title":"modules.mod_event_pusher.push.virtual_pubsub_hosts","text":"
  • Syntax: array of strings
  • Default: []
  • Example: virtual_pubsub_hosts = [\"host1\", \"host2\"]

The list of \"simulated\" Publish-Subscribe domains. You may use the @HOST@ pattern in the domain name. It will automatically be replaced by a respective XMPP domain (e.g. localhost). See the relevant section for more details.

"},{"location":"modules/mod_event_pusher_push/#virtual-pubsub-hosts","title":"Virtual PubSub hosts","text":"

If a notification is published to one of the configured domains, the internal push notification hook is executed in MongooseIM instead of the XEP-0357 typical behaviour. If an existing PubSub domain is added to this list, it will be shadowed in the push notifications context. To ensure complete shadowing of all the PubSub subdomains you must use the @HOST@ pattern, otherwise only the subdomain of the user is shadowed. It enables easy migration from PubSub-full deployments to PubSub-less variants.

"},{"location":"modules/mod_event_pusher_push/#migration-from-xep-0357-to-virtual-hosts","title":"Migration from XEP-0357 to virtual hosts","text":"

This is an example of how you can migrate the existing setup to the new model. PubSub service still exists, just for the case of a user attempting to create a node. However, its domain is overridden for the purpose of sending push notifications. Please note the value of virtual_pubsub_hosts option. \"pubsub.@HOST@\" is the default domain for mod_pubsub.

[modules.mod_pubsub]\n  plugins = [\"push\"] # mandatory minimal config\n\n[modules.mod_event_pusher.push]\n  backend = \"mnesia\" # optional\n  wpool.workers = 200 # optional\n  plugin_module = \"mod_event_pusher_push_plugin_defaults\" # optional\n  virtual_pubsub_hosts = [\"pubsub.@HOST@\"]\n
"},{"location":"modules/mod_event_pusher_push/#advantages","title":"Advantages","text":"
  • Versatility: PubSub-less and PubSub-full mechanisms can be configured with different domains and therefore give fine-grained control over the push notification handling
  • Takes advantage of the PubSub-less efficiency when told to do so
  • Fully compliant with XEP-0357: Push Notifications and therefore with most 3rd party client libraries
  • Ideal for migrations to PubSub-less deployments.
"},{"location":"modules/mod_event_pusher_push/#drawbacks","title":"Drawbacks","text":"
  • More complex configuration on the server side
  • Pays the PubSub performance penalty when the PubSub path is taken
"},{"location":"modules/mod_event_pusher_push/#plugin-module","title":"Plugin module","text":"

You can also control the format of the \"sender\" of the push notification (which ultimately becomes the title of push notification) and filter which messages will trigger the notification. In order to achieve that, you need to create a plugin module that implements the mod_event_pusher_push_plugin behaviour and enable this plugin in the plugin_module section as above.

A plugin module handles the dynamic configuration of push notifications. It contains the filtering and custom logic for notifying about messages.

Two plugin implementations are provided. They offer different behaviour considering unacknowledged messages when using XEP-0198: Stream Management:

  • mod_event_pusher_push_plugin_defaults, which implements an older behaviour. It does not notify the user of unacknowledged messages immediately after detecting a lost connection to the user.
  • mod_event_pusher_push_plugin_enhanced, which pushes notifications as soon as the server detects that the client has disconnected and waits for stream resumption (by an unack_msg_event event generated by the unacknowledged_message hook). This immediate notification prevents the unneeded suspension of the client's application, if there are no unacknowledged messages yet. This allows to create more power efficient mobile applications.

In order for the enhanced plugin to work, each device (an entity that may receive push notifications) should be uniquely identified. The only correct way to identify a device from the XMPP standpoint is to use the data provided with the enable stanza. Because of that, each device should (re)enable the push notifications at the beginning of each and every connection.

"},{"location":"modules/mod_event_pusher_push/#custom-plugins","title":"Custom plugins","text":"

A custom module implementing the optional callbacks of mod_event_pusher_push_plugin may be used as a plugin to change the default behaviour. In the case of not implemented callbacks the defaults are used instead.

"},{"location":"modules/mod_event_pusher_rabbit/","title":"RabbitMQ backend","text":""},{"location":"modules/mod_event_pusher_rabbit/#current-status","title":"Current status","text":"

This module is still in an experimental phase.

"},{"location":"modules/mod_event_pusher_rabbit/#module-description","title":"Module Description","text":"

This module is a backend of mod_event_pusher that enables support for the RabbitMQ integration. Currently there are 5 available notifications:

  • user presence changed - Carries the user id (full jid by default) and a boolean field corresponding to the current user online status.
  • private message sent/received - Carries the user ids (both sender and receiver) along with the message body.
  • group message sent/received - Carries the user id and the room id (full jids by default) along with the message body.

All these notifications are sent as JSON strings to RabbitMQ exchanges. Type of exchanges can be chosen as desired. Each type of the notifications is sent to its dedicated exchange. There are three exchanges created on startup of the module, for presences, private messages and group chat messages related events.

Messages are published to a RabbitMQ server with the routing key set to a user's bare jid (user@domain) and a configurable topic, e.g. alice@localhost.private_message_sent.

The module requires a rabbit pool of AMQP connections to be configured in order to work. It's well advised to read through the Advanced configuration/Outgoing connections section before enabling the module.

"},{"location":"modules/mod_event_pusher_rabbit/#presence-exchange-options","title":"Presence exchange options","text":""},{"location":"modules/mod_event_pusher_rabbit/#modulesmod_event_pusherrabbitpresence_exchangename","title":"modules.mod_event_pusher.rabbit.presence_exchange.name","text":"
  • Syntax: non-empty string
  • Default: \"presence\"
  • Example: name = \"custom_presence_name\"

Defines RabbitMQ presence exchange name.

"},{"location":"modules/mod_event_pusher_rabbit/#modulesmod_event_pusherrabbitpresence_exchangetype","title":"modules.mod_event_pusher.rabbit.presence_exchange.type","text":"
  • Syntax: non-empty string
  • Default: \"topic\"
  • Example: type = \"custom_presence_topic\"

Defines RabbitMQ presence exchange type.

"},{"location":"modules/mod_event_pusher_rabbit/#chat-message-options","title":"Chat message options","text":""},{"location":"modules/mod_event_pusher_rabbit/#modulesmod_event_pusherrabbitchat_msg_exchangename","title":"modules.mod_event_pusher.rabbit.chat_msg_exchange.name","text":"
  • Syntax: non-empty string
  • Default: \"chat_msg\"
  • Example: name = \"custom_msg_name\"

Defines RabbitMQ chat message exchange name.

"},{"location":"modules/mod_event_pusher_rabbit/#modulesmod_event_pusherrabbitchat_msg_exchangetype","title":"modules.mod_event_pusher.rabbit.chat_msg_exchange.type","text":"
  • Syntax: non-empty string
  • Default: \"topic\"
  • Example: type = \"custom_msg_topic\"

Defines RabbitMQ chat message exchange type.

"},{"location":"modules/mod_event_pusher_rabbit/#modulesmod_event_pusherrabbitchat_msg_exchangesent_topic","title":"modules.mod_event_pusher.rabbit.chat_msg_exchange.sent_topic","text":"
  • Syntax: non-empty string
  • Default: \"chat_msg_sent\"
  • Example: sent_topic = \"custom_sent_topic\"

Defines RabbitMQ chat message sent topic name.

"},{"location":"modules/mod_event_pusher_rabbit/#modulesmod_event_pusherrabbitchat_msg_exchangerecv_topic","title":"modules.mod_event_pusher.rabbit.chat_msg_exchange.recv_topic","text":"
  • Syntax: non-empty string
  • Default: \"chat_msg_recv\"
  • Example: recv_topic = \"custom_recv_topic\"

Defines RabbitMQ chat message received topic name.

"},{"location":"modules/mod_event_pusher_rabbit/#group-chat-message-options","title":"Group chat message options","text":""},{"location":"modules/mod_event_pusher_rabbit/#modulesmod_event_pusherrabbitgroupchat_msg_exchangename","title":"modules.mod_event_pusher.rabbit.groupchat_msg_exchange.name","text":"
  • Syntax: non-empty string
  • Default: \"groupchat_msg\"
  • Example: name = \"custom_group_msg_name\"

Defines RabbitMQ group chat message exchange name.

"},{"location":"modules/mod_event_pusher_rabbit/#modulesmod_event_pusherrabbitgroupchat_msg_exchangetype","title":"modules.mod_event_pusher.rabbit.groupchat_msg_exchange.type","text":"
  • Syntax: non-empty string
  • Default: \"topic\"
  • Example: type = \"custom_group_msg_topic\"

Defines RabbitMQ group chat message exchange type.

"},{"location":"modules/mod_event_pusher_rabbit/#modulesmod_event_pusherrabbitgroupchat_msg_exchangesent_topic","title":"modules.mod_event_pusher.rabbit.groupchat_msg_exchange.sent_topic","text":"
  • Syntax: non-empty string
  • Default: \"groupchat_msg_sent\"
  • Example: sent_topic = \"custom_group_sent_topic\"

Defines RabbitMQ group chat message sent topic name.

"},{"location":"modules/mod_event_pusher_rabbit/#modulesmod_event_pusherrabbitgroupchat_msg_exchangerecv_topic","title":"modules.mod_event_pusher.rabbit.groupchat_msg_exchange.recv_topic","text":"
  • Syntax: non-empty string
  • Default: \"groupchat_msg_recv\"
  • Example: recv_topic = \"custom_group_recv_topic\"

Defines RabbitMQ group chat message received topic name.

"},{"location":"modules/mod_event_pusher_rabbit/#example-configuration","title":"Example configuration","text":"
[modules.mod_event_pusher.rabbit]\n  presence_exchange.name =\"presence\"\n  presence_exchange.type = \"topic\"\n  chat_msg_exchange.name = \"chat_msg\"\n  chat_msg_exchange.sent_topic = \"chat_msg_sent\"\n  chat_msg_exchange.recv_topic = \"chat_msg_recv\"\n  groupchat_msg_exchange.name = \"groupchat_msg\"\n  groupchat_msg_exchange.sent_topic = \"groupchat_msg_sent\"\n  groupchat_msg_exchange.recv_topic = \"groupchat_msg_recv\"\n
"},{"location":"modules/mod_event_pusher_rabbit/#json-schema-examples","title":"JSON Schema examples","text":"

The different kinds of notifications deliver slightly different messages. The messages are delivered in a JSON format.

"},{"location":"modules/mod_event_pusher_rabbit/#presence-updates","title":"Presence updates","text":"

The JSON format for an online presence update notification is:

{\n    \"user_id\": \"alice@localhost/res1\",\n    \"present\": true\n}\n

For offline presence updates, the present boolean value is set to false:

{\n    \"user_id\": \"alice@localhost/res1\",\n    \"present\": false\n}\n
"},{"location":"modules/mod_event_pusher_rabbit/#sentreceived-messages","title":"Sent/received messages","text":"

The JSON format for a private message notification is:

{\n    \"to_user_id\": \"bob@localhost/res1\",\n    \"message\": \"Hello, Bob\",\n    \"from_user_id\": \"alice@localhost/res1\"\n}\n

The notification is similar for group messages. For example for \"sent\" events:

{\n    \"to_user_id\": \"muc_publish@muc.localhost\",\n    \"message\": \"Hi, Everyone!\",\n    \"from_user_id\": \"bob@localhost/res1\"\n}\n

and for \"received\" events:

{\n    \"to_user_id\": \"bob@localhost/res1\",\n    \"message\": \"Hi, Everyone!\",\n    \"from_user_id\": \"muc_publish@muc.localhost/alice\"\n}\n
"},{"location":"modules/mod_event_pusher_rabbit/#metrics","title":"Metrics","text":"

The module provides some metrics related to RabbitMQ connections and messages as well. Provided metrics:

name type description (when it gets incremented/decremented) [Host, connections_active] spiral A connection to a RabbitMQ server is opened(+1)/closed(-1). [Host, connections_opened] spiral A connection to a RabbitMQ server is opened. [Host, connections_closed] spiral A connection to a RabbitMQ server is closed. [Host, connections_failed] spiral An attempt to open a connection to a RabbitMQ server failed. [Host, messages_published] spiral A message to a RabbitMQ server is published. [Host, messages_failed] spiral A message to a RabbitMQ server is rejected. [Host, messages_timeout] spiral A message to a RabbitMQ server timed out (wasn't confirmed by the server). [Host, message_publish_time] histogram Amount of time it takes to publish a message to a RabbitMQ server and receive a confirmation. It's measured only for successful messages. [Host, message_payload_size] histogram Size of a message (in bytes) that was published to a RabbitMQ server (including message properties). It's measured only for successful messages.

All the above metrics have a prefix which looks as follows: <xmpp_host>.backends.mod_event_pusher_rabbit.<metric_name>. For example a proper metric name would look like: localhost.backends.mod_event_pusher_rabbit.connections_active

"},{"location":"modules/mod_event_pusher_rabbit/#guarantees","title":"Guarantees","text":"

There are no guarantees. The current implementation uses a \"best effort\" approach which means that we don't care if a message is delivered to a RabbitMQ server. If publisher confirms are enabled and a message couldn't be delivered to the server for some reason (the server sent a negative acknowledgment/didn't send it at all or there was a channel exception) the module just updates the appropriate metrics and prints some log messages. Notice that there might be situations when a message silently gets lost.

"},{"location":"modules/mod_event_pusher_rabbit/#type-of-exchanges","title":"Type of exchanges","text":"

By default all the exchanges used are of type topic. Using topic exchanges gives a lot of flexibility when binding queues to such an exchange by using # and * in binding keys. But flexibility comes at the cost of performance - imagine a scenario where there are thousands of users and AMQP consumers use binding keys for particular users which look like user_N@host.#. In such a case RabbitMQ has to go through all the users in order to find out where a message should be sent to. This operation has proved to be costly. In a load test with 100k users the delay caused by this operation was substantial (about an order of magnitude higher than in a load test with 60k users).

If performance is a top priority go for direct exchanges. Using this type of exchange has proved to work efficiently with 100k users. Keep in mind that it trades flexibility for performance.

"},{"location":"modules/mod_event_pusher_rabbit/#publisher-confirms","title":"Publisher confirms","text":"

By default publisher confirmations are disabled. However, one-to-one confirmations can be enabled (see the RabbitMQ connection setup section). When a worker sends a message to a RabbitMQ server it waits for a confirmation from the server before it starts to process the next message. This approach allows introducing backpressure on a RabbitMQ server connection because the server can reject/not confirm messages when it's overloaded. On the other hand it can cause performance degradation.

"},{"location":"modules/mod_event_pusher_rabbit/#worker-selection-strategy","title":"Worker selection strategy","text":"

The module uses mongoose_wpool for managing worker processes, and the best_worker strategy for choosing a worker is used by default. Different strategies imply different behaviors of the system.

"},{"location":"modules/mod_event_pusher_rabbit/#event-messages-queuing","title":"Event messages queuing","text":"

When the available_worker strategy is in use, all the event messages are queued in a single worker pool manager process state. When a different strategy is set, e.g. best_worker, those messages are placed in worker processes' inboxes. The worker selection strategy can be set in the rabbit pool configuration.

"},{"location":"modules/mod_event_pusher_rabbit/#event-messages-ordering","title":"Event messages ordering","text":"

None of the worker selection strategies ensures that user events will be delivered to a RabbitMQ server properly ordered in time.

"},{"location":"modules/mod_event_pusher_sns/","title":"SNS backend","text":""},{"location":"modules/mod_event_pusher_sns/#module-description","title":"Module Description","text":"

This module is a backend of mod_event_pusher that enables support for the Amazon SNS service. Currently there are 3 available notifications:

  • user presence changed - Carries the user id (bare jid by default) and a boolean field corresponding to the current user online status.
  • private message sent - Carries the user ids (both sender and receiver) along with the message body.
  • group message sent - Carries the user id and the room id (bare jids by default) along with the message body.

All these notifications are sent as a JSON string to Amazon SNS along with custom MessageAttributes (see http://docs.aws.amazon.com/sns/latest/api/API_Publish.html). MessageAttributes can be specified via a plugin module (more details in Options section).

Full topics for notifications (ARN as defined in Amazon Resource Names) are constructed as arn:aws:sns:{region}:{account_id}:{topic} where {region} and {account_id} are substituted with corresponding values from configuration options. {topic} is pulled from configuration option presence_updates_topic, pm_messages_topic or muc_messages_topic based on the notification type.

"},{"location":"modules/mod_event_pusher_sns/#options","title":"Options","text":""},{"location":"modules/mod_event_pusher_sns/#modulesmod_event_pushersnspresence_updates_topic","title":"modules.mod_event_pusher.sns.presence_updates_topic","text":"
  • Syntax: string
  • Default: no default is given
  • Example: presence_updates_topic = \"user_presence_updated\"

Defines Amazon SNS Topic for presence change notifications. Remove this option to disable these notifications.

"},{"location":"modules/mod_event_pusher_sns/#modulesmod_event_pushersnspm_messages_topic","title":"modules.mod_event_pusher.sns.pm_messages_topic","text":"
  • Syntax: string
  • Default: no default is given
  • Example: pm_messages_topic = \"user_message_sent\"

Defines Amazon SNS Topic for private message notifications. Remove this option to disable these notifications.

"},{"location":"modules/mod_event_pusher_sns/#modulesmod_event_pushersnsmuc_messages_topic","title":"modules.mod_event_pusher.sns.muc_messages_topic","text":"
  • Syntax: string
  • Default: no default is given
  • Example: muc_messages_topic = \"user_messagegroup_sent\"

Defines Amazon SNS Topic for group message notifications. Remove this option to disable these notifications.

"},{"location":"modules/mod_event_pusher_sns/#modulesmod_event_pushersnsplugin_module","title":"modules.mod_event_pusher.sns.plugin_module","text":"
  • Syntax: string
  • Default: \"mod_event_pusher_sns_defaults\"
  • Example: plugin_module = \"mod_event_pusher_sns_defaults\"

Sets a callback module used for creating user's GUID used in notifications (from user's JID) and for defining custom attributes attached to a published SNS message.

"},{"location":"modules/mod_event_pusher_sns/#modulesmod_event_pushersnssns_host","title":"modules.mod_event_pusher.sns.sns_host","text":"
  • Syntax: string
  • Default: none, this option is mandatory
  • Example: sns_host = \"sns.eu-west-1.amazonaws.com\"

URL to the Amazon SNS service. The URL may be in virtual host form, and for AWS needs to point at a specific regional endpoint. The scheme, port and path specified in the URL will be used to publish notifications via HTTP POST method.

"},{"location":"modules/mod_event_pusher_sns/#modulesmod_event_pushersnsregion","title":"modules.mod_event_pusher.sns.region","text":"
  • Syntax: string
  • Default: none, this option is mandatory
  • Example: region = \"eu-west-1\"

The AWS region to use for requests.

"},{"location":"modules/mod_event_pusher_sns/#modulesmod_event_pushersnsaccess_key_id","title":"modules.mod_event_pusher.sns.access_key_id","text":"
  • Syntax: string
  • Default: none, this option is mandatory
  • Example: access_key_id = \"AKIAIOSFODNN7EXAMPLE\"

ID of the access key to use for authorization.

"},{"location":"modules/mod_event_pusher_sns/#modulesmod_event_pushersnssecret_access_key","title":"modules.mod_event_pusher.sns.secret_access_key","text":"
  • Syntax: string
  • Default: none, this option is mandatory
  • Example: secret_access_key = \"wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY\"

Secret access key to use for authorization.

"},{"location":"modules/mod_event_pusher_sns/#modulesmod_event_pushersnsaccount_id","title":"modules.mod_event_pusher.sns.account_id","text":"
  • Syntax: string
  • Default: none, this option is mandatory
  • Example: account_id = \"123456789012\"

12 digit number as defined in AWS Account Identifiers to use for creating TopicArn for publishing notifications.

"},{"location":"modules/mod_event_pusher_sns/#modulesmod_event_pushersnspool_size","title":"modules.mod_event_pusher.sns.pool_size","text":"
  • Syntax: positive integer
  • Default: 100
  • Example: pool_size = 100

Worker pool size for publishing notifications

"},{"location":"modules/mod_event_pusher_sns/#modulesmod_event_pushersnspublish_retry_count","title":"modules.mod_event_pusher.sns.publish_retry_count","text":"
  • Syntax: non-negative integer
  • Default: 2
  • Example: publish_retry_count = 2

Retry count in case of a publish error.

"},{"location":"modules/mod_event_pusher_sns/#modulesmod_event_pushersnspublish_retry_time_ms","title":"modules.mod_event_pusher.sns.publish_retry_time_ms","text":"
  • Syntax: non-negative integer
  • Default: 50
  • Example: publish_retry_time_ms = 50

Base exponential backoff time (in ms) for publish errors.

"},{"location":"modules/mod_event_pusher_sns/#example-configuration","title":"Example configuration","text":"
[modules.mod_event_pusher.sns]\n  presence_updates_topic = \"user_presence_updated\"\n  pm_messages_topic = \"user_message_sent\"\n  muc_messages_topic = \"user_messagegroup_sent\"\n  sns_host = \"sns.eu-west-1.amazonaws.com\"\n  region = \"eu-west-1\"\n  access_key_id = \"AKIAIOSFODNN7EXAMPLE\"\n  secret_access_key = \"wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY\"\n  account_id = \"123456789012\"\n
"},{"location":"modules/mod_event_pusher_sns/#json-schema-examples","title":"JSON Schema examples","text":"

The different kinds of notifications deliver slightly different messages. The messages are delivered in a JSON format.

"},{"location":"modules/mod_event_pusher_sns/#presence-updates","title":"Presence updates","text":"

The JSON format for an online presence update notification is:

{\n    \"user_id\": \"alice@localhost\",\n    \"present\": true\n}\n

For offline presence updates, the present boolean value is set to false:

{\n    \"user_id\": \"alice@localhost\",\n    \"present\": false\n}\n
"},{"location":"modules/mod_event_pusher_sns/#sent-messages","title":"Sent messages","text":"

The JSON format for a private message notification is:

{\n    \"to_user_id\": \"bob@localhost\",\n    \"message\": \"Hello, Bob\",\n    \"from_user_id\": \"alice@localhost\"\n}\n

The notification is similar for group messages except that the to_user_id is the recipient room JID. For example:

{\n    \"to_user_id\": \"muc_publish@muc.localhost\",\n    \"message\": \"Hi, Everyone!\",\n    \"from_user_id\": \"bob@localhost\"\n}\n
"},{"location":"modules/mod_extdisco/","title":"mod_extdisco","text":""},{"location":"modules/mod_extdisco/#module-description","title":"Module Description","text":"

Implements XEP-0215: External Service Discovery for discovering information about services external to the XMPP network. The main use-case is to help discover STUN/TURN servers to allow for negotiating media exchanges.

"},{"location":"modules/mod_extdisco/#options","title":"Options","text":""},{"location":"modules/mod_extdisco/#modulesmod_extdiscoiqdisctype","title":"modules.mod_extdisco.iqdisc.type","text":"
  • Syntax: string, one of \"one_queue\", \"no_queue\", \"queues\", \"parallel\"
  • Default: \"no_queue\"

Strategy to handle incoming IQ stanzas. For details, please refer to IQ processing policies.

"},{"location":"modules/mod_extdisco/#modulesmod_extdiscoservice","title":"modules.mod_extdisco.service","text":"
  • Syntax: TOML array with one table for each advertised service - see below for details.
  • Default: [] - no services advertised
"},{"location":"modules/mod_extdisco/#service-options","title":"Service options","text":"

Each advertised service is specified as a TOML table containing the following options listed below.

"},{"location":"modules/mod_extdisco/#modulesmod_extdiscoservicetype","title":"modules.mod_extdisco.service.type","text":"
  • Syntax: string
  • Default: none, this option is required
  • Example: type = \"stun\"

Service type, common values are \"stun\", \"turn\", \"ftp\".

"},{"location":"modules/mod_extdisco/#modulesmod_extdiscoservicehost","title":"modules.mod_extdisco.service.host","text":"
  • Syntax: string
  • Default: none, this option is required
  • Example: host = \"192.168.0.2\"

Hostname or an IP address where the service is hosted.

"},{"location":"modules/mod_extdisco/#modulesmod_extdiscoserviceport","title":"modules.mod_extdisco.service.port","text":"
  • Syntax: integer, between 0 and 65535, non-inclusive
  • Default: none, this option is recommended
  • Example: port = 3478

The communications port to be used at the host.

"},{"location":"modules/mod_extdisco/#modulesmod_extdiscoservicetransport","title":"modules.mod_extdisco.service.transport","text":"
  • Syntax: string, one of \"udp\", \"tcp\"
  • Default: none, this option is optional
  • Example: transport = \"udp\"

The underlying transport protocol to be used when communicating with the service.

"},{"location":"modules/mod_extdisco/#modulesmod_extdiscoserviceusername","title":"modules.mod_extdisco.service.username","text":"
  • Syntax: string
  • Default: none, this option is optional
  • Example: username = \"username\"

A service-generated username for use at the service.

"},{"location":"modules/mod_extdisco/#modulesmod_extdiscoservicepassword","title":"modules.mod_extdisco.service.password","text":"
  • Syntax: string
  • Default: none, this option is optional
  • Example: password = \"password\"

A service-generated password for use at the service.

"},{"location":"modules/mod_extdisco/#example-configuration","title":"Example Configuration","text":"
[modules.mod_extdisco]\n\n  [[modules.mod_extdisco.service]]\n    type = \"stun\"\n    host = \"127.0.0.1\"\n    port = 3478\n    transport = \"udp\"\n    username = \"username\"\n    password = \"password\"\n\n  [[modules.mod_extdisco.service]]\n    type = \"stun\"\n    host = \"stun.host.com\"\n    port = 3478\n    transport = \"tcp\"\n    username = \"username2\"\n    password = \"password2\"\n\n  [[modules.mod_extdisco.service]]\n    type = \"turn\"\n    host = \"turn.host.com\"\n
"},{"location":"modules/mod_global_distrib/","title":"mod_global_distrib","text":""},{"location":"modules/mod_global_distrib/#module-description","title":"Module Description","text":"

This module enables global distribution of a single XMPP domain. With mod_global_distrib, multiple distinct MongooseIM clusters can share a single domain name and route messages to the specific datacenter where the recipient is available.

Warning

This module does not support dynamic domains.

"},{"location":"modules/mod_global_distrib/#how-it-works","title":"How it works","text":"

There are multiple subsystems that cooperate to enable global distribution:

"},{"location":"modules/mod_global_distrib/#metadata-sharing","title":"Metadata sharing","text":"

Sharing of metadata is done by leveraging a database with cross-datacenter replication. Currently, only Redis is supported, with Dynomite layer for replication. The most important metadata stored in the database is a session/routing table. The table stores mappings between currently logged users' JIDs and datacenters on which they are logged in. Because access to the session table is very frequent, its entries are additionally cached on each node.

To preserve consistency between database instances, all data is stored with a set expiration time and is periodically refreshed. Each node of each cluster is responsible for refreshing its own data. Thus, in an event of a netsplit, datacenters' information about unreachable datacenters' users will expire, as those users are now unreachable; but once the connection is reestablished, the data will be replicated again as datacenters refresh their entries. Additionally, to prevent edge cases where an incoming message is received and replied to before the datacenter learns about the sender's host, an incoming message also carries information about its origin which may be used to temporarily update the local routing table.

"},{"location":"modules/mod_global_distrib/#redis-entries","title":"Redis entries","text":"

Following structures are stored in Redis:

  • JID mappings are stored as normal key-value entries, where user's JID (full and bare) is the key, and the value is the local hostname where the user is logged in. Example: \"user1@example.com/res\" -> \"dc2.example.com\".
  • Domains of components and services registered on the globally distributed host are stored in per-node set structures where the key is <local_host>#<node_name>#{domains}, and the values are the domain names. Example: \"dc1.example.com#mongoose1@dc1.example.com#{domains}\" -> {\"muc1.example.com\", \"muc2.example.com\"}.
  • Domains of non-hidden components and services (see the XMPP Components documentation) are stored in per-node set structures where the key is <local_host>#<node_name>#{public_domains}, and the values are the domain names.
  • Declared endpoints available on a node are similarly stored in a per-node set structure where the key is <local_host>#<node_name>#{endpoints} and the values represent the TCP endpoints of the node. Example: \"dc1.example.com#mongoose1@dc1.example.com#{endpoints}\" -> {\"172.16.2.14#8231\", \"2001:0db8:85a3:0000:0000:8a2e:0370:7334#8882\"}.
  • Nodes that comprise a host are stored in a set structure with key <local_host>#{nodes} and values being the names of the nodes. Example: \"dc2.example.com#{nodes}\" -> {\"node1@dc2.example.com\", \"node3@dc2.example.com\"}.
  • Hosts are stored in a set with key hosts and values being the individual local XMPP domains. Example: \"hosts\" -> {\"dc1.example.com\", \"dc2.example.com\"}.
"},{"location":"modules/mod_global_distrib/#message-routing","title":"Message routing","text":"

mod_global_distrib establishes its own listeners and dedicated TCP/TLS connections for message routing. Each node listens on preconfigured endpoints, where each node in a datacenter can have any number of endpoints, including none. The endpoints are shared between all datacenters. If a node becomes unavailable, its endpoint entries in the database will expire and will be read once the node comes back online.

Connections between nodes in distinct datacenters are opened on the first request and then maintained as long as the destination endpoint is present in Redis. When a node needs to connect to a remote cluster, a specified number of connections are opened to every endpoint reported by that datacenter. Global distribution features an automatic rebalancing mechanism that will \"disable\" connections when their respective endpoints disappear from Redis. A new pool of connections is created each time a new endpoint is recognised. Whenever a node receives a message that is determined (by consulting the session table) to be destined for another datacenter, the routing procedure in the current datacenter is interrupted, the message is transported to the other datacenter via the dedicated connections, and the routing procedure is restarted there by a dedicated (but potentially short-lived) worker process bound to the sender's JID (or subdomain if the sender's JID does not belong to the globally distributed domain). The client's process binds itself to a connection to a remote datacenter on first use, and henceforth always uses this connection to route messages directed to this datacenter. This - along with the dedicated worker process on the receiver's side - ensures that simple cross-datacenter messages between two entities are delivered in their sending order.

It may happen that a message is rerouted through multiple datacenters (e.g. if the user has reconnected to a different datacenter while the message was already in flight). Messages are given a TTL parameter by the source datacenter so that they cannot be rerouted indefinitely. The TTL is decreased on each reroute. Note that in the edge case of multi-datacenter routing, the messages may be received out-of-order at the destination datacenter.

"},{"location":"modules/mod_global_distrib/#bounce","title":"Bounce","text":"

Consider the following edge case: user U1 logged into datacenter DC2 and then quickly reconnected to datacenter DC3. Because the session table has not yet been replicated, DC2 does not see U1 in the session table, while a different datacenter DC1 still sees U1 logged into DC2. When U2, logged into DC1, sends a message to U1, it will be rerouted to DC2 even though the user is now available at DC3.

Bounce mechanism solves this and similar edge cases by storing messages for which there is no known routing in the current datacenter. The stored messages are then assigned a bounce-TTL value and periodically - with backoff - are attempted to be routed again. In the example above, the message from U2 would be temporarily stored at DC2 and rerouted successfully once DC2 learns (via replication) that U1 is available at DC3.

Note

Bounce mechanism, similarly to multi-datacenter routing, may result in out-of-order messages being received at the destination datacenter.

"},{"location":"modules/mod_global_distrib/#metrics","title":"Metrics","text":"

Global distribution modules expose several per-datacenter metrics that can be used to monitor health of the system. All metrics begin with global.mod_global_distrib prefix:

  • outgoing.messages.<host>: number of cross-datacenter messages sent by this cluster to a given host.
  • incoming.messages.<host>: number of cross-datacenter messages received by this cluster from a given host.
  • incoming.transfer_time.<host> [us]: time elapsed between sending and receiving the message over the network from a given host. The duration is calculated using wall clock times on sender and receiver node.
  • outgoing.queue_time.<host> [us]: time elapsed while message waits in a queue of a sender's connection to a given host. High value of this metric may be remedied by increasing the number of connections to other hosts.
  • incoming.queue_time [us]: time elapsed while message waits in routing worker's queue. This value is not reported per-host as routing workers are bound to the sender's JID.
  • incoming.established: incremented when a new connection is established from another cluster. At this point the origin domain of the cluster is not known, so this metric is common for all of them.
  • incoming.first_packet.<host>: incremented when a receiver process gets the first packet from a remote cluster and learns its local domain.
  • incoming.closed.<host>: incremented when an incoming connection gets closed.
  • incoming.errored.<host>: incremented when an incoming connection gets closed with an error.
  • outgoing.established.<host>: incremented when an outgoing connection is established.
  • outgoing.closed.<host>: incremented when an outgoing connection gets closed.
  • outgoing.errored.<host>: incremented when an outgoing connection gets closed with an error.
  • mapping_fetch_time [us]: time spent on fetching an entry from the session table, cached or otherwise.
  • mapping_fetches: number of fetches of session table entries, cached or otherwise.
  • mapping_cache_misses: number of fetches of session table entries that hit the database.
  • delivered_with_ttl: A histogram of packets' TTL values recorded when the global routing layer decides to route them locally (but not due to TTL = 0).
  • stop_ttl_zero: A number of packets that weren't processed by global routing due to TTL=0.
  • bounce_queue_size: a number of messages enqueued for rerouting (the value of this metric is individual per MongooseIM node!).
"},{"location":"modules/mod_global_distrib/#notes","title":"Notes","text":"
  • You should only start mod_global_distrib by configuring it under modules option in mongooseim.toml. Do not add it as host-specific module via host_config.
  • Do not use mod_offline on domains given via global_host or local_host options, as it will decrease messaging robustness; the users logged in other datacenters will not be registered as available by mod_offline, and so the messages will not be flushed.
"},{"location":"modules/mod_global_distrib/#options","title":"Options","text":""},{"location":"modules/mod_global_distrib/#modulesmod_global_distribglobal_host","title":"modules.mod_global_distrib.global_host","text":"
  • Syntax: string
  • Default: none, this option is mandatory
  • Example: global_host = \"example.com\"

The XMPP domain that will be shared between datacenters.

Note

This needs to be one of the domains given in general.hosts option in mongooseim.toml.

"},{"location":"modules/mod_global_distrib/#modulesmod_global_distriblocal_host","title":"modules.mod_global_distrib.local_host","text":"
  • Syntax: string
  • Default: none, this option is mandatory
  • Example: local_host = \"datacenter1.example.com\"

XMPP domain that maps uniquely to the local datacenter; it will be used for inter-center routing.

Note

This needs to be one of the domains given in general.hosts option in mongooseim.toml.

"},{"location":"modules/mod_global_distrib/#modulesmod_global_distribmessage_ttl","title":"modules.mod_global_distrib.message_ttl","text":"
  • Syntax: non-negative integer
  • Default: 4
  • Example: message_ttl = 5

Number of times a message can be rerouted between datacenters.

"},{"location":"modules/mod_global_distrib/#modulesmod_global_distribhosts_refresh_interval","title":"modules.mod_global_distrib.hosts_refresh_interval","text":"
  • Syntax: non-negative integer, value given in milliseconds
  • Default: 3000
  • Example: hosts_refresh_interval = 3000

The interval telling how often Redis should be asked if new hosts appeared.

"},{"location":"modules/mod_global_distrib/#connections-options","title":"Connections' options","text":""},{"location":"modules/mod_global_distrib/#modulesmod_global_distribconnectionsendpoints","title":"modules.mod_global_distrib.connections.endpoints","text":"
  • Syntax: Array of TOML tables with the following keys: host and port, and the following values: {host = string, port = non_negative_integer}
  • Default: [{host = \"LocalHost\", port = 5555}]
  • Example: endpoints = [{host = \"172.16.0.2\", port = 5555}]

A list of endpoints on which the server will listen for connections. host can be given as a hostname, in which case it will be resolved to an IP address on module start. The endpoint list will be shared with other datacenters via the replicated backend.

"},{"location":"modules/mod_global_distrib/#modulesmod_global_distribconnectionsadvertised_endpoints","title":"modules.mod_global_distrib.connections.advertised_endpoints","text":"
  • Syntax: Array of TOML tables with the following keys: host and port, and the following values: {host = string, port = non_negative_integer}
  • Default: not set, the value of endpoints is used (without resolution).
  • Example: advertised_endpoints = [{host = \"172.16.0.2\", port = 5555}]

A list of endpoints which will be advertised in Redis and therefore used to establish connection with this node by other nodes. The host may be either IP or domain, just like in case of endpoints. The difference is, the domain name won't be resolved but inserted directly to the mappings backend instead.

"},{"location":"modules/mod_global_distrib/#modulesmod_global_distribconnectionsconnections_per_endpoint","title":"modules.mod_global_distrib.connections.connections_per_endpoint","text":"
  • Syntax: non-negative integer
  • Default: 1
  • Example: connections_per_endpoint = 30

Number of outgoing connections that will be established from the current node to each endpoint assigned to a remote domain.

"},{"location":"modules/mod_global_distrib/#modulesmod_global_distribconnectionsendpoint_refresh_interval","title":"modules.mod_global_distrib.connections.endpoint_refresh_interval","text":"
  • Syntax: positive integer, value given in seconds
  • Default: 60
  • Example: endpoint_refresh_interval = 30

An interval between remote endpoint list refresh (and connection rebalancing). A separate timer is maintained for every remote domain.

"},{"location":"modules/mod_global_distrib/#modulesmod_global_distribconnectionsendpoint_refresh_interval_when_empty","title":"modules.mod_global_distrib.connections.endpoint_refresh_interval_when_empty","text":"
  • Syntax: positive integer, value given in seconds
  • Default: 3
  • Example: endpoint_refresh_interval_when_empty = 3

Endpoint refresh interval, when array of endpoints is empty.

"},{"location":"modules/mod_global_distrib/#modulesmod_global_distribconnectionsdisabled_gc_interval","title":"modules.mod_global_distrib.connections.disabled_gc_interval","text":"
  • Syntax: positive integer, value given in seconds
  • Default: 60
  • Example: disabled_gc_interval = 60

An interval between disabled endpoints \"garbage collection\". It means that disabled endpoints are periodically verified and if Global Distribution detects that a connection is no longer alive, the connection pool is closed completely.

"},{"location":"modules/mod_global_distrib/#tls-options","title":"TLS options","text":"

Note

By default tls is disabled and all data will be sent via standard TCP connections.

To enable TLS support, the cacertfile and certfile options have to be present. These options will be passed to the fast_tls driver.

"},{"location":"modules/mod_global_distrib/#modulesmod_global_distribconnectionstlscertfile","title":"modules.mod_global_distrib.connections.tls.certfile","text":"
  • Syntax: string, path in the file system
  • Default: none, this option is mandatory to enable TLS support
  • Example: certfile = \"priv/dc1.pem\"
"},{"location":"modules/mod_global_distrib/#modulesmod_global_distribconnectionstlscacertfile","title":"modules.mod_global_distrib.connections.tls.cacertfile","text":"
  • Syntax: string, path in the file system
  • Default: none, this option is mandatory to enable TLS support
  • Example: cacertfile = \"priv/ca.pem\"
"},{"location":"modules/mod_global_distrib/#modulesmod_global_distribconnectionstlsciphers","title":"modules.mod_global_distrib.connections.tls.ciphers","text":"
  • Syntax: string
  • Default: \"TLSv1.2:TLSv1.3\"
  • Example: ciphers = \"ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-GCM-SHA384\"

Cipher suites to use with StartTLS or TLS. Please refer to the OpenSSL documentation for the cipher string format.

"},{"location":"modules/mod_global_distrib/#modulesmod_global_distribconnectionstlsdhfile","title":"modules.mod_global_distrib.connections.tls.dhfile","text":"
  • Syntax: string, path in the file system
  • Default: not set
  • Example: dhfile = \"dh.pem\"
"},{"location":"modules/mod_global_distrib/#redis-session-storage-options","title":"Redis session storage options","text":""},{"location":"modules/mod_global_distrib/#modulesmod_global_distribredispool","title":"modules.mod_global_distrib.redis.pool","text":"
  • Syntax: string
  • Default: \"global_distrib\"
  • Example: pool = \"global_distrib\"

Name of the redis pool defined in outgoing pools.

"},{"location":"modules/mod_global_distrib/#modulesmod_global_distribredisexpire_after","title":"modules.mod_global_distrib.redis.expire_after","text":"
  • Syntax: positive integer
  • Default: 120
  • Example: expire_after = 120

Number of seconds after which a session entry written by this cluster will expire.

"},{"location":"modules/mod_global_distrib/#modulesmod_global_distribredisrefresh_after","title":"modules.mod_global_distrib.redis.refresh_after","text":"
  • Syntax: non-negative integer
  • Default: 60
  • Example: refresh_after = 60

Number of seconds after which session's expiration timer will be refreshed.

"},{"location":"modules/mod_global_distrib/#database-cache-options","title":"Database cache options","text":"

Options for caching database lookups, by default no options are passed.

"},{"location":"modules/mod_global_distrib/#modulesmod_global_distribcachecache_missed","title":"modules.mod_global_distrib.cache.cache_missed","text":"
  • Syntax: boolean
  • Default: true
  • Example: cache_missed = true

Determines whether an internal session cache should cache lookup failures. When false, only successful database lookups will result in the value being cached. Changing this option has great negative impact on performance.

"},{"location":"modules/mod_global_distrib/#modulesmod_global_distribcachedomain_lifetime_seconds","title":"modules.mod_global_distrib.cache.domain_lifetime_seconds","text":"
  • Syntax: non-negative integer, value given in seconds
  • Default: 600
  • Example: domain_lifetime_seconds = 600

How long should subdomain mappings be cached (e.g. muc.example.com -> datacenter1.test).

"},{"location":"modules/mod_global_distrib/#modulesmod_global_distribcachejid_lifetime_seconds","title":"modules.mod_global_distrib.cache.jid_lifetime_seconds","text":"
  • Syntax: non-negative integer, value given in seconds
  • Default: 5
  • Example: jid_lifetime_seconds = 5

How long should full and bare JID mappings be cached (e.g. user1@example.com/res1 -> datacenter1.test).

"},{"location":"modules/mod_global_distrib/#modulesmod_global_distribcachemax_jids","title":"modules.mod_global_distrib.cache.max_jids","text":"
  • Syntax: non-negative integer
  • Default: 10000
  • Example: max_jids = 10000

The maximum number of JID entries that can be stored in cache at any point in time.

"},{"location":"modules/mod_global_distrib/#message-bouncing-options","title":"Message bouncing options","text":""},{"location":"modules/mod_global_distrib/#modulesmod_global_distribbounceenabled","title":"modules.mod_global_distrib.bounce.enabled","text":"
  • Syntax: boolean
  • Default: true
  • Example: enabled = false

Whether message bouncing should be enabled or not. Setting this option to false makes other bounce options have no effect.

"},{"location":"modules/mod_global_distrib/#modulesmod_global_distribbounceresend_after_ms","title":"modules.mod_global_distrib.bounce.resend_after_ms","text":"
  • Syntax: non-negative integer
  • Default: 200
  • Example: resend_after_ms = 200

Time after which message will be resent in case of delivery error.

"},{"location":"modules/mod_global_distrib/#modulesmod_global_distribbouncemax_retries","title":"modules.mod_global_distrib.bounce.max_retries","text":"
  • Syntax: non-negative integer
  • Default: 4
  • Example: max_retries = 4

Number of times message delivery will be retried in case of errors.

"},{"location":"modules/mod_global_distrib/#global-distribution-and-service-discovery","title":"Global Distribution and Service Discovery","text":"

mod_global_distrib extension relies on mod_disco's option users_can_see_hidden_services, when provided. If it is not configured, the default value is true. mod_disco does not have to be enabled for mod_global_distrib to work, as this parameter is used only for processing Disco requests by Global Distribution.

"},{"location":"modules/mod_global_distrib/#example-configuration","title":"Example configuration","text":""},{"location":"modules/mod_global_distrib/#configuring-mod_global_distrib","title":"Configuring mod_global_distrib","text":"
[modules.mod_global_distrib]\n  global_host = \"example.com\"\n  local_host = \"datacenter1.example.com\"\n  connections.endpoints = [{host = \"172.16.0.2\", port = 5555}]\n  connections.advertised_endpoints = [{host = \"172.16.0.2\", port = 5555}]\n  connections.tls.certfile = \"priv/dc1.pem\"\n  connections.tls.cacertfile = \"priv/ca.pem\"\n  connections.connections_per_endpoint = 30\n  cache.domain_lifetime_seconds = 60\n  bounce.resend_after_ms = 300\n  bounce.max_retries = 3\n  redis.pool = \"global_distrib\"\n
"},{"location":"modules/mod_global_distrib/#configuring-dynomite","title":"Configuring Dynomite","text":"

For more information about Dynomite configuration, consult Dynomite wiki.

dyn_o_mite:\n  datacenter: dc1\n  rack: rack1\n  dyn_listen: 172.16.0.3:8101\n  dyn_seeds:\n  - 124.12.4.4:8101:rack1:dc2:1383429731\n  listen: 172.16.0.3:8102\n  servers:\n  - 172.16.0.4:6379:1\n  tokens: '138342973'\n  secure_server_option: datacenter\n  pem_key_file: dynomite.pem\n  data_store: 0\n  stats_listen: 0.0.0.0:22221\n
dyn_o_mite:\n  datacenter: dc2\n  rack: rack1\n  dyn_listen: 124.12.4.4:8101\n  dyn_seeds:\n  - 172.16.0.3:8101:rack1:dc1:1383429731\n  listen: 124.12.4.4:8102\n  servers:\n  - 124.12.4.5:6379:1\n  tokens: '138342973'\n  secure_server_option: datacenter\n  pem_key_file: dynomite.pem\n  data_store: 0\n  stats_listen: 0.0.0.0:22221\n
"},{"location":"modules/mod_http_upload/","title":"mod_http_upload","text":""},{"location":"modules/mod_http_upload/#module-description","title":"Module Description","text":"

This module implements XEP-0363: HTTP File Upload, version 0.3.0+. It enables a service that on user request creates an upload \"slot\". A slot is a pair of URLs, one of which can be used with a PUT method to upload a user's file, the other with a GET method to retrieve such file.

Currently, the module supports only the S3 backend using AWS Signature Version 4.

"},{"location":"modules/mod_http_upload/#options","title":"Options","text":""},{"location":"modules/mod_http_upload/#modulesmod_http_uploadiqdisctype","title":"modules.mod_http_upload.iqdisc.type","text":"
  • Syntax: string, one of \"one_queue\", \"no_queue\", \"queues\", \"parallel\"
  • Default: \"one_queue\"

Strategy to handle incoming stanzas. For details, please refer to IQ processing policies.

"},{"location":"modules/mod_http_upload/#modulesmod_http_uploadhost","title":"modules.mod_http_upload.host","text":"
  • Syntax: string
  • Default: \"upload.@HOST@\"
  • Example: host = \"upload.@HOST@\"

Subdomain for the upload service to reside under. @HOST@ is replaced with each served domain.

"},{"location":"modules/mod_http_upload/#modulesmod_http_uploadbackend","title":"modules.mod_http_upload.backend","text":"
  • Syntax: non-empty string
  • Default: \"s3\"
  • Example: backend = \"s3\"

Backend to use for generating slots. Currently only \"s3\" can be used.

"},{"location":"modules/mod_http_upload/#modulesmod_http_uploadexpiration_time","title":"modules.mod_http_upload.expiration_time","text":"
  • Syntax: positive integer
  • Default: 60
  • Example: expiration_time = 120

Duration (in seconds) after which the generated PUT URL will become invalid.

"},{"location":"modules/mod_http_upload/#modulesmod_http_uploadtoken_bytes","title":"modules.mod_http_upload.token_bytes","text":"
  • Syntax: positive integer
  • Default: 32
  • Example: token_bytes = 32

Number of random bytes of a token that will be used in a generated URL. The text representation of the token will be twice as long as the number of bytes, e.g. for the default value the token in the URL will be 64 characters long.

"},{"location":"modules/mod_http_upload/#modulesmod_http_uploadmax_file_size","title":"modules.mod_http_upload.max_file_size","text":"
  • Syntax: positive integer
  • Default: not set - no size limit
  • Example: max_file_size = 10485760

Maximum file size (in bytes) accepted by the module.

"},{"location":"modules/mod_http_upload/#modulesmod_http_uploads3","title":"modules.mod_http_upload.s3","text":"
  • Syntax: Array of TOML tables. See description.
  • Default: see description
  • Example: see description

Options specific to S3 backend.

Note

This section is mandatory.

"},{"location":"modules/mod_http_upload/#s3-backend-options","title":"S3 backend options","text":""},{"location":"modules/mod_http_upload/#s3bucket_url","title":"s3.bucket_url","text":"
  • Syntax: non-empty string
  • Default: none, this option is mandatory
  • Example: s3.bucket_url = \"https://s3-eu-west-1.amazonaws.com/mybucket\"

A complete URL pointing at the used bucket. The URL may be in virtual host form, and for AWS it needs to point to a specific regional endpoint for the bucket. The scheme, port and path specified in the URL will be used to create PUT URLs for slots, e.g. specifying a value of \"https://s3-eu-west-1.amazonaws.com/mybucket/custom/prefix\" will result in PUT URLs of form \"https://s3-eu-west-1.amazonaws.com/mybucket/custom/prefix/<RANDOM_TOKEN>/<FILENAME>?<AUTHENTICATION_PARAMETERS>\".

"},{"location":"modules/mod_http_upload/#s3add_acl","title":"s3.add_acl","text":"
  • Syntax: boolean
  • Default: false
  • Example: s3.add_acl = true

If true, adds x-amz-acl: public-read header to the PUT URL. This allows users to read the uploaded files even if the bucket is private. The same header must be added to the PUT request.

"},{"location":"modules/mod_http_upload/#s3region","title":"s3.region","text":"
  • Syntax: string
  • Default: \"\", this option is mandatory
  • Example: s3.region = \"eu-west-1\"

The AWS region to use for requests.

"},{"location":"modules/mod_http_upload/#s3access_key_id","title":"s3.access_key_id","text":"
  • Syntax: string
  • Default: \"\", this option is mandatory
  • Example: s3.access_key_id = \"AKIAIOSFODNN7EXAMPLE\"

ID of the access key to use for authorization.

"},{"location":"modules/mod_http_upload/#s3secret_access_key","title":"s3.secret_access_key","text":"
  • Syntax: string
  • Default: \"\", this option is mandatory
  • Example: s3.secret_access_key = \"wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY\"

Secret access key to use for authorization.

"},{"location":"modules/mod_http_upload/#example-configuration","title":"Example configuration","text":"
[modules.mod_http_upload]\n  host = \"upload.@HOST@\"\n  backend = \"s3\"\n  expiration_time = 120\n  s3.bucket_url = \"https://s3-eu-west-1.amazonaws.com/mybucket\"\n  s3.region = \"eu-west-1\"\n  s3.add_acl = true     \n  s3.access_key_id = \"AKIAIOSFODNN7EXAMPLE\"\n  s3.secret_access_key = \"wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY\"\n
"},{"location":"modules/mod_http_upload/#testing-s3-configuration","title":"Testing S3 configuration","text":"

Since there is no direct connection between MongooseIM and an S3 bucket, it is not possible to verify the provided S3 credentials during startup. However, the testing can be done manually. MongooseIM provides a dedicated mongooseimctl httpUpload getUrl command for the manual URLs generation. It requires the following arguments:

  • domain - XMPP host name.
  • filename - Name of the file.
  • size - Size of the file in bytes (positive integer).
  • contentType - Content-Type.
  • timeout - Duration (in seconds, positive integer) after which the generated PUT URL will become invalid. This argument shadows the expiration_time configuration.

The generated URLs can be used to upload/download a file using the curl utility:

# Create some text file\necho qwerty > tmp.txt\n\n# Get the size of the file\nfilesize=\"$(wc -c tmp.txt | awk '{print $1}')\"\n\n# Set the content type\ncontent_type=\"text/plain\"\n\n# Generate upload/download URLs\nurls=\"$(./mongooseimctl httpUpload getUrl --domain localhost --filename test.txt --size \"$filesize\" --contentType \"$content_type\" --timeout 600)\"\nput_url=\"$(echo \"$urls\" | awk '/PutURL:/ {print $2}')\"\nget_url=\"$(echo \"$urls\" | awk '/GetURL:/ {print $2}')\"\n\n# Try to upload a file. Note that if 'add_acl' option is\n# enabled, then you must also add 'x-amz-acl' header:\n#    -H \"x-amz-acl: public-read\"\ncurl -v -T \"./tmp.txt\" -H \"Content-Type: $content_type\" \"$put_url\"\n\n# Try to download a file\ncurl -i \"$get_url\"\n
"},{"location":"modules/mod_http_upload/#using-s3-backend-with-minio","title":"Using S3 backend with min.io","text":"

min.io doesn't support ObjectACL, so enabling add_acl makes no sense. The bucket policies must be used instead; it is enough to set the bucket policy to download.

Please note that there is no error if you keep add_acl enabled. min.io just ignores the x-amz-acl header. This might be useful to simplify the migration from S3 to min.io

"},{"location":"modules/mod_http_upload/#metrics","title":"Metrics","text":"

If you'd like to learn more about metrics in MongooseIM, please visit MongooseIM metrics page.

Backend action Description (when it gets incremented) create_slot An upload slot is allocated."},{"location":"modules/mod_inbox/","title":"mod_inbox","text":""},{"location":"modules/mod_inbox/#module-description","title":"Module Description","text":"

Inbox is an experimental feature implemented as a few separate modules. It is described in detail as our Open XMPP Extension. To use it, enable mod_inbox in the config file.

"},{"location":"modules/mod_inbox/#options","title":"Options","text":""},{"location":"modules/mod_inbox/#modulesmod_inboxbackend","title":"modules.mod_inbox.backend","text":"
  • Syntax: string, one of \"rdbms\", \"rdbms_async\"
  • Default: \"rdbms\"
  • Example: backend = \"rdbms_async\"

Only RDBMS storage is supported, but rdbms means flushes to DB are synchronous with each message, while rdbms_async is instead asynchronous.

Regular rdbms has worse performance characteristics, but it has better consistency properties, as events aren't lost nor reordered. rdbms_async processes events asynchronously, potentially offloading a lot of aggregation from the DB. As in the case of the asynchronous workers for MAM, it is the preferred method, with the risk of messages being lost on an ungraceful shutdown.

"},{"location":"modules/mod_inbox/#modulesmod_inboxasync_writerpool_size","title":"modules.mod_inbox.async_writer.pool_size","text":"
  • Syntax: non-negative integer
  • Default: 2 * erlang:system_info(schedulers_online)
  • Example: modules.mod_inbox.async_writer.pool_size = 32

Number of workers in the pool. More than the number of available schedulers is recommended, to minimise lock contention on the message queues, and more than the number of DB workers, to fully utilise the DB capacity. How much more than these two parameters is then a good fine-tuning for specific deployments.

"},{"location":"modules/mod_inbox/#modulesmod_inboxboxes","title":"modules.mod_inbox.boxes","text":"
  • Syntax: array of strings.
  • Default: []
  • Example: [\"classified\", \"spam\"]

A list of supported inbox boxes by the server. This can be used by clients to classify their inbox entries in any way that fits the end-user. The strings provided here will be used verbatim in the IQ query as described in Inbox \u2013 Filtering and Ordering.

Note

inbox, archive, and bin are reserved box names and are always enabled, therefore they don't need to \u2013and must not\u2013 be specified in this section. all has a special meaning in the box query and therefore is also not allowed as a box name.

If the asynchronous backend is configured, automatic removals become moves to the bin box, also called \"Trash bin\". This is to ensure eventual consistency. Then the bin can be emptied, either on a user request, with the mongooseimctl inbox command, through the GraphQL API, or through the REST API.

"},{"location":"modules/mod_inbox/#modulesmod_inboxbin_ttl","title":"modules.mod_inbox.bin_ttl","text":"
  • Syntax: non-negative integer, expressed in days.
  • Default: 30
  • Example: modules.mod_inbox.bin_ttl = 7

How old entries in the bin can be before the automatic bin cleaner collects them. A value of 7 would mean that entries that have been in the bin for more than 7 days will be cleaned on the next bin collection.

"},{"location":"modules/mod_inbox/#modulesmod_inboxbin_clean_after","title":"modules.mod_inbox.bin_clean_after","text":"
  • Syntax: non-negative integer, expressed in hours
  • Default: 1
  • Example: modules.mod_inbox.bin_clean_after = 24

How often the automatic garbage collection runs over the bin.

"},{"location":"modules/mod_inbox/#modulesmod_inboxdelete_domain_limit","title":"modules.mod_inbox.delete_domain_limit","text":"
  • Syntax: non-negative integer or the string \"infinity\"
  • Default: \"infinity\"
  • Example: modules.mod_inbox.delete_domain_limit = 10000

Domain deletion can be an expensive operation, as it requires to delete potentially many thousands of records from the DB. By default, the delete operation deletes everything in a transaction, but it might be desired, to handle timeouts and table locks more gracefully, to delete the records in batches. This limit establishes the size of the batch.

Note

Not supported by MSSQL.

"},{"location":"modules/mod_inbox/#modulesmod_inboxreset_markers","title":"modules.mod_inbox.reset_markers","text":"
  • Syntax: array of strings, out of \"displayed\", \"received\", \"acknowledged\"
  • Default: [\"displayed\"]
  • Example: reset_markers = [\"received\"]

List of chat markers that when sent, will reset the unread message counter for a conversation. This works when Chat Markers are enabled on the client side. Setting as empty list (not recommended) means that no chat marker can decrease the counter value.

"},{"location":"modules/mod_inbox/#modulesmod_inboxgroupchat","title":"modules.mod_inbox.groupchat","text":"
  • Syntax: array of strings
  • Default: [\"muclight\"]
  • Example: groupchat = [\"muclight\"]

The list indicating which groupchats will be included in inbox. Possible values are muclight Multi-User Chat Light or muc Multi-User Chat.

"},{"location":"modules/mod_inbox/#modulesmod_inboxaff_changes","title":"modules.mod_inbox.aff_changes","text":"
  • Syntax: boolean
  • Default: true
  • Example: aff_changes = true

Use this option when muclight is enabled. Indicates if MUC Light affiliation change messages should be included in the conversation inbox. Only changes that affect the user directly will be stored in their inbox.

"},{"location":"modules/mod_inbox/#modulesmod_inboxremove_on_kicked","title":"modules.mod_inbox.remove_on_kicked","text":"
  • Syntax: boolean
  • Default: true
  • Example: remove_on_kicked = true

Use this option when muclight is enabled. If true, the inbox conversation is removed for a user when they are removed from the groupchat.

"},{"location":"modules/mod_inbox/#modulesmod_inboxiqdisctype","title":"modules.mod_inbox.iqdisc.type","text":"
  • Syntax: string, one of \"one_queue\", \"no_queue\", \"queues\", \"parallel\"
  • Default: \"no_queue\"

Strategy to handle incoming stanzas. For details, please refer to IQ processing policies.

"},{"location":"modules/mod_inbox/#modulesmod_inboxmax_result_limit","title":"modules.mod_inbox.max_result_limit","text":"
  • Syntax: the string \"infinity\" or a positive integer
  • Default: \"infinity\"
  • Example: modules.mod_inbox.max_result_limit = 100

This option sets the maximum size of returned results when querying inbox. It works in the same manner as setting a limit in an iq stanza. The special value infinity means no limit.

"},{"location":"modules/mod_inbox/#note-about-supported-rdbms","title":"Note about supported RDBMS","text":"

mod_inbox executes upsert queries, which have different syntax in every supported RDBMS. Inbox currently supports the following DBs:

  • MySQL via native driver
  • PgSQL via native driver
  • MSSQL via ODBC driver
"},{"location":"modules/mod_inbox/#legacy-muc-support","title":"Legacy MUC support","text":"

Inbox comes with support for the legacy MUC as well. It stores all groupchat messages sent to a room in each sender's and recipient's inboxes and private messages. Currently it is not possible to configure it to store system messages like subject or affiliation change.

"},{"location":"modules/mod_inbox/#example-configuration","title":"Example configuration","text":"
[modules.mod_inbox]\n  backend = \"rdbms_async\"\n  reset_markers = [\"displayed\"]\n  aff_changes = true\n  remove_on_kicked = true\n  groupchat = [\"muclight\"]\n
"},{"location":"modules/mod_jingle_sip/","title":"mod_jingle_sip","text":""},{"location":"modules/mod_jingle_sip/#module-description","title":"Module Description","text":"

This module enables Jingle to SIP and SIP to Jingle translation. When this module is enabled, MongooseIM will intercept any Jingle IQ set stanza with action:

  • session-initiate
  • session-terminate
  • session-accept
  • transport-info

and translate it to SIP messages with appropriate SDP content based on the details in the Jingle stanza.

The translation back from SIP to Jingle is done for the following SIP requests:

  • INVITE
  • re-INVITE - INVITE message sent for an accepted session
  • CANCEL
  • BYE
  • INFO

and following responses to the INVITE request:

  • 200 when the call invite was accepted
  • 180 and 183 to indicate that the invitation was sent to the device
  • 486 when the call's recipient rejects it
  • from 400 to 600 - other error codes indicating session termination
"},{"location":"modules/mod_jingle_sip/#jingle-to-sip-translation","title":"Jingle to SIP translation","text":"

The table below summarises the bilateral translation for standard Jingle and SIP messages:

Jingle action SIP message comment session-initiate INVITE request session-accept 200 OK response session-terminate with reason success BYE request Only for accepted session session-terminate with reason decline CANCEL request When sent by call's initiator session-terminate with reason decline 486 Busy Here response When sent by the invited user transport-info INFO request"},{"location":"modules/mod_jingle_sip/#ringing-notification","title":"Ringing notification","text":"

Both Jingle and SIP have the ringing notification. It's generated as a response code 180 Ringing by a SIP entity when the INVITE is sent to the device. In the SIP world a 183 Session Progress response code is also generated in some cases. Both 180 and 183 codes are translated as a session-info Jingle stanza with a ringing sub element. If the recipient is online, MongooseIM generates only the 180 Ringing response code to the INVITE request.

"},{"location":"modules/mod_jingle_sip/#recipient-unavailable","title":"Recipient unavailable","text":"

When MongooseIM receives a SIP INVITE request addressed to an offline user, it replies with a 480 Temporarily Unavailable code. The same code is expected from the SIP Proxy when MongooseIM sends the INVITE request.

"},{"location":"modules/mod_jingle_sip/#other-error-codes","title":"Other error codes","text":"

When an error response to the INVITE request is from the range 400 to 699 but not 486, MongooseIM will send a Jingle session-terminate stanza to the call's initiator. The stanza has reason general-error with the SIP error code in the sip-error sub element.

"},{"location":"modules/mod_jingle_sip/#non-standard-jingle-stanzas-used-by-jinglejs","title":"Non-standard Jingle stanzas used by jingle.js","text":"

The following non-standard Jingle stanzas were integrated with Jingle.js:

  • source-remove
  • source-add
  • source-update

When MongooseIM observes the above Jingle stanzas, it will translate them to a SIP in-dialog INVITE request. In the SDP content of the request, there will be a custom attribute a=jingle-action. The value of the custom attribute is one of the three presented above.

Similarly, when MongooseIM gets a SIP in-dialog INVITE request, it will check if there is a custom attribute and use it as the action attribute of the Jingle stanza sent to the user. If there is no such attribute, the action will be set to regular Jingle transport-info.

"},{"location":"modules/mod_jingle_sip/#non-standard-jingle-existing-session-initiate-stanza","title":"Non-standard Jingle existing-session-initiate stanza","text":"

MongooseIM allows a user to ask for an unanswered session-initiate request. This may be useful in web applications when there is a need to handle the call in a new browser window.

In order to get the session-initiate, which was not answered yet, the user can send a get Jingle stanza to self with action set to existing-session-initiate. As a result, MongooseIM will resend the original session-initiate request to the device which sent the query.

"},{"location":"modules/mod_jingle_sip/#prerequisites","title":"Prerequisites","text":"

By default, MongooseIM is built without SIP support. In order to build the server with SIP support, please use tools/configure script before the release generation. You may either pick only certain drivers (with SIP included) or simply use with-all option. Examples:

tools/configure with-mysql with-jingle-sip\ntools/configure with-all without-odbc\ntools/configure with-all\n

MongooseIM packages are built with Jingle/SIP support.

Warning

This module does not support dynamic domains.

"},{"location":"modules/mod_jingle_sip/#options","title":"Options","text":""},{"location":"modules/mod_jingle_sip/#modulesmod_jingle_sipbackend","title":"modules.mod_jingle_sip.backend","text":"
  • Syntax: string: \"mnesia\" or \"cets\"
  • Default: \"mnesia\"
  • Example: backend = \"cets\"

Backend for in-memory data for this module.

Warning

The corresponding internal database has to be enabled.

"},{"location":"modules/mod_jingle_sip/#modulesmod_jingle_sipproxy_host","title":"modules.mod_jingle_sip.proxy_host","text":"
  • Syntax: string
  • Default: \"localhost\"
  • Example: proxy_host = \"localhost\"

The name or IP address of the SIP Proxy to which MongooseIM will send SIP messages.

"},{"location":"modules/mod_jingle_sip/#modulesmod_jingle_sipproxy_port","title":"modules.mod_jingle_sip.proxy_port","text":"
  • Syntax: non-negative integer
  • Default: 5060
  • Example: proxy_port = 5060

The port of the SIP Proxy.

"},{"location":"modules/mod_jingle_sip/#modulesmod_jingle_siplisten_port","title":"modules.mod_jingle_sip.listen_port","text":"
  • Syntax: non-negative integer
  • Default: 5600
  • Example: listen_port = 5600

The port on which MongooseIM will listen for incoming SIP messages.

"},{"location":"modules/mod_jingle_sip/#modulesmod_jingle_siplocal_host","title":"modules.mod_jingle_sip.local_host","text":"
  • Syntax: string
  • Default: \"localhost\"
  • Example: local_host = \"localhost\"

The value used to create SIP URIs (including VIA headers).

"},{"location":"modules/mod_jingle_sip/#modulesmod_jingle_sipsdp_origin","title":"modules.mod_jingle_sip.sdp_origin","text":"
  • Syntax: string
  • Default: \"127.0.0.1\"
  • Example: sdp_origin = \"127.0.0.1\"

The value of the c= SDP attribute.

"},{"location":"modules/mod_jingle_sip/#modulesmod_jingle_siptransport","title":"modules.mod_jingle_sip.transport","text":"
  • Syntax: string
  • Default: \"udp\"
  • Example: transport = \"tcp\"

The SIP transport parameter used when calling the proxy.

"},{"location":"modules/mod_jingle_sip/#modulesmod_jingle_sipusername_to_phone","title":"modules.mod_jingle_sip.username_to_phone","text":"
  • Syntax: Array of TOML tables with the following keys: username and phone, and string values
  • Default: []
  • Example: username_to_phone = [{username = \"2000006168\", phone = \"+919177074440\"}]

Allows mapping JIDs to phone numbers and vice versa.

The simplest configuration is the following:

[modules.mod_jingle_sip]\n

With this configuration MongooseIM will try sending SIP messages to a SIP proxy listening on localhost and port 5600.

"},{"location":"modules/mod_jingle_sip/#use-cases-covered-by-tests","title":"Use cases covered by tests","text":"

Currently to test the functionality we use a SIP Proxy mock written in Erlang. The following scenarios are covered by our tests in big_tests/tests/jingle_SUITE.erl

All the sequence diagrams where generated with textart.io/sequence. The source code is embedded in the markdown file below every diagram inside a comment <!--- --->

"},{"location":"modules/mod_jingle_sip/#1-establishing-a-session-with-another-xmpp-user","title":"1. Establishing a session with another XMPP user","text":"

With the mod_jingle_sip enabled, all Jingle IQ set stanzas listed above are intercepted, translated to SIP packets and sent to a SIP Proxy. This means that the current implementation will also translate stanzas addressed to a user in the same domain. This allows the SIP entity to control how the call between XMPP users is established. Below there are sequence diagrams showing the communication between XMPP users, MongooseIM and SIP Proxy as in our tests. It's possible that the SIP Proxy or other SIP entity decides that the call needs to be forked and delivered to the user's phone number instead of generating a corresponding call back to MongooseIM.

"},{"location":"modules/mod_jingle_sip/#11-signaling-session-initiate-to-other-xmpp-user-via-sip-proxy","title":"1.1 Signaling session-initiate to other XMPP user via SIP Proxy","text":"
+-------+                       +-------------+       +-----------+                   +-------+\n| UserA |                       | MongooseIM  |       | SIPProxy  |                   | UserB |\n+-------+                       +-------------+       +-----------+                   +-------+\n    |                                  |                    |                             |\n    | session-initiate to UserB        |                    |                             |\n    |--------------------------------->|                    |                             |\n    | -------------------------\\       |                    |                             |\n    |-| Jingle stanza          |       |                    |                             |\n    | | action:session-initate |       |                    |                             |\n    | | sid: 123               |       |                    |                             |\n    | |------------------------|       | SIP INVITE         |                             |\n    |                                  |------------------->|                             |\n    |                                  | -------------\\     |                             |\n    |                                  |-| from:UserA |     |                             |\n    |                                  | | to:UserB   |     |                             |\n    |                                  | | sid: 123   |     |                             |\n    |                                  | |------------|     | create new call             |\n    |                                  |                    |----------------             |\n    |                                  |                    |               |             |\n    |                                  |                    |<---------------             |\n    |                                  |                    | ------------------------\\   |\n    |                           
       |                    |-| SDP content can be    |   |\n    |                                  |                    | | changed for instance  |   |\n    |                                  |                    | | to inject a transport |   |\n    |                                  |         SIP INVITE | | canidate              |   |\n    |                                  |<-------------------| |-----------------------|   |\n    |                                  |     -------------\\ |                             |\n    |                                  |     | from:UserA |-|                             |\n    |                                  |     | to:UserB   | |                             |\n    |            --------------------\\ |     | sid:456    | |                             |\n    |            | yes, new SID: 456 |-|     |------------| |                             |\n    |            |-------------------| |                    |                             |\n    |                                  |                    |                             |\n    |                                  | session-initiate to UserB                        |\n    |                                  |------------------------------------------------->|\n    |                                  |                    |                             |\n
"},{"location":"modules/mod_jingle_sip/#12-signaling-session-accept-to-other-xmpp-user-via-sip-proxy","title":"1.2 Signaling session-accept to other XMPP user via SIP Proxy","text":"

When the other user accepts the call invite sent by the first, the following sequence is executed. This is a continuation of the previous example.

+-------+                       +-------------+        +-----------+                   +-------+\n| UserA |                       | MongooseIM  |        | SIPProxy  |                   | UserB |\n+-------+                       +-------------+        +-----------+                   +-------+\n    |                                  |                     |                             |\n    |                                  |                     |     session-accpet to UserA |\n    |                                  |<--------------------------------------------------|\n    |                                  |                     |   ------------------------\\ |\n    |                                  |                     |   | Jingle stanza         |-|\n    |                                  |                     |   | action:session-accept | |\n    |                                  |                     |   | sid: 456              | |\n    |                                  | 200 OK              |   |-----------------------| |\n    |                                  |-------------------->|                             |\n    |                                  | --------------\\     |                             |\n    |                                  |-| from: UserA |     |                             |\n    |                                  | | to: UserB   |     |                             |\n    |                                  | | sid: 456    |     |                             |\n    |                                  | |-------------|     | find corresponding call     |\n    |                                  |                     |------------------------     |\n    |                                  |                     |                       |     |\n    |                                  |                     |<-----------------------     |\n    |                                  |                     |                             |\n    |       
                           |              200 OK |                             |\n    |                                  |<--------------------|                             |\n    |                                  |     --------------\\ |                             |\n    |                                  |     | from: UserA |-|                             |\n    |                                  |     | to: UserB   | |                             |\n    |                                  |     | sid: 123    | |                             |\n    |        session-accept from UserB |     |-------------| |                             |\n    |<---------------------------------|                     |                             |\n    |                                  |                     |                             |\n
"},{"location":"modules/mod_jingle_sip/#13-terminating-a-call","title":"1.3 Terminating a call","text":"

Any Jingle session (accepted or not) can be terminated by sending a Jingle stanza with action session-terminate and a reason. In the SIP world it's more complex. See the following examples for more information.

"},{"location":"modules/mod_jingle_sip/#131-terminating-an-accepted-call","title":"1.3.1 Terminating an accepted call","text":"

The easiest scenario is when the call was accepted as in 1.2. In this case one of the users sends a session-terminate Jingle action with a reason success. This is translated to a SIP BYE request with to and from headers set appropriately - from is the user who wants to terminate the call and to is the user on the other end of the session. The BYE request is sent to the SIP Proxy and then to the other user in a similar way to session acceptance.

"},{"location":"modules/mod_jingle_sip/#132-terminating-an-unanswered-call-by-initiator","title":"1.3.2 Terminating an unanswered call by initiator","text":"

To terminate the call before it's accepted, the initiator sends a Jingle session-terminate stanza with a reason decline. Then MongooseIM translates this to a SIP CANCEL request which is sent to the SIP Proxy.

"},{"location":"modules/mod_jingle_sip/#133-rejecting-the-call","title":"1.3.3 Rejecting the call","text":"

When the invitee wants to terminate the call, on the XMPP level this is also a Jingle session-terminate stanza with a reason decline. MongooseIM translates this to SIP 486 Busy Here Response (because this is a response to the invite request).

"},{"location":"modules/mod_jingle_sip/#2-establishing-a-session-with-a-sip-user","title":"2. Establishing a session with a SIP user","text":"

Establishing a session with a SIP user (or a SIP entity) works the same as in the previous section. The only difference is that the SIP Proxy will not call MongooseIM back (as may happen for a call to another XMPP user). Instead, the SIP message sent by MongooseIM to the SIP Proxy will be delivered directly to the SIP user's device.

"},{"location":"modules/mod_keystore/","title":"mod_keystore","text":""},{"location":"modules/mod_keystore/#module-description","title":"Module Description","text":"

mod_keystore serves as storage for crypto keys - it doesn't implement any XMPP-level protocol. The module can store transient RAM-only keys generated on module startup, stored in memory only, distributed to all cluster members and existing for only as long as the cluster is alive, as well as predefined and pre-shared keys which can be read from a file.

RAM-only keys provide better security since they are never written to persistent storage, at the cost of loss in case of a cluster-global failure or restart.

As of now mod_auth_token is the only module dependent on mod_keystore.

It's crucial to understand the distinction between single-tenant and multi-tenant hosting scenarios. In a multi-tenant server mod_keystore must be configured separately for each virtual XMPP domain to avoid sharing keys between domains!

"},{"location":"modules/mod_keystore/#options","title":"Options","text":""},{"location":"modules/mod_keystore/#modulesmod_keystoreram_key_size","title":"modules.mod_keystore.ram_key_size","text":"
  • Syntax: non-negative integer
  • Default: 2048
  • Example: ram_key_size = 10000

Size to use when generating RAM-only keys (designated by type ram).

"},{"location":"modules/mod_keystore/#modulesmod_keystorekeys","title":"modules.mod_keystore.keys","text":"
  • Syntax: Array of TOML tables with the following keys: \"name\", \"type\", \"path\", and following values: {name = string, type = values: \"file\", \"ram\", path = string}.
  • Default: []
  • Example: modules.mod_keystore.keys = [{name = \"access_psk\", type = \"file\", path = \"priv/access_psk\"}]

Names, types, and optional filepaths of the keys.

"},{"location":"modules/mod_keystore/#api","title":"API","text":"

The module public API is hook-based:

mongoose_hooks:get_key(Domain, [], KeyName).\n

An example of usage can be found in mod_auth_token:get_key_for_user/2.

"},{"location":"modules/mod_keystore/#example-configuration","title":"Example Configuration","text":"

Simple configuration - single tenant (i.e. server hosting just one XMPP domain):

[modules.mod_keystore]\n  keys = [{name = \"access_secret\", type = \"ram\"},\n          {name = \"access_psk\", type = \"file\", path = \"priv/access_psk\"},\n          {name = \"provision_psk\", type = \"file\", path = \"priv/provision_psk\"}]\n

Multi-tenant setup (mod_keystore configured differently for each virtual XMPP domain):

[[host_config]]\n  host = \"first.com\"\n\n  [host_config.modules.mod_keystore]\n    keys = [{name = \"access_secret\", type = \"ram\"},\n            {name = \"access_psk\", type = \"file\", path = \"priv/first_access_psk\"},\n            {name = \"provision_psk\", type = \"file\", path = \"priv/first_provision_psk\"}]\n\n[[host_config]]\n  host = \"second.com\"\n\n  [host_config.modules.mod_keystore]\n    keys = [{name = \"access_secret\", type = \"ram\"},\n            {name = \"access_psk\", type = \"file\", path = \"priv/second_access_psk\"},\n            {name = \"provision_psk\", type = \"file\", path = \"priv/second_provision_psk\"}]\n
"},{"location":"modules/mod_last/","title":"mod_last","text":""},{"location":"modules/mod_last/#module-description","title":"Module Description","text":"

Implements XEP-0012: Last Activity.

Use with caution, as it was observed that a user disconnect spike might result in overloading the database with \"last activity\" writes.

"},{"location":"modules/mod_last/#options","title":"Options","text":""},{"location":"modules/mod_last/#modulesmod_lastiqdisctype","title":"modules.mod_last.iqdisc.type","text":"
  • Syntax: string, one of \"one_queue\", \"no_queue\", \"queues\", \"parallel\"
  • Default: \"one_queue\"

Strategy to handle incoming stanzas. For details, please refer to IQ processing policies.

"},{"location":"modules/mod_last/#modulesmod_lastbackend","title":"modules.mod_last.backend","text":"
  • Syntax: string, one of \"mnesia\", \"rdbms\"
  • Default: \"mnesia\"
  • Example: backend = \"rdbms\"

Storage backend.

"},{"location":"modules/mod_last/#example-configuration","title":"Example Configuration","text":"
[modules.mod_last]\n  backend = \"rdbms\"\n
"},{"location":"modules/mod_last/#metrics","title":"Metrics","text":"

If you'd like to learn more about metrics in MongooseIM, please visit MongooseIM metrics page.

Backend action Description (when it gets incremented) get_last A timestamp is fetched from DB. set_last_info A timestamp is stored in DB."},{"location":"modules/mod_mam/","title":"mod_mam","text":""},{"location":"modules/mod_mam/#module-description","title":"Module Description","text":"

This module implements XEP-0313: Message Archive Management. It enables a service to store all user messages for one-to-one chats as well as group chats (MUC, MultiUser Chat). It uses XEP-0059: Result Set Management for paging. It is a highly customizable module, that requires some skill and knowledge to operate properly and efficiently.

MongooseIM is compatible with MAM 0.4-1.1.0.

Configure MAM with different storage backends:

  • RDBMS (databases like MySQL, PostgreSQL, MS SQL Server)
  • Cassandra (NoSQL)
  • ElasticSearch (NoSQL)

mod_mam is a meta-module that ensures all relevant mod_mam_* modules are loaded and properly configured.

"},{"location":"modules/mod_mam/#message-retraction","title":"Message retraction","text":"

This module supports XEP-0424: Message Retraction with RDBMS storage backends. When a retraction message is received, the MAM module finds the message to retract and replaces it with a tombstone.

The following criteria are used to find the original message:

  • The id attribute specified in the apply-to element of the retraction message has to be the same as the id attribute of the origin-id (or stanza-id when configured, see below) element of the original message.
  • Both messages need to originate from the same user.
  • Both messages need to be addressed to the same user.

If more than one message matches the criteria, only the most recent one is retracted. To avoid this case, it is recommended to use a unique identifier (UUID) as the origin ID.

"},{"location":"modules/mod_mam/#retraction-on-the-stanza-id","title":"Retraction on the stanza-id","text":"

This module also implements an extension to the XEP, where it allows to specify the stanza-id as created by the server's MAM, instead of the origin-id that the original XEP-0424 specifies. It announces this capability under the namespace urn:esl:message-retract-by-stanza-id:0. This is specially useful in groupchats where the stanza-id of a message is shared and known for all participants.

In this case, to use such functionality,

<apply-to id=\"origin-id-1\" xmlns=\"urn:xmpp:fasten:0\">\n  <retract xmlns='urn:xmpp:message-retract:0'/>\n</apply-to>\n
turns into
<apply-to id=\"stanza-id-1\" xmlns=\"urn:xmpp:fasten:0\">\n  <retract xmlns='urn:esl:message-retract-by-stanza-id:0'/>\n</apply-to>\n
and likewise, the answer would be tagged by the mentioned esl namespace.

"},{"location":"modules/mod_mam/#full-text-search","title":"Full Text Search","text":"

This module allows message filtering by their text body (if enabled, see Common backend options). This means that an XMPP client, while requesting messages from the archive may not only specify standard form fields (with, start, end), but also full-text-search (of type text-single). If this happens, the client will receive only messages that contain words specified in the request.

The exact behaviour, like whether word ordering matters, may depend on the storage backend in use. For now rdbms backend has very limited support for this feature, while cassandra does not support it at all. elasticsearch backend, on the other hand, should provide you with the best results when it comes to text filtering.

mod_mam_rdbms_arch returns all messages that contain all search words, order of words does not matter. Messages are sorted by timestamp (not by relevance).

"},{"location":"modules/mod_mam/#note-on-full-text-search-with-elasticsearch-backend","title":"Note on full text search with ElasticSearch backend","text":"

When using ElasticSearch MAM backend, the value provided in full-text-search form field will be passed to ElasticSearch as Simple Search Query. If you're using our official ElasticSearch mappings from priv/elasticsearch then the query analyzer is set to english. Also note that the default separator for the search query is AND (which roughly means that ElasticSearch will search for messages containing all the terms provided in the query string).

"},{"location":"modules/mod_mam/#options","title":"Options","text":""},{"location":"modules/mod_mam/#modulesmod_mambackend","title":"modules.mod_mam.backend","text":"
  • Syntax: string, one of \"rdbms\", \"cassandra\" and \"elasticsearch\"
  • Default: \"rdbms\"
  • Example: backend = \"elasticsearch\"

Database backend to use.

"},{"location":"modules/mod_mam/#modulesmod_mamno_stanzaid_element","title":"modules.mod_mam.no_stanzaid_element","text":"
  • Syntax: boolean
  • Default: false
  • Example: no_stanzaid_element = true

Do not add a <stanza-id/> element from MAM v1.1.0.

"},{"location":"modules/mod_mam/#modulesmod_mamis_archivable_message","title":"modules.mod_mam.is_archivable_message","text":"
  • Syntax: non-empty string
  • Default: \"mod_mam_utils\"
  • Example: is_archivable_message = \"mod_mam_utils\"

Name of a module implementing is_archivable_message/3 callback that determines if the message should be archived.

"},{"location":"modules/mod_mam/#modulesmod_mamsend_message","title":"modules.mod_mam.send_message","text":"
  • Syntax: non-empty string
  • Default: \"mod_mam_utils\"
  • Example: send_message = \"mod_mam_utils\"

Name of a module implementing send_message/4 callback that routes a message during lookup operation. Consult with mod_mam_utils:send_message/4 code for more information.

Check big_tests/tests/mam_send_message_SUITE_data/mam_send_message_example.erl file in the MongooseIM repository for the usage example.

"},{"location":"modules/mod_mam/#modulesmod_mamarchive_chat_markers","title":"modules.mod_mam.archive_chat_markers","text":"
  • Syntax: boolean
  • Default: false
  • Example: archive_chat_markers = true

If set to true, XEP-0333 chat markers will be archived. See more details here.

"},{"location":"modules/mod_mam/#modulesmod_mammessage_retraction","title":"modules.mod_mam.message_retraction","text":"
  • Syntax: boolean
  • Default: true
  • Example: message_retraction = false

Enables XEP-0424: Message Retraction. This functionality is currently implemented only for the rdbms backend. Retraction messages are always archived regardless of this option.

backend, no_stanzaid_element, is_archivable_message and message_retraction will be applied to both pm and muc (if they are enabled), unless overridden explicitly (see example below).

"},{"location":"modules/mod_mam/#enable-one-to-one-message-archive","title":"Enable one-to-one message archive","text":"

Archive for one-to-one messages can be enabled in one of two ways:

  • Specify [mod_mam.pm] section
[modules.mod_mam]\n[modules.mod_mam.pm] # defining this section enables PM support\n
  • Define any PM related option
[modules.mod_mam]\n  pm.backend = \"rdbms\" # enables PM support and overrides its backend\n
"},{"location":"modules/mod_mam/#disable-one-to-one-message-archive","title":"Disable one-to-one message archive","text":"

To disable archive for one-to-one messages please remove PM section or any PM related option from the config file.

"},{"location":"modules/mod_mam/#pm-specific-options","title":"PM-specific options","text":""},{"location":"modules/mod_mam/#modulesmod_mampmarchive_groupchats","title":"modules.mod_mam.pm.archive_groupchats","text":"
  • Syntax: boolean
  • Default: false
  • Example: modules.mod_mam.pm.archive_groupchats = true

When enabled, MAM will store groupchat messages in recipients' individual archives. USE WITH CAUTION! May increase archive size significantly. Disabling this option for an existing installation will neither remove such messages from MAM storage, nor filter them out from search results. Clients can use the include-groupchat filter to filter out groupchat messages while querying the archive.

Warning

The include-groupchat filter doesn't work for Cassandra backend.

"},{"location":"modules/mod_mam/#modulesmod_mampmsame_mam_id_for_peers","title":"modules.mod_mam.pm.same_mam_id_for_peers","text":"
  • Syntax: boolean
  • Default: false
  • Example: modules.mod_mam.pm.same_mam_id_for_peers = true

When enabled, MAM will set the same MAM ID for both sender and recipient. This can be useful in combination with retraction on the stanza-id. Note that this might not work with clients across federation, as the recipient might not implement the same retraction, nor the same IDs.

"},{"location":"modules/mod_mam/#enable-muc-message-archive","title":"Enable MUC message archive","text":"

Archive for MUC messages can be enabled in one of two ways:

  • Specify [mod_mam.muc] section
[modules.mod_mam]\n[modules.mod_mam.muc] # defining this section enables MUC support\n
  • Define any MUC related option
[modules.mod_mam]\n  muc.backend = \"rdbms\" # enables MUC support and overrides its backend\n
"},{"location":"modules/mod_mam/#disable-muc-message-archive","title":"Disable MUC message archive","text":"

To disable archive for MUC messages please remove MUC section or any MUC related option from the config file.

"},{"location":"modules/mod_mam/#muc-specific-options","title":"MUC-specific options","text":""},{"location":"modules/mod_mam/#modulesmod_mammuchost","title":"modules.mod_mam.muc.host","text":"
  • Syntax: string
  • Default: \"conference.@HOST@\"
  • Example: modules.mod_mam.muc.host = \"conference.@HOST@\"

The MUC host that will be archived if MUC archiving is enabled.

Warning

If you are using MUC Light, make sure this option is set to the MUC Light domain

"},{"location":"modules/mod_mam/#example","title":"Example","text":"

The example below presents how to override common option for muc module specifically. Please note that you can override all common options (except cache) in a similar way.

[modules.mod_mam]\n  backend = \"rdbms\"\n  async_writer.enabled = true # this option enables async writer for RDBMS backend\n  muc.async_writer.enabled = false # disable async writer for MUC archive only\n
"},{"location":"modules/mod_mam/#rdbms-backend-options","title":"RDBMS backend options","text":"

These options will only have effect when the rdbms backend is used:

"},{"location":"modules/mod_mam/#modulesmod_mamcache_users","title":"modules.mod_mam.cache_users","text":"
  • Syntax: boolean
  • Default: true
  • Example: modules.mod_mam.cache_users = false

Enables Archive ID to integer mappings cache.

If caching is enabled, by default it will spawn its own segmented cache cache, with defaults as in mod_cache_users. To change these defaults, the same config can be accessed within the cache key. To see details about the meaning of each flag, see mod_cache_users. To reuse the cache already created by mod_cache_users, see the option below.

modules.mod_mam.cache.strategy\nmodules.mod_mam.cache.time_to_live\nmodules.mod_mam.cache.number_of_segments\n
"},{"location":"modules/mod_mam/#modulesmod_mamcachemodule","title":"modules.mod_mam.cache.module","text":"
  • Syntax: string, one of \"mod_cache_users\" or \"internal\"
  • Default: internal
  • Example: modules.mod_mam.cache.module = \"mod_cache_users\"

Configures which cache to use, either start an internal instance, or reuse the cache created by mod_cache_users, if such module was enabled. Note that if reuse is desired \u2013 that is, cache.module = \"mod_cache_users\", other cache configuration parameters will be ignored.

"},{"location":"modules/mod_mam/#modulesmod_mamasync_writerenabled","title":"modules.mod_mam.async_writer.enabled","text":"
  • Syntax: boolean
  • Default: true
  • Example: modules.mod_mam.async_writer.enabled = false

Enables an asynchronous writer that is faster than the synchronous one but harder to debug. The async writers store batches of messages that will be flushed on a timeout (see flush_interval) or when the batch reaches a given size (see batch_size), so the results of the lookup operations executed right after message routing may be incomplete until the configured time passes or the queue is full.

"},{"location":"modules/mod_mam/#modulesmod_mamasync_writerflush_interval","title":"modules.mod_mam.async_writer.flush_interval","text":"
  • Syntax: non-negative integer
  • Default: 2000
  • Example: modules.mod_mam.async_writer.flush_interval = 2000

How often (in milliseconds) the buffered messages are flushed to DB.

"},{"location":"modules/mod_mam/#modulesmod_mamasync_writerbatch_size","title":"modules.mod_mam.async_writer.batch_size","text":"
  • Syntax: non-negative integer
  • Default: 30
  • Example: modules.mod_mam.async_writer.batch_size = 30

Max size of the batch for an async writer before the queue is considered full and flushed. If the buffer is full, messages are flushed to a database immediately and the flush timer is reset.

"},{"location":"modules/mod_mam/#modulesmod_mamasync_writerpool_size","title":"modules.mod_mam.async_writer.pool_size","text":"
  • Syntax: non-negative integer
  • Default: 4 * erlang:system_info(schedulers_online)
  • Example: modules.mod_mam.async_writer.pool_size = 32

Number of workers in the pool. More than the number of available schedulers is recommended, to minimise lock contention on the message queues, and more than the number of DB workers, to fully utilise the DB capacity. How much more than these two parameters is then a good fine-tuning for specific deployments.

"},{"location":"modules/mod_mam/#common-backend-options","title":"Common backend options","text":""},{"location":"modules/mod_mam/#modulesmod_mamuser_prefs_store","title":"modules.mod_mam.user_prefs_store","text":"
  • Syntax: one of \"rdbms\", \"cassandra\", \"mnesia\"
  • Default: not set
  • Example: modules.mod_mam.user_prefs_store = \"rdbms\"

Leaving this option unset will prevent users from setting their archiving preferences. It will also increase performance. The possible values are:

  • \"rdbms\" (RDBMS backend only) - User archiving preferences saved in RDBMS. Slow and not recommended, but might be used for simplicity (keeping everything in RDBMS).
  • \"cassandra\" (Cassandra backend only) - User archiving preferences are saved in Cassandra.
  • \"mnesia\" (recommended) - User archiving preferences saved in Mnesia and accessed without transactions. Recommended in most deployments, could be overloaded with lots of users updating their preferences at once. There's a small risk of an inconsistent (in a rather harmless way) state of the preferences table.
"},{"location":"modules/mod_mam/#modulesmod_mamfull_text_search","title":"modules.mod_mam.full_text_search","text":"
  • Syntax: boolean
  • Default: true
  • Example: modules.mod_mam.full_text_search = false

Enables full text search in message archive (see Full Text Search paragraph). Please note that the full text search is currently only implemented for \"rdbms\" backend. Also, full text search works only for messages archived while this option is enabled.

"},{"location":"modules/mod_mam/#is_archivable_message3-callback","title":"is_archivable_message/3 callback","text":"

is_archivable_message option has to name a module exporting is_archivable_message/3 function conforming to the spec:

-spec is_archivable_message(Mod :: module(), Dir :: incoming | outgoing,\n                          Packet :: exml:element()) -> boolean().\n

Servers SHOULD NOT archive messages that do not have a <body/> child tag. Servers SHOULD NOT archive delayed messages.

By default, all messages that hold meaningful content, rather than state changes such as Chat State Notifications, are archived.

"},{"location":"modules/mod_mam/#archiving-chat-markers","title":"Archiving chat markers","text":"

Archiving chat markers can be enabled by setting archive_chat_markers option to true. However, it only works if the is_archivable_message callback module is set to mod_mam_utils or isn't set at all.

When performing full text search chat markers are treated as if they had empty message body.

"},{"location":"modules/mod_mam/#cassandra-backend","title":"Cassandra backend","text":"

Please consult Outgoing connections page to learn how to properly configure Cassandra connection pool. By default, mod_mam Cassandra backend requires global pool with default tag.

"},{"location":"modules/mod_mam/#elasticsearch-backend","title":"ElasticSearch backend","text":"

First, make sure that your ElasticSearch cluster has expected indexes and mappings in place. Please consult Outgoing connections page to learn how to properly configure ElasticSearch connection pool.

"},{"location":"modules/mod_mam/#low-level-options","title":"Low-level options","text":"

These options allow for fine-grained control over MAM behaviour.

"},{"location":"modules/mod_mam/#modulesmod_mamdefault_result_limit","title":"modules.mod_mam.default_result_limit","text":"
  • Syntax: non-negative integer
  • Default: 50
  • Example: modules.mod_mam.default_result_limit = 100

This sets the default page size of returned results.

"},{"location":"modules/mod_mam/#modulesmod_mammax_result_limit","title":"modules.mod_mam.max_result_limit","text":"
  • Syntax: non-negative integer
  • Default: 50
  • Example: modules.mod_mam.max_result_limit = 100

This sets the maximum page size of returned results.

"},{"location":"modules/mod_mam/#modulesmod_mamenforce_simple_queries","title":"modules.mod_mam.enforce_simple_queries","text":"
  • Syntax: boolean
  • Default: false
  • Example: modules.mod_mam.enforce_simple_queries = true

This enforces all mam lookups to be \"simple\", i.e., they skip the RSM count. See Message Archive Management extensions.

"},{"location":"modules/mod_mam/#modulesmod_mamdelete_domain_limit","title":"modules.mod_mam.delete_domain_limit","text":"
  • Syntax: non-negative integer or the string \"infinity\"
  • Default: \"infinity\"
  • Example: modules.mod_mam.delete_domain_limit = 10000

Domain deletion can be an expensive operation, as it requires to delete potentially many thousands of records from the DB. By default, the delete operation deletes everything in a transaction, but it might be desired, to handle timeouts and table locks more gracefully, to delete the records in batches. This limit establishes the size of the batch.

Note

Not supported by MSSQL.

"},{"location":"modules/mod_mam/#modulesmod_mamdb_jid_format","title":"modules.mod_mam.db_jid_format","text":"
  • Syntax: string, one of \"mam_jid_rfc\", \"mam_jid_rfc_trust\", \"mam_jid_mini\" or a module implementing mam_jid behaviour
  • Default: \"mam_jid_rfc\" for MUC archive, \"mam_jid_mini\" for PM archive
  • Example: modules.mod_mam.db_jid_format = \"mam_jid_mini\"

Sets the internal MAM jid encoder/decoder module for RDBMS.

Warning

Archive MUST be empty to change this option

"},{"location":"modules/mod_mam/#modulesmod_mamdb_message_format","title":"modules.mod_mam.db_message_format","text":"
  • Syntax: string, one of \"mam_message_xml\", \"mam_message_eterm\", \"mam_message_compressed_eterm\" or a module implementing mam_message behaviour
  • Default: \"mam_message_compressed_eterm\" for RDBMS, \"mam_message_xml\" for Cassandra
  • Example: modules.mod_mam.db_message_format = \"mam_message_compressed_eterm\"

Sets the internal MAM message encoder/decoder module.

Warning

Archive MUST be empty to change this option

"},{"location":"modules/mod_mam/#modulesmod_mamextra_fin_element","title":"modules.mod_mam.extra_fin_element","text":"
  • Syntax: string, a module implementing the extra_fin_element/3 callback
  • Default: none
  • Example: modules.mod_mam.extra_fin_element = \"example_mod\"

This module can be used to add subelements to the <fin> element of the MAM lookup query response. It can be useful to be able to add information to a mam query, that doesn't belong to any specific message but to all of them.

"},{"location":"modules/mod_mam/#modulesmod_mamextra_lookup_params","title":"modules.mod_mam.extra_lookup_params","text":"
  • Syntax: string, a module implementing the extra_lookup_params/2 callback
  • Default: none
  • Example: modules.mod_mam.extra_lookup_params = \"example_mod\"

This module can be used to add extra lookup parameters to MAM lookup queries.

"},{"location":"modules/mod_mam/#example-configuration","title":"Example configuration","text":"
[modules.mod_mam]\n  backend = \"rdbms\"\n  no_stanzaid_element = true\n\n  pm.user_prefs_store = \"rdbms\"\n\n  muc.host = \"muc.example.com\"\n  muc.db_message_format = \"mam_message_xml\"\n  muc.async_writer.enabled = false\n  muc.user_prefs_store = \"mnesia\"\n
"},{"location":"modules/mod_mam/#metrics","title":"Metrics","text":"

If you'd like to learn more about metrics in MongooseIM, please visit MongooseIM metrics page.

Name Type Description (when it gets incremented) [HostType, modMamArchiveRemoved] spiral User's entire archive is removed. [HostType, modMamArchived] spiral A message is stored in user's archive. [HostType, modMamDropped] spiral A message couldn't be stored in the DB (and got dropped). [HostType, modMamDroppedIQ] spiral MAM IQ has been dropped due to: high query frequency/invalid syntax or type. [HostType, modMamFlushed] spiral Message was stored in a DB asynchronously. [HostType, modMamForwarded] spiral A message is sent to a client as a part of a MAM query result. [HostType, modMamLookups] spiral A MAM lookup is performed. [HostType, modMamPrefsGets] spiral Archiving preferences have been requested by a client. [HostType, modMamPrefsSets] spiral Archiving preferences have been updated by a client. [HostType, modMucMamArchiveRemoved] spiral Room's entire archive is removed. [HostType, modMucMamArchived] spiral A message is stored in room's archive. [HostType, modMucMamForwarded] spiral A message is sent to a client as a part of a MAM query result from MUC room. [HostType, modMucMamLookups] spiral A MAM lookup in MUC room is performed. [HostType, modMucMamPrefsGets] spiral MUC archiving preferences have been requested by a client. [HostType, modMucMamPrefsSets] spiral MUC archiving preferences have been updated by a client. [HostType, mod_mam_rdbms_async_pool_writer, per_message_flush_time] histogram Average time per message insert measured in an async MAM worker. [HostType, mod_mam_rdbms_async_pool_writer, flush_time] histogram Average time per flush of all buffered messages measured in an async MAM worker. [HostType, mod_mam_muc_rdbms_async_pool_writer, per_message_flush_time] histogram Average time per message insert measured in an async MUC MAM worker. [HostType, mod_mam_muc_rdbms_async_pool_writer, flush_time] histogram Average time per flush of all buffered messages measured in an async MUC MAM worker. 
Backend action Description (when it gets incremented) lookup A lookup in an archive. archive One message is saved in an archive."},{"location":"modules/mod_muc/","title":"mod_muc","text":""},{"location":"modules/mod_muc/#module-description","title":"Module Description","text":"

This module implements XEP-0045: Multi-User Chat (MUC). It's a common XMPP group chat solution. This extension consists of two Erlang modules: mod_muc and mod_muc_room, the latter being the room code itself. Note that only mod_muc needs to be enabled in the configuration file. Also mod_muc_log is a logging submodule.

"},{"location":"modules/mod_muc/#options","title":"Options","text":""},{"location":"modules/mod_muc/#modulesmod_muchost","title":"modules.mod_muc.host","text":"
  • Syntax: string, a valid subdomain
  • Default: \"conference.@HOST@\"
  • Example: host = \"group.@HOST@\"

Subdomain for MUC service to reside under. @HOST@ is replaced with each served domain.

"},{"location":"modules/mod_muc/#modulesmod_mucbackend","title":"modules.mod_muc.backend","text":"
  • Syntax: string, one of \"mnesia\" or \"rdbms\"
  • Default: \"mnesia\"
  • Example: backend = \"rdbms\"

Storage backend to store rooms and settings persistently.

"},{"location":"modules/mod_muc/#modulesmod_muconline_backend","title":"modules.mod_muc.online_backend","text":"
  • Syntax: string, one of \"mnesia\" or \"cets\"
  • Default: \"mnesia\"
  • Example: online_backend = \"cets\"

Backend to use to register and find online rooms. Queried when routing stanzas to the rooms.

Warning

The corresponding internal database has to be enabled.

"},{"location":"modules/mod_muc/#modulesmod_mucaccess","title":"modules.mod_muc.access","text":"
  • Syntax: non-empty string
  • Default: \"all\"
  • Example: access = \"muc\"

Access Rule to determine who is allowed to use the MUC service.

"},{"location":"modules/mod_muc/#modulesmod_mucaccess_create","title":"modules.mod_muc.access_create","text":"
  • Syntax: non-empty string
  • Default: \"all\"
  • Example: access_create = \"muc_create\"

Access Rule to determine who is allowed to create rooms.

"},{"location":"modules/mod_muc/#modulesmod_mucaccess_admin","title":"modules.mod_muc.access_admin","text":"
  • Syntax: non-empty string
  • Default: \"none\"
  • Example: access_admin = \"muc_create\"

Access Rule to determine who is the administrator in all rooms.

"},{"location":"modules/mod_muc/#modulesmod_mucaccess_persistent","title":"modules.mod_muc.access_persistent","text":"
  • Syntax: non-empty string
  • Default: \"all\"
  • Example: access_persistent = \"none\"

Access Rule to determine who is allowed to make the rooms persistent. In order to change this parameter, the user must not only match the Access Rule but also be the owner of the room.

"},{"location":"modules/mod_muc/#modulesmod_muchistory_size","title":"modules.mod_muc.history_size","text":"
  • Syntax: non-negative integer
  • Default: 20
  • Example: history_size = 30

Room message history to be kept in RAM. After node restart, the history is lost.

"},{"location":"modules/mod_muc/#modulesmod_mucroom_shaper","title":"modules.mod_muc.room_shaper","text":"
  • Syntax: non-empty string
  • Default: \"none\"
  • Example: room_shaper = \"muc_room_shaper\"

Limits per-room data throughput with traffic shaper.

"},{"location":"modules/mod_muc/#modulesmod_mucmax_room_id","title":"modules.mod_muc.max_room_id","text":"
  • Syntax: non-negative integer or the string \"infinity\"
  • Default: \"infinity\"
  • Example: max_room_id = 30

Maximum room username length (in JID).

"},{"location":"modules/mod_muc/#modulesmod_mucmax_room_name","title":"modules.mod_muc.max_room_name","text":"
  • Syntax: non-negative integer or the string \"infinity\"
  • Default: \"infinity\"
  • Example: max_room_name = 30

Maximum room name length.

"},{"location":"modules/mod_muc/#modulesmod_mucmax_room_desc","title":"modules.mod_muc.max_room_desc","text":"
  • Syntax: non-negative integer or the string \"infinity\"
  • Default: \"infinity\"
  • Example: max_room_desc = 140

Maximum room description length.

"},{"location":"modules/mod_muc/#modulesmod_mucmin_message_interval","title":"modules.mod_muc.min_message_interval","text":"
  • Syntax: non-negative integer
  • Default: 0
  • Example: min_message_interval = 1

Minimal interval (in seconds) between messages processed by the room.

"},{"location":"modules/mod_muc/#modulesmod_mucmin_presence_interval","title":"modules.mod_muc.min_presence_interval","text":"
  • Syntax: non-negative integer
  • Default: 0
  • Example: min_presence_interval = 1

Minimal interval (in seconds) between presences processed by the room.

"},{"location":"modules/mod_muc/#modulesmod_mucmax_users","title":"modules.mod_muc.max_users","text":"
  • Syntax: positive integer
  • Default: 200
  • Example: max_users = 100

Absolute maximum user count per room on the node.

"},{"location":"modules/mod_muc/#modulesmod_mucmax_users_admin_threshold","title":"modules.mod_muc.max_users_admin_threshold","text":"
  • Syntax: positive integer
  • Default: 5
  • Example: max_users_admin_threshold = 10

When the server checks if a new user can join a room and they are an admin, max_users_admin_threshold is added to max_users during occupant limit check.

"},{"location":"modules/mod_muc/#modulesmod_mucuser_message_shaper","title":"modules.mod_muc.user_message_shaper","text":"
  • Syntax: non-empty string
  • Default: \"none\"
  • Example: user_message_shaper = \"muc_user_msg_shaper\"

Shaper for user messages processed by a room (global for the room).

"},{"location":"modules/mod_muc/#modulesmod_mucuser_presence_shaper","title":"modules.mod_muc.user_presence_shaper","text":"
  • Syntax: non-empty string
  • Default: \"none\"
  • Example: user_presence_shaper = \"muc_user_presence_shaper\"

Shaper for user presences processed by a room (global for the room).

"},{"location":"modules/mod_muc/#modulesmod_mucmax_user_conferences","title":"modules.mod_muc.max_user_conferences","text":"
  • Syntax: non-negative integer
  • Default: 10
  • Example: max_user_conferences = 5

Specifies the number of rooms that a user can occupy simultaneously.

"},{"location":"modules/mod_muc/#modulesmod_muchttp_auth_pool","title":"modules.mod_muc.http_auth_pool","text":"
  • Syntax: non-empty string
  • Default: \"none\"
  • Example: http_auth_pool = \"external_auth\"

If an external HTTP service is chosen to check passwords for password-protected rooms, this option specifies the HTTP pool name to use (see External HTTP Authentication below).

"},{"location":"modules/mod_muc/#modulesmod_mucload_permanent_rooms_at_startup","title":"modules.mod_muc.load_permanent_rooms_at_startup","text":"
  • Syntax: boolean
  • Default: false
  • Example: load_permanent_rooms_at_startup = true

Load all rooms at startup. Because it can be unsafe when there are many rooms, it is disabled by default.

"},{"location":"modules/mod_muc/#modulesmod_muchibernate_timeout","title":"modules.mod_muc.hibernate_timeout","text":"
  • Syntax: non-negative integer or the string \"infinity\"
  • Default: 90000 (milliseconds, 90 seconds)
  • Example: hibernate_timeout = 60000

Timeout (in milliseconds) defining the inactivity period after which the room's process should be hibernated.

"},{"location":"modules/mod_muc/#modulesmod_muchibernated_room_check_interval","title":"modules.mod_muc.hibernated_room_check_interval","text":"
  • Syntax: non-negative integer or the string \"infinity\"
  • Default: \"infinity\"
  • Example: hibernated_room_check_interval = 120000

Interval defining how often the hibernated rooms will be checked (a timer is global for a node).

"},{"location":"modules/mod_muc/#modulesmod_muchibernated_room_timeout","title":"modules.mod_muc.hibernated_room_timeout","text":"
  • Syntax: non-negative integer or the string \"infinity\"
  • Default: \"infinity\"
  • Example: hibernated_room_timeout = 120000

A time after which a hibernated room is stopped (deeply hibernated). See MUC performance optimisation.

"},{"location":"modules/mod_muc/#modulesmod_mucdefault_room","title":"modules.mod_muc.default_room","text":"
  • Syntax: A TOML table of options described below
  • Default: Default room options
  • Example:
  [modules.mod_muc.default_room]\n    password_protected = true\n    description = \"An example description.\"\n\n    [[modules.mod_muc.default_room.affiliations]]\n        user = \"alice\"\n        server = \"localhost\"\n        resource = \"resource1\"\n        affiliation = \"member\"\n

or:

  default_room.password_protected = true\n  default_room.description = \"An example description.\"\n\n  [[modules.mod_muc.default_room.affiliations]]\n    user = \"alice\"\n    server = \"localhost\"\n    resource = \"resource1\"\n    affiliation = \"member\"\n

Available room configuration options to be overridden in the initial state:

  • modules.mod_muc.default_room.title

    • Syntax: string
    • Default: \"\"
    • Example: title = \"example_title\"

    Room title, short free text.

  • modules.mod_muc.default_room.description

    • Syntax: string
    • Default: \"\"
    • Example: description = \"An example description.\"

    Room description, long free text.

  • modules.mod_muc.default_room.allow_change_subj

    • Syntax: boolean
    • Default: true
    • Example: allow_change_subj = false

    Allow all occupants to change the room subject.

  • modules.mod_muc.default_room.allow_query_users

    • Syntax: boolean
    • Default: true
    • Example: allow_query_users = false

    Allow occupants to send IQ queries to other occupants.

  • modules.mod_muc.default_room.allow_private_messages

    • Syntax: boolean
    • Default: true
    • Example: allow_private_messages = false

    Allow private messaging between occupants.

  • modules.mod_muc.default_room.allow_visitor_status

    • Syntax: boolean
    • Default: true
    • Example: allow_visitor_status = false

    Allow occupants to use text statuses in presences. When disabled, text is removed by the room before broadcasting.

  • modules.mod_muc.default_room.allow_visitor_nickchange

    • Syntax: boolean
    • Default: true
    • Example: allow_visitor_nickchange = false

    Allow occupants to change nicknames.

  • modules.mod_muc.default_room.public

    • Syntax: boolean
    • Default: true
    • Example: public = false

    Room is included in the list available via Service Discovery.

  • modules.mod_muc.default_room.public_list

    • Syntax: boolean
    • Default: true
    • Example: public_list = false

    Member list can be fetched by non-members.

  • modules.mod_muc.default_room.persistent

    • Syntax: boolean
    • Default: false
    • Example: persistent = true

    Room will be stored in DB and survive even when the last occupant leaves or the node is restarted.

  • modules.mod_muc.default_room.moderated

    • Syntax: boolean
    • Default: true
    • Example: moderated = false

    Only occupants with a \"voice\" can send group chat messages.

  • modules.mod_muc.default_room.members_by_default

    • Syntax: boolean
    • Default: true
    • Example: members_by_default = false

    All new occupants are members by default, unless they have a different affiliation assigned.

  • modules.mod_muc.default_room.members_only

    • Syntax: boolean
    • Default: false
    • Example: members_only = true

    Only users with a member affiliation can join the room.

  • modules.mod_muc.default_room.allow_user_invites

    • Syntax: boolean
    • Default: false
    • Example: allow_user_invites = true

    Allow ordinary members to send mediated invitations.

  • modules.mod_muc.default_room.allow_multiple_sessions

    • Syntax: boolean
    • Default: false
    • Example: allow_multiple_sessions = true

    Allow multiple user sessions to use the same nick.

  • modules.mod_muc.default_room.password_protected

    • Syntax: boolean
    • Default: false
    • Example: password_protected = true

    Room is protected with a password.

  • modules.mod_muc.default_room.password

    • Syntax: string
    • Default: \"\"
    • Example: password = \"secret\"

    Room password is required upon joining. This option has no effect when password_protected is false.

  • modules.mod_muc.default_room.anonymous

    • Syntax: boolean
    • Default: true
    • Example: anonymous = false

    Room is anonymous, meaning occupants can't see each other's real JIDs, except for the room moderators.

  • modules.mod_muc.default_room.max_users

    • Syntax: positive integer
    • Default: 200
    • Example: max_users = 100

    Maximum user count per room. Admins and the room owner are not affected.

  • modules.mod_muc.default_room.logging

    • Syntax: boolean
    • Default: false
    • Example: logging = true

    Enables logging of room events (messages, presences) to a file on the disk. Uses mod_muc_log.

  • modules.mod_muc.default_room.maygetmemberlist

    • Syntax: array of non-empty strings
    • Default: []
    • Example: maygetmemberlist = [\"moderator\"]

    An array of roles and/or privileges that enable retrieving the room's member list.

  • modules.mod_muc.default_room.affiliations

    • Syntax: array of tables with keys:
      • user - non-empty string,
      • server - string, a valid domain,
      • resource - string,
      • affiliation - non-empty string
    • Default: []
    • Example:
[[modules.mod_muc.default_room.affiliations]]\n  user = \"alice\"\n  server = \"localhost\"\n  resource = \"resource1\"\n  affiliation = \"member\"\n\n[[modules.mod_muc.default_room.affiliations]]\n  user = \"bob\"\n  server = \"localhost\"\n  resource = \"resource2\"\n  affiliation = \"owner\"\n

This is the default list of affiliations set for every new room.

  • modules.mod_muc.default_room.subject

    • Syntax: string
    • Default: \"\"
    • Example: subject = \"Lambda days\"

    A default subject for a new room.

  • modules.mod_muc.default_room.subject_author

    • Syntax: string
    • Default: \"\"
    • Example: subject_author = \"Alice\"

    The nickname of the default subject's author.

"},{"location":"modules/mod_muc/#example-configuration","title":"Example Configuration","text":"
[modules.mod_muc]\n  host = \"muc.example.com\"\n  access = \"muc\"\n  access_create = \"muc_create\"\n  http_auth_pool = \"my_auth_pool\"\n  default_room.password_protected = true\n\n  [[modules.mod_muc.default_room.affiliations]]\n    user = \"alice\"\n    server = \"localhost\"\n    resource = \"resource1\"\n    affiliation = \"member\"\n\n  [[modules.mod_muc.default_room.affiliations]]\n    user = \"bob\"\n    server = \"localhost\"\n    resource = \"resource2\"\n    affiliation = \"owner\"\n
"},{"location":"modules/mod_muc/#performance-optimisations","title":"Performance optimisations","text":"

Each room is represented by an Erlang process with its own state and can consume memory even if it's not used. In large installations with many rooms, this might cause performance issues. To address that problem MongooseIM has 2 levels of MUC rooms memory optimisations.

"},{"location":"modules/mod_muc/#rooms-process-hibernation","title":"Room's process hibernation","text":"

By default the room's process is hibernated by the Erlang VM 90 seconds after the last activity. This timeout can be modified by hibernate_timeout option.

"},{"location":"modules/mod_muc/#room-deep-hibernation","title":"Room deep hibernation","text":"

MongooseIM introduces an additional option of deep hibernation for unused rooms. This optimisation works only for persistent rooms as only these can be restored on demand. The improvement works as follows: 1. All room processes are traversed at a chosen hibernated_room_check_interval. 2. If a hibernated_room_timeout is exceeded, a \"stop\" signal is sent to an unused room. 3. The room's process is stopped only if there are no online users or if the only one is its owner. If the owner is online, a presence of type unavailable is sent to it indicating that the room's process is being terminated.

The room's process can be recreated on demand, for example when a presence is sent to it, or when the owner wants to add more users to the room.

"},{"location":"modules/mod_muc/#external-http-authentication","title":"External HTTP Authentication","text":"

MUC rooms can be protected by a password that is set by the room owner. Note that MongooseIM supports another custom solution, where each attempt to enter or create a room requires the password to be checked by an external HTTP service. To enable this option, you need to:

  • Configure an HTTP connection pool.
  • Set the name of the connection pool as the value of the http_auth_pool option of mod_muc.
  • Enable the password_protected default room option (without setting the password itself).

Whenever a user tries to enter or create a room, the server will receive a GET request to the check_password path. It should return a 200 response with a JSON object {\"code\": Code, \"msg\": Message} in the response body. If the server returns something else, an error presence will be sent back to the client.

  • Code is the status code: 0 indicates a successful authentication, any other value means the authentication failed.
  • Message is a string containing the message to be sent back to the XMPP client indicating the reason for a failed authentication. When authentication succeeds, it is ignored and can contain anything (e.g. the string \"OK\").

Example:

[outgoing_pools.http.my_auth_pool]\n  strategy = \"available_worker\"\n  connection.host = \"http://my_server:8000\"\n\n[modules.mod_muc]\n  host = \"muc.example.com\"\n  access = \"muc\"\n  access_create = \"muc_create\"\n  http_auth_pool = \"my_auth_pool\"\n  default_room.password_protected = true\n
"},{"location":"modules/mod_muc/#metrics","title":"Metrics","text":"

If you'd like to learn more about metrics in MongooseIM, please visit MongooseIM metrics page.

Name Type Description (when it gets incremented) [global, mod_muc, deep_hibernations] spiral A room process is stopped (applies only to persistent rooms). [global, mod_muc, process_recreations] spiral A room process is recreated from a persisted state. [global, mod_muc, hibernations] spiral A room process becomes hibernated (garbage collected and put in wait state). [global, mod_muc, hibernated_rooms] value How many rooms are in hibernated state. Does not include rooms in \"deep hibernation\". [global, mod_muc, online_rooms] value How many rooms have running processes (includes rooms in a hibernated state)."},{"location":"modules/mod_muc_light/","title":"mod_muc_light","text":""},{"location":"modules/mod_muc_light/#module-description","title":"Module Description","text":"

This module implements Multi-User Chat Light. It's an experimental XMPP group chat solution. This extension consists of several modules but only mod_muc_light needs to be enabled in the config file.

"},{"location":"modules/mod_muc_light/#options","title":"Options","text":""},{"location":"modules/mod_muc_light/#modulesmod_muc_lighthost","title":"modules.mod_muc_light.host","text":"
  • Syntax: string, a valid subdomain
  • Default: \"muclight.@HOST@\"
  • Example: host = \"group.@HOST@\"

Domain for the MUC Light service to reside under. @HOST@ is replaced with each served domain.

"},{"location":"modules/mod_muc_light/#modulesmod_muc_lightbackend","title":"modules.mod_muc_light.backend","text":"
  • Syntax: string, one of \"mnesia\", \"rdbms\"
  • Default: \"mnesia\"
  • Example: backend = \"rdbms\"

Database backend to use.

"},{"location":"modules/mod_muc_light/#modulesmod_muc_lightcache_affs","title":"modules.mod_muc_light.cache_affs.*","text":"
  • Syntax: TOML section
  • Default: not declared
  • Example: [modules.mod_muc_light.cache_affs]

Enables caching affiliations for rooms, this has the advantage that the list of affiliations of a given room is stored locally, instead of being fetched from the DB on each message delivered to a room. On the other hand, in an edge case of a network split when the affiliations of a room are changed, there's a risk of inconsistencies for the cache having values in one node not yet synchronised with the other.

If caching is enabled, it will spawn its own segmented cache. To configure the cache parameters, the same config can be stored under the cache_affs section. To see details about the meaning of each flag, see mod_cache_users.

modules.mod_muc_light.cache_affs.strategy\nmodules.mod_muc_light.cache_affs.time_to_live\nmodules.mod_muc_light.cache_affs.number_of_segments\n
"},{"location":"modules/mod_muc_light/#modulesmod_muc_lightequal_occupants","title":"modules.mod_muc_light.equal_occupants","text":"
  • Syntax: boolean
  • Default: false
  • Example: equal_occupants = true

When enabled, MUC Light rooms won't have owners. It means that every occupant will be a member, even the room creator.

Warning

This option does not implicitly set all_can_invite to true. If that option is set to false, nobody will be able to join the room after the initial creation request.

"},{"location":"modules/mod_muc_light/#modulesmod_muc_lightlegacy_mode","title":"modules.mod_muc_light.legacy_mode","text":"
  • Syntax: boolean
  • Default: false
  • Example: legacy_mode = true

Enables XEP-0045 compatibility mode. It allows using a subset of classic MUC stanzas with some MUC Light functions limited.

"},{"location":"modules/mod_muc_light/#modulesmod_muc_lightrooms_per_user","title":"modules.mod_muc_light.rooms_per_user","text":"
  • Syntax: positive integer or the string \"infinity\"
  • Default: \"infinity\"
  • Example: rooms_per_user = 100

Specifies a cap on the number of rooms a user can occupy.

Warning

Setting such a limit may trigger expensive DB queries for every occupant addition.

"},{"location":"modules/mod_muc_light/#modulesmod_muc_lightblocking","title":"modules.mod_muc_light.blocking","text":"
  • Syntax: boolean
  • Default: true
  • Example: blocking = false

Blocking feature enabled/disabled.

"},{"location":"modules/mod_muc_light/#modulesmod_muc_lightall_can_configure","title":"modules.mod_muc_light.all_can_configure","text":"
  • Syntax: boolean
  • Default: false
  • Example: all_can_configure = true

When enabled, all room occupants can change all configuration options. If disabled, everyone can still change the room subject.

"},{"location":"modules/mod_muc_light/#modulesmod_muc_lightall_can_invite","title":"modules.mod_muc_light.all_can_invite","text":"
  • Syntax: boolean
  • Default: false
  • Example: all_can_invite = true

When enabled, all room occupants can add new occupants to the room. Occupants added by members become members as well.

"},{"location":"modules/mod_muc_light/#modulesmod_muc_lightmax_occupants","title":"modules.mod_muc_light.max_occupants","text":"
  • Syntax: positive integer or the string \"infinity\"
  • Default: \"infinity\"
  • Example: max_occupants = 100

Specifies a cap on the occupant count per room.

"},{"location":"modules/mod_muc_light/#modulesmod_muc_lightrooms_per_page","title":"modules.mod_muc_light.rooms_per_page","text":"
  • Syntax: positive integer or the string \"infinity\"
  • Default: 10
  • Example: rooms_per_page = 100

Specifies the maximal number of rooms returned for a single Disco request.

"},{"location":"modules/mod_muc_light/#modulesmod_muc_lightrooms_in_rosters","title":"modules.mod_muc_light.rooms_in_rosters","text":"
  • Syntax: boolean
  • Default: false
  • Example: rooms_in_rosters = true

When enabled, rooms the user occupies are included in their roster.

"},{"location":"modules/mod_muc_light/#modulesmod_muc_lightconfig_schema","title":"modules.mod_muc_light.config_schema","text":"
  • Syntax: an array of config_schema items, as described below
  • Default:
        [[modules.mod_muc_light.config_schema]]\n          field = \"roomname\"\n          string_value = \"Untitled\"\n\n        [[modules.mod_muc_light.config_schema]]\n          field = \"subject\"\n          string_value = \"\"\n
  • Example:
        [[modules.mod_muc_light.config_schema]]\n          field = \"display-lines\"\n          integer_value = 30\n          internal_key = \"display_lines\"\n

Defines fields allowed in the room configuration.

Each config_schema item is a TOML table with the following keys:

  • field - mandatory, non-empty string - field name.
  • string_value, integer_value, float_value - exactly one of them has to be present, depending on the type of the field:
    • string_value - string,
    • integer_value - integer,
    • float_value - floating-point number.
  • internal_key - optional, non-empty string - field name used in the internal representation, useful only for debugging or custom applications. By default it is the same as field.

Warning

Lack of the roomname field will cause room names in Disco results and Roster items to be set to the room username.

"},{"location":"modules/mod_muc_light/#example-configuration","title":"Example Configuration","text":"
[modules.mod_muc_light]\n  host = \"muclight.example.com\"\n  equal_occupants = true\n  legacy_mode = true\n  rooms_per_user = 10\n  blocking = false\n  all_can_configure = true\n  all_can_invite = true\n  max_occupants = 50\n  rooms_per_page = 5\n  rooms_in_rosters = true\n\n  [modules.mod_muc_light.cache_affs]\n    time_to_live = 60\n\n  [[modules.mod_muc_light.config_schema]] \n    field = \"roomname\"\n    string_value = \"The Room\"\n\n  [[modules.mod_muc_light.config_schema]] \n    field = \"display-lines\"\n    integer_value = 30\n    internal_key = \"display_lines\"\n
"},{"location":"modules/mod_muc_light/#metrics","title":"Metrics","text":"

If you'd like to learn more about metrics in MongooseIM, please visit MongooseIM metrics page.

Backend action Description (when it gets incremented) create_room A new room is stored in a DB. destroy_room Room data is removed from a DB. room_exists A room existence is checked. get_user_rooms A list of rooms the user is a participant of is retrieved from a DB. remove_user All MUC Light related user data is removed from a DB. get_config A room config is retrieved from a DB. set_config A room config is updated in a DB. get_blocking Blocking data is fetched from a DB. set_blocking Blocking data is updated in a DB. get_aff_users An affiliated users list is fetched from a DB. modify_aff_users Affiliations in a room are updated in a DB."},{"location":"modules/mod_muc_log/","title":"mod_muc_log","text":""},{"location":"modules/mod_muc_log/#module-description","title":"Module Description","text":"

A logging submodule for mod_muc. It must be explicitly configured to work. It writes room-related information (configuration) and events (messages, presences) to files on the disk.

"},{"location":"modules/mod_muc_log/#options","title":"Options","text":""},{"location":"modules/mod_muc_log/#modulesmod_muc_logoutdir","title":"modules.mod_muc_log.outdir","text":"
  • Syntax: string
  • Default: \"www/muc\"
  • Example: outdir = \"www/muc\"

Filesystem directory where the files are stored.

"},{"location":"modules/mod_muc_log/#modulesmod_muc_logaccess_log","title":"modules.mod_muc_log.access_log","text":"
  • Syntax: non-empty string
  • Default: \"muc_admin\"
  • Example: access_log = \"muc_admin\"

ACL that defines who can enable/disable logging for specific rooms.

"},{"location":"modules/mod_muc_log/#modulesmod_muc_logdirtype","title":"modules.mod_muc_log.dirtype","text":"
  • Syntax: string, one of \"subdirs\", \"plain\"
  • Default: \"subdirs\"
  • Example: dirtype = \"subdirs\"

Specifies the log directory structure:

  • \"subdirs\": Module will use the following directory structure [Logs root]/[dirname]/YYYY/MM/ with file names being DD.[extension].
  • \"plain\": Module will use the following directory structure [Logs root]/[dirname]/ with file names being YYYY-MM-DD.[extension].
"},{"location":"modules/mod_muc_log/#modulesmod_muc_logdirname","title":"modules.mod_muc_log.dirname","text":"
  • Syntax: string, one of \"room_jid\", \"room_name\"
  • Default: \"room_jid\"
  • Example: dirname = \"room_jid\"

Specifies directory name created for each room:

  • \"room_jid\": Uses the room bare JID.
  • \"room_name\": Uses the room name from its configuration.
"},{"location":"modules/mod_muc_log/#modulesmod_muc_logfile_format","title":"modules.mod_muc_log.file_format","text":"
  • Syntax: string, one of \"html\", \"plaintext\"
  • Default: \"html\"
  • Example: file_format = \"html\"

Specifies the format of output files:

  • \"html\": The output is a fancy-formatted HTML page.
  • \"plaintext\": Just a text file, better suited for processing than HTML.
"},{"location":"modules/mod_muc_log/#modulesmod_muc_logcss_file","title":"modules.mod_muc_log.css_file","text":"
  • Syntax: non-empty string
  • Default: not set - the default styles for HTML logs are used
  • Example: css_file = \"path/to/css/file\"

Specifies the css file used for logs rendering. Please note it won't be copied to the logs directory but the given path will be linked in HTML files instead.

"},{"location":"modules/mod_muc_log/#modulesmod_muc_logtimezone","title":"modules.mod_muc_log.timezone","text":"
  • Syntax: string, one of \"local\", \"universal\"
  • Default: \"local\"
  • Example: timezone = \"universal\"

Specifies the timezone to be used in timestamps written into the logs:

  • local: Uses the local server timezone.
  • universal: Uses GMT.
"},{"location":"modules/mod_muc_log/#modulesmod_muc_logtop_link","title":"modules.mod_muc_log.top_link","text":"
  • Syntax: TOML table with the following mandatory keys: \"target\", \"text\" and string values.
  • Default: {target = \"/\", text = \"Home\"}
  • Example: top_link = {target = \"/top\", text = \"Top page\"}

Allows setting a custom link at the top of the HTML log file. First tuple element is the link target and the second one is the text to be displayed. You can put any HTML instead of just plain text.

"},{"location":"modules/mod_muc_log/#modulesmod_muc_logspam_prevention","title":"modules.mod_muc_log.spam_prevention","text":"
  • Syntax: boolean
  • Default: true
  • Example: spam_prevention = false

When enabled, MongooseIM will enforce rel=\"nofollow\" attribute in links sent by user and written to MUC logs.

"},{"location":"modules/mod_muc_log/#example-configuration","title":"Example Configuration","text":"
[modules.mod_muc_log]\n  outdir = \"/tmp/muclogs\"\n  access_log = \"muc\"\n  dirtype = \"plain\"\n  dirname = \"room_name\"\n  file_format = \"html\"\n  css_file = \"path/to/css/file\"\n  timezone = \"universal\"\n  top_link.target = \"/\"\n  top_link.text = \"Home\"\n
"},{"location":"modules/mod_offline/","title":"mod_offline","text":""},{"location":"modules/mod_offline/#module-description","title":"Module Description","text":"

This module implements an offline messages storage compliant with XEP-0160: Best Practices for Handling Offline Messages. It can store one-to-one and groupchat messages only when the recipient has no online resources. It is not well suited for applications supporting multiple user devices, because anything saved in the DB can be retrieved only once, so the message history is not synchronised between devices. Although mod_offline may be sufficient in some cases, it is preferable to use mod_mam.

If this module is disabled, an error 503 with text \"Bounce offline message\" would be sent back to the sender, each time a message is sent to an offline user. Check mod_offline_stub to disable this error message.

"},{"location":"modules/mod_offline/#options","title":"Options","text":""},{"location":"modules/mod_offline/#modulesmod_offlineaccess_max_user_messages","title":"modules.mod_offline.access_max_user_messages","text":"
  • Syntax: non-empty string
  • Default: \"max_user_offline_messages\"
  • Example: access_max_user_messages = \"custom_max_user_offline_messages\"

Access Rule to use for limiting the storage size per user.

"},{"location":"modules/mod_offline/#modulesmod_offlinebackend","title":"modules.mod_offline.backend","text":"
  • Syntax: string, one of mnesia, rdbms
  • Default: \"mnesia\"
  • Example: backend = \"rdbms\"

Storage backend.

"},{"location":"modules/mod_offline/#modulesmod_offlinestore_groupchat_messages","title":"modules.mod_offline.store_groupchat_messages","text":"
  • Syntax: boolean
  • Default: false
  • Example: store_groupchat_messages = true

Specifies whether or not we should store groupchat messages.

Warning

This option can work only with MUC-light and is not expected to work with MUC.

"},{"location":"modules/mod_offline/#example-configuration","title":"Example Configuration","text":"
[modules.mod_offline]\n  access_max_user_messages = \"max_user_offline_messages\"\n  backend = \"rdbms\"\n  store_groupchat_messages = true\n
"},{"location":"modules/mod_offline/#metrics","title":"Metrics","text":"

If you'd like to learn more about metrics in MongooseIM, please visit MongooseIM metrics page.

Backend action Type Description (when it gets incremented) pop_messages histogram Offline messages for a user are retrieved and deleted from a DB. write_messages histogram New offline messages to a user are written in a DB."},{"location":"modules/mod_offline_stub/","title":"mod_offline_stub","text":""},{"location":"modules/mod_offline_stub/#module-description","title":"Module Description","text":"

RFC 6121 requires a <service-unavailable/> stanza error to be sent to a user messaging an unavailable recipient if the message is not stored for delayed delivery (i.e. as an \"offline message\"). If the recipient exists (i.e. auth module returns true from does_user_exist), mod_mam stores the message, but <service-unavailable/> is still returned. This is not compliant with the RFC. This module prevents returning <service-unavailable/>. Please note that mod_offline_stub is not tightly coupled with mod_mam. It can be used as a standalone extension, if the specific application requires it.

"},{"location":"modules/mod_offline_stub/#options","title":"Options","text":"

None.

"},{"location":"modules/mod_offline_stub/#example-configuration","title":"Example Configuration","text":"
[modules.mod_offline_stub]\n
"},{"location":"modules/mod_ping/","title":"mod_ping","text":""},{"location":"modules/mod_ping/#module-description","title":"Module Description","text":"

This module implements XMPP Ping functionality as described in XEP-0199: XMPP Ping.

"},{"location":"modules/mod_ping/#options","title":"Options","text":""},{"location":"modules/mod_ping/#modulesmod_pingsend_pings","title":"modules.mod_ping.send_pings","text":"
  • Syntax: boolean
  • Default: false
  • Example: send_pings = true

If set to true, the server will send ping iqs to the client if they are not active for a ping_interval.

"},{"location":"modules/mod_ping/#modulesmod_pingping_interval","title":"modules.mod_ping.ping_interval","text":"
  • Syntax: positive integer (seconds)
  • Default: 60
  • Example: ping_interval = 30

Defines the client inactivity timeout after which the server will send a ping request if the above option is set to true.

"},{"location":"modules/mod_ping/#modulesmod_pingtimeout_action","title":"modules.mod_ping.timeout_action","text":"
  • Syntax: string, one of \"none\", \"kill\"
  • Default: \"none\"
  • Example: timeout_action = \"kill\"

Defines if the client connection should be closed if it doesn't reply to a ping request in less than ping_req_timeout.

"},{"location":"modules/mod_ping/#modulesmod_pingping_req_timeout","title":"modules.mod_ping.ping_req_timeout","text":"
  • Syntax: positive integer (seconds)
  • Default: 32
  • Example: ping_req_timeout = 60

Defines how long the server waits for the client to reply to the ping request.

"},{"location":"modules/mod_ping/#modulesmod_pingiqdisctype","title":"modules.mod_ping.iqdisc.type","text":"
  • Syntax: string, one of \"one_queue\", \"no_queue\", \"queues\", \"parallel\"
  • Default: \"no_queue\"

Strategy to handle incoming stanzas. For details, please refer to IQ processing policies.

"},{"location":"modules/mod_ping/#example-configuration","title":"Example Configuration","text":"
[modules.mod_ping]\n  send_pings = true\n  ping_interval = 60\n  timeout_action = \"none\"\n  ping_req_timeout = 32\n
"},{"location":"modules/mod_ping/#metrics","title":"Metrics","text":"

If you'd like to learn more about metrics in MongooseIM, please visit MongooseIM metrics page.

Name Type Description (when it gets incremented) [HostType, mod_ping, ping_response] spiral Client responds to a ping. [HostType, mod_ping, ping_response_timeout] spiral Ping request timeouts without a response from client. [HostType, mod_ping, ping_response_time] histogram Response times (doesn't include timeouts)."},{"location":"modules/mod_presence/","title":"mod_presence","text":""},{"location":"modules/mod_presence/#module-description","title":"Module Description","text":"

This module implements server-side presence handling as specified in RFC 6121.

According to RFC 6121, section 1.3:

it must be possible to use the protocol to provide a presence service, a messaging service, or both. (...) it is not mandatory for an XMPP service to offer both a presence service and a messaging service, and the protocol makes it possible to offer separate and distinct services for presence and for messaging.

This is why server-side presence management and broadcasting is provided separately by this module. It is enabled in the default configuration file, but you can disable it if your use case does not require server-side presence handling - this could significantly improve performance.

"},{"location":"modules/mod_presence/#options","title":"Options","text":"

This module has no configurable options.

"},{"location":"modules/mod_presence/#example-configuration","title":"Example Configuration","text":"
[modules.mod_presence]\n
"},{"location":"modules/mod_presence/#metrics","title":"Metrics","text":"

There are no metrics specific to this module.

"},{"location":"modules/mod_privacy/","title":"mod_privacy","text":""},{"location":"modules/mod_privacy/#module-description","title":"Module Description","text":"

This module implements XEP-0016: Privacy Lists. This extension allows user to block IQs, messages, presences, or all, based on JIDs, subscription, and roster groups.

"},{"location":"modules/mod_privacy/#options","title":"Options","text":""},{"location":"modules/mod_privacy/#modulesmod_privacybackend","title":"modules.mod_privacy.backend","text":"
  • Syntax: string, one of \"mnesia\", \"rdbms\".
  • Default: \"mnesia\"
  • Example: backend = \"mnesia\"
"},{"location":"modules/mod_privacy/#example-configuration","title":"Example Configuration","text":"
[modules.mod_privacy]\n  backend = \"rdbms\"\n
"},{"location":"modules/mod_privacy/#metrics","title":"Metrics","text":"

If you'd like to learn more about metrics in MongooseIM, please visit MongooseIM metrics page.

Backend action Description (when it gets incremented) get_privacy_list A privacy list is retrieved from a DB. get_list_names Names of user's privacy lists are fetched from a DB. get_default_list A default privacy list for a user is fetched from a DB. set_default_list A default list's name for a user is set in a DB. forget_default_list A default list's name for a user is removed from a DB. remove_privacy_list A privacy list is deleted from a DB. replace_privacy_list A privacy list is updated (replaced) in a DB."},{"location":"modules/mod_private/","title":"mod_private","text":""},{"location":"modules/mod_private/#module-description","title":"Module Description","text":"

This module implements XEP-0049: Private XML Storage. It allows users to store custom XML data in the server's database. Used e.g. for storing roster groups separator.

"},{"location":"modules/mod_private/#options","title":"Options","text":""},{"location":"modules/mod_private/#modulesmod_privateiqdisctype","title":"modules.mod_private.iqdisc.type","text":"
  • Syntax: string, one of \"one_queue\", \"no_queue\", \"queues\", \"parallel\"
  • Default: \"one_queue\"

Strategy to handle incoming stanzas. For details, please refer to IQ processing policies.

"},{"location":"modules/mod_private/#modulesmod_privatebackend","title":"modules.mod_private.backend","text":"
  • Syntax: string, one of \"mnesia\", \"rdbms\".
  • Default: \"mnesia\"
  • Example: backend = \"mnesia\"

Database backend to use.

"},{"location":"modules/mod_private/#example-configuration","title":"Example Configuration","text":"
[modules.mod_private]\n  backend = \"mnesia\"\n
"},{"location":"modules/mod_private/#metrics","title":"Metrics","text":"

If you'd like to learn more about metrics in MongooseIM, please visit MongooseIM metrics page.

Backend operation Description (when it gets incremented) multi_get_data XML data is fetched from a DB. multi_set_data XML data is stored in a DB."},{"location":"modules/mod_pubsub/","title":"mod_pubsub","text":""},{"location":"modules/mod_pubsub/#what-is-pubsub","title":"What is PubSub?","text":"

PubSub is a design pattern which mostly promotes a loose coupling between two kinds of entities - publishers and subscribers. Like their names suggest, in the pubsub world we have publishers who fire events, and subscribers who wish to be notified about those events when publishers push data. There might be several subscribers, several publishers, and even several channels (or nodes) where the events are sent.

"},{"location":"modules/mod_pubsub/#module-description","title":"Module Description","text":"

This module implements XEP-0060: Publish-Subscribe. Due to the complexity of the protocol, the PubSub engine makes successive calls to the nodetree and node plugins in order to check the validity of requests, perform the corresponding action and return a result or appropriate error. Such an architecture makes it much easier to write custom pubsub plugins and add new storage backends. It's all about tailoring PubSub to your needs!

Warning

This module does not support dynamic domains.

"},{"location":"modules/mod_pubsub/#options","title":"Options","text":""},{"location":"modules/mod_pubsub/#modulesmod_pubsubiqdisctype","title":"modules.mod_pubsub.iqdisc.type","text":"
  • Syntax: string, one of \"one_queue\", \"no_queue\", \"queues\", \"parallel\"
  • Default: \"one_queue\"

Strategy to handle incoming stanzas. For details, please refer to IQ processing policies.

"},{"location":"modules/mod_pubsub/#modulesmod_pubsubhost","title":"modules.mod_pubsub.host","text":"
  • Syntax: string
  • Default: \"pubsub.@HOST@\"
  • Example: host = \"pubsub.localhost\"

Subdomain for Pubsub service to reside under. @HOST@ is replaced with each served domain.

"},{"location":"modules/mod_pubsub/#modulesmod_pubsubbackend","title":"modules.mod_pubsub.backend","text":"
  • Syntax: string, one of \"mnesia\", \"rdbms\"
  • Default: \"mnesia\"
  • Example: backend = \"rdbms\"

Database backend to use.

"},{"location":"modules/mod_pubsub/#modulesmod_pubsubaccess_createnode","title":"modules.mod_pubsub.access_createnode","text":"
  • Syntax: string, rule name, or \"all\"
  • Default: \"all\"
  • Example: access_createnode = \"all\"

Specifies who is allowed to create pubsub nodes. The access rule referenced here needs to be defined in the access section.

"},{"location":"modules/mod_pubsub/#modulesmod_pubsubmax_items_node","title":"modules.mod_pubsub.max_items_node","text":"
  • Syntax: non-negative integer
  • Default: 10
  • Example: max_items_node = 10

Defines the maximum number of items that can be stored in a node.

"},{"location":"modules/mod_pubsub/#modulesmod_pubsubmax_subscriptions_node","title":"modules.mod_pubsub.max_subscriptions_node","text":"
  • Syntax: non-negative integer
  • Default: not specified (no limit)
  • Example: max_subscriptions_node = 10

The maximum number of subscriptions managed by a node. By default there is no limit.

"},{"location":"modules/mod_pubsub/#modulesmod_pubsubnodetree","title":"modules.mod_pubsub.nodetree","text":"
  • Syntax: string
  • Default: \"tree\"
  • Example: nodetree = \"tree\"

Specifies the storage and organisation of the pubsub nodes. See the section below.

"},{"location":"modules/mod_pubsub/#modulesmod_pubsubignore_pep_from_offline","title":"modules.mod_pubsub.ignore_pep_from_offline","text":"
  • Syntax: boolean
  • Default: true
  • Example: ignore_pep_from_offline = false

Specifies whether or not we should get last published PEP items from users in our roster which are offline when we connect. The default option is true, hence we will get only the last items from the online contacts.

"},{"location":"modules/mod_pubsub/#modulesmod_pubsublast_item_cache","title":"modules.mod_pubsub.last_item_cache","text":"
  • Syntax: string, one of \"mnesia\", \"rdbms\", \"false\"
  • Default: \"false\"
  • Example: last_item_cache = \"mnesia\"

If enabled, PubSub will cache the last published items in the nodes. It may increase PubSub performance but at a price of an increased memory usage.

"},{"location":"modules/mod_pubsub/#modulesmod_pubsubplugins","title":"modules.mod_pubsub.plugins","text":"
  • Syntax: array of strings
  • Default: [\"flat\"]
  • Example: plugins = [\"flat\", \"pep\"]

List of enabled pubsub plugins.

"},{"location":"modules/mod_pubsub/#modulesmod_pubsubpep_mapping","title":"modules.mod_pubsub.pep_mapping","text":"
  • Syntax: Array of TOML tables with the following keys: \"namespace\", \"node\" and string values.
  • Default: []
  • Example: pep_mapping = [{namespace = \"urn:xmpp:microblog:0\", node = \"mb\"}]

This permits creating a Key-Value list to define a custom node plugin on a given PEP namespace. E.g. pair {\"urn:xmpp:microblog:0\", \"mb\"} will use module node_mb instead of node_pep when the specified namespace is used.

"},{"location":"modules/mod_pubsub/#modulesmod_pubsubdefault_node_config","title":"modules.mod_pubsub.default_node_config","text":"
  • Syntax: TOML table with the following values: string, boolean or non-negative integer.
  • Default: {}
  • Example: default_node_config = {deliver_payloads = true, max_payload_size = 10000, node_type = \"leaf\"}

Overrides the default node configuration, regardless of the node plugin. Node configuration still uses the default configuration defined by the node plugin, and overrides any items by the value defined in this configurable list.

The possible options, altogether with their default values for each node plugin, are listed in the table below:

syntax node_flat / node_hometree node_pep node_dag node_push access_model non-empty string open presence open whitelist deliver_notifications boolean true true true true deliver_payloads boolean true true true true max_items non-negative integer 10 1 10 1 max_payload_size non-negative integer 60000 60000 60000 60000 node_type non-empty string N/A N/A leaf N/A notification_type non-empty string headline headline headline headline notify_config boolean false false false false notify_delete boolean false false false false notify_retract boolean false false false false persist_items boolean true true true false presence_based_delivery boolean false true false true publish_model non-empty string publishers publishers publishers open purge_offline boolean false false false false roster_groups_allowed non-empty string [] [] [] [] send_last_published_item non-empty string never on_sub_and_presence never on_sub_and_presence subscribe boolean true true true true"},{"location":"modules/mod_pubsub/#modulesmod_pubsubitem_publisher","title":"modules.mod_pubsub.item_publisher","text":"
  • Syntax: boolean
  • Default: false
  • Example: item_publisher = false

When enabled, a JID of the publisher will be saved in the item metadata. This effectively makes them an owner of this item.

"},{"location":"modules/mod_pubsub/#modulesmod_pubsubsync_broadcast","title":"modules.mod_pubsub.sync_broadcast","text":"
  • Syntax: boolean
  • Default: false
  • Example: sync_broadcast = false

If false, routing of notifications to subscribers is done in a separate Erlang process. As a consequence, some notifications may arrive to the subscribers in the wrong order (however, the two events would have to be published at the exact same time).

"},{"location":"modules/mod_pubsub/#cache-backend","title":"Cache Backend","text":"

Caching is disabled by default. You may enable it by specifying the backend it should use. It is not coupled with the main DB backend, so it is possible to store the cached data in mnesia, while the actual PubSub information is kept in RDBMS (and vice versa!).

"},{"location":"modules/mod_pubsub/#example-configuration","title":"Example Configuration","text":"
[modules.mod_pubsub]\n  access_createnode = \"pubsub_createnode\"\n  ignore_pep_from_offline = false\n  backend = \"rdbms\"\n  last_item_cache = \"mnesia\"\n  max_items_node = 1000\n  plugins = [\"flat\", \"pep\"]\n\n  [[modules.mod_pubsub.pep_mapping]]\n    namespace = \"urn:xmpp:microblog:0\"\n    node = \"mb\"\n
"},{"location":"modules/mod_pubsub/#nodetrees","title":"Nodetrees","text":"

Called on get, create and delete node. Only one nodetree can be used per host and is shared by all node plugins.

"},{"location":"modules/mod_pubsub/#tree","title":"\"tree\"","text":"

Stores nodes in a tree structure. Every node name must be formatted like a UNIX path (e.g. /top/middle/leaf). When a node is created, its direct ancestor must already exist, so in order to create /top/middle/leaf, /top/middle is needed. A user may create any top-level node. A user may create a subnode of a node, only if they own it or it was created by the service.

"},{"location":"modules/mod_pubsub/#dag","title":"\"dag\"","text":"

Provides experimental support for XEP-0248: PubSub Collection Nodes. In this case you should also add the \"dag\" node plugin as default, for example: plugins = [\"dag\", \"flat\", \"hometree\", \"pep\"].

"},{"location":"modules/mod_pubsub/#plugins","title":"Plugins","text":"

They handle affiliations, subscriptions and items and also provide default node configuration and features. PubSub clients can define which plugin to use when creating a node by adding type='plugin-name' attribute to the create stanza element. If such an attribute is not specified, the default plugin will be the first on the plugin list.

"},{"location":"modules/mod_pubsub/#flat","title":"\"flat\"","text":"

No node hierarchy. It handles the standard PubSub case.

"},{"location":"modules/mod_pubsub/#hometree","title":"\"hometree\"","text":"

Uses the exact same features as the flat plugin but additionally organises nodes in a tree. Basically it follows a scheme similar to the filesystem's structure. Every user can create nodes in their own home root: e.g /home/user. Each node can contain items and/or sub-nodes.

"},{"location":"modules/mod_pubsub/#pep","title":"\"pep\"","text":"

Implementation of XEP-0163: Personal Eventing Protocol. In this case, items are not persisted but kept in an in-memory cache. When the pep plugin is enabled, a user can have their own node (exposed as their bare jid) with a common namespace. Requires module mod_caps to be enabled.

For XEP-0384: OMEMO Encryption, it might be required to configure the access_model to open or override the default access_model in the following way:

[modules.mod_pubsub]\n  access_createnode = \"pubsub_createnode\"\n  plugins = [\"pep\"]\n  default_node_config = {access_model = \"open\"}\n
"},{"location":"modules/mod_pubsub/#dag_1","title":"\"dag\"","text":"

Implementation of XEP-0248: PubSub Collection Nodes. Every node takes a place in a collection and becomes either a collection node (which has only sub-nodes) or a leaf node (which contains only items).

"},{"location":"modules/mod_pubsub/#push","title":"\"push\"","text":"

Special node type that may be used as a target node for XEP-0357: Push Notifications capable services (e.g. mod_event_pusher_push). For each published notification, a hook push_notification is run. You may enable as many modules that support this hook (all module with mod_push_service_* name prefix) as you like (see for example mod_push_service_mongoosepush). This node type requires publish-options with at least device_id and service fields supplied.

"},{"location":"modules/mod_pubsub/#metrics","title":"Metrics","text":"

If you'd like to learn more about metrics in MongooseIM, please visit the MongooseIM metrics page.

"},{"location":"modules/mod_pubsub/#overall-pubsub-action-metrics","title":"Overall PubSub action metrics","text":"

For every PubSub action, like node creation, subscription, publication the following metrics are available:

  • count - a spiral metric showing the number of given action invocations
  • errors - a spiral metric counting the errors for a given action
  • time - a histogram metric showing the time it took to finish the action in case of success

Below there is a table describing all metrics related to PubSub actions

Name Description (when it gets incremented) [HOST, pubsub, get, affiliations, TYPE] When node's affiliations are read [HOST, pubsub, get, configure, TYPE] When node's configuration is read [HOST, pubsub, get, default, TYPE] When node's defaults are read [HOST, pubsub, get, items, TYPE] When node's items are read [HOST, pubsub, get, options, TYPE] When node's options are read [HOST, pubsub, get, subscriptions, TYPE] When node's subscriptions are read [HOST, pubsub, set, affiliations, TYPE] When node's subscriptions are set [HOST, pubsub, set, configure, TYPE] When node's configuration is set [HOST, pubsub, set, create, TYPE] When node is created [HOST, pubsub, set, delete, TYPE] When node is deleted [HOST, pubsub, set, options, TYPE] When node's options are set [HOST, pubsub, set, publish, TYPE] When an item is published [HOST, pubsub, set, purge, TYPE] When node's items are purged [HOST, pubsub, set, retract, TYPE] When node's items are retracted [HOST, pubsub, set, subscribe, TYPE] When a subscriber subscribes to a node [HOST, pubsub, set, subscriptions, TYPE] When a subscription is set (for instance accepted) [HOST, pubsub, set, unsubscribe, TYPE] When a subscriber unsubscribes

Where:

  • HOST is the XMPP host for which mod_pubsub is running. Can be set to global if all metrics are set to be global.
  • TYPE is one of the following count, errors, time (described above the table)
"},{"location":"modules/mod_pubsub/#backend-operations","title":"Backend operations","text":"

There are also more detailed metrics measuring the execution time of backend operations.

Metrics for these actions may be found under mod_pubsub_db subkey.

Backend action Description (when it gets incremented) get_state User's state for a specific node is fetched. get_states Node's states are fetched. get_states_by_lus Nodes' states for user + domain are fetched. get_states_by_bare Nodes' states for bare JID are fetched. get_states_by_full Nodes' states for full JID are fetched. get_own_nodes_states State data for user's nodes is fetched. create_node A node's owner is set. del_node All data related to a node is removed. get_items Node's items are fetched. get_item A specific item from a node is fetched. add_item An item is upserted into a node. set_item An item is updated in a node. del_item An item is deleted from a node. del_items Specified items are deleted from a node. set_node A node is upserted. find_node_by_id A node is fetched by its ID. find_nodes_by_key Nodes are fetched by key. find_node_by_name A node is fetched by its name. del_node A node is deleted. get_subnodes Subnodes of a node are fetched. get_subnodes_tree Full tree of subnodes of a node is fetched. get_parentnodes_tree All parents of a node are fetched."},{"location":"modules/mod_push_service_mongoosepush/","title":"mod_push_service_mongoosepush","text":""},{"location":"modules/mod_push_service_mongoosepush/#module-description","title":"Module Description","text":"

This module handles the push_notification hook generated by mod_pubsub with an active push node. Each push_notification hook is converted as a REST API call to the MongoosePush service. You can find the full list of supported publish-options here.

Warning

This module does not support dynamic domains.

"},{"location":"modules/mod_push_service_mongoosepush/#prerequisites","title":"Prerequisites","text":"

This module uses a connection pool via mongoose_http_client. It must be defined in outgoing_pools setting.

"},{"location":"modules/mod_push_service_mongoosepush/#options","title":"Options","text":""},{"location":"modules/mod_push_service_mongoosepush/#modulesmod_push_service_mongoosepushpool_name","title":"modules.mod_push_service_mongoosepush.pool_name","text":"
  • Syntax: non-empty string
  • Default: \"undefined\"
  • Example: pool_name = \"mongoose_push_http\"

The name of the pool to use (as defined in outgoing_pools).

"},{"location":"modules/mod_push_service_mongoosepush/#modulesmod_push_service_mongoosepushapi_version","title":"modules.mod_push_service_mongoosepush.api_version","text":"
  • Syntax: string, \"v2\" or \"v3\"
  • Default: \"v3\"
  • Example: api_version = \"v3\"

REST API version to be used.

"},{"location":"modules/mod_push_service_mongoosepush/#modulesmod_push_service_mongoosepushmax_http_connections","title":"modules.mod_push_service_mongoosepush.max_http_connections","text":"
  • Syntax: non-negative integer
  • Default: 100
  • Example: max_http_connections = 100

The maximum amount of concurrent HTTP connections.

"},{"location":"modules/mod_push_service_mongoosepush/#example-configuration","title":"Example configuration","text":"
[outgoing_pools.http.mongoose_push_http]\n  scope = \"global\"\n  workers = 50\n\n  [outgoing_pools.http.mongoose_push_http.connection]\n    host = \"https://localhost:8443\"\n    path_prefix = \"/\"\n    request_timeout = 2000\n\n[modules.mod_push_service_mongoosepush]\n  pool_name = \"mongoose_push_http\"\n  api_version = \"v3\"\n  max_http_connections = 100\n
"},{"location":"modules/mod_register/","title":"mod_register","text":""},{"location":"modules/mod_register/#module-description","title":"Module Description","text":"

This module implements XEP-0077: In-Band Registration, allowing users to register accounts on the server via XMPP. Use of this module on Internet-facing servers is not recommended.

"},{"location":"modules/mod_register/#options","title":"Options","text":""},{"location":"modules/mod_register/#modulesmod_registeriqdisctype","title":"modules.mod_register.iqdisc.type","text":"
  • Syntax: string, one of \"one_queue\", \"no_queue\", \"queues\", \"parallel\"
  • Default: \"one_queue\"

Strategy to handle incoming stanzas. For details, please refer to IQ processing policies.

"},{"location":"modules/mod_register/#modulesmod_registeraccess","title":"modules.mod_register.access","text":"
  • Syntax: string, rule name or \"all\"
  • Default: \"all\"
  • Example: access = \"all\"

Defines which access rule should be used for checking if a chosen username is allowed for registration.

"},{"location":"modules/mod_register/#modulesmod_registerwelcome_message","title":"modules.mod_register.welcome_message","text":"
  • Syntax: TOML table with the following keys: \"body\", \"subject\" and string values.
  • Default: {subject = \"\", body = \"\"}
  • Example: welcome_message = {subject = \"Hello from MIM!\", body = \"Message body.\"}

Body and subject of a <message> stanza sent to new users. Only one of the fields (but non-empty) is mandatory for the message to be sent.

"},{"location":"modules/mod_register/#modulesmod_registerregistration_watchers","title":"modules.mod_register.registration_watchers","text":"
  • Syntax: array of strings
  • Default: []
  • Example: registration_watchers = [\"JID1\", \"JID2\"]

List of JIDs, which should receive a <message> notification about every successful registration.

"},{"location":"modules/mod_register/#modulesmod_registerpassword_strength","title":"modules.mod_register.password_strength","text":"
  • Syntax: non-negative integer
  • Default: 0
  • Example: password_strength = 32

Specifies minimal entropy of allowed password. Entropy is measured with ejabberd_auth:entropy/1. When set to 0, the password strength is not checked. Recommended minimum is 32. The entropy calculation algorithm is described in a section below.

"},{"location":"modules/mod_register/#modulesmod_registerip_access","title":"modules.mod_register.ip_access","text":"
  • Syntax: Array of TOML tables with the following mandatory content:

    • address - string, IP address
    • policy - string, one of: \"allow\", \"deny\".
  • Default: []

  • Example: ip_access = [ {address = \"127.0.0.0/8\", policy = \"allow\"}, {address = \"0.0.0.0/0\", policy = \"deny\"} ]

Access list for specified IPs or networks. Default value allows registration from every IP.

"},{"location":"modules/mod_register/#example-configuration","title":"Example configuration","text":"

Allow registrations from localhost:

[modules.mod_register]\n  welcome_message = {subject = \"Hello from MIM!\", body = \"Message body.\"}\n  ip_access = [\n    {address = \"127.0.0.1\", policy = \"allow\"}\n  ]\n  access = \"register\"\n

Deny registration from network 10.20.0.0 with mask 255.255.0.0.

[modules.mod_register]\n  ip_access = [\n    {address = \"10.20.0.0/16\", policy = \"deny\"}\n  ]\n

"},{"location":"modules/mod_register/#metrics","title":"Metrics","text":"

If you'd like to learn more about metrics in MongooseIM, please visit MongooseIM metrics page.

Name Type Description (when it gets incremented) [Host, modRegisterCount] spiral A user registers via mod_register module. [Host, modUnregisterCount] spiral A user unregisters via mod_register module."},{"location":"modules/mod_register/#entropy-calculation-algorithm","title":"Entropy calculation algorithm","text":"
Entropy = length(Password) * log(X) / log(2)\n

Where X is initially set to 0 and certain values are added if at least one of these bytes is present:

  • Lower case character: 26
  • Upper case character: 26
  • Digit: 9
  • Printable ASCII (0x21 - 0x7e): 33
  • Any other value: 128

Note

These values are added only once, no matter how many bytes of specific type are found.

"},{"location":"modules/mod_register/#example-entropies","title":"Example entropies","text":"
  • kotek: ~23.5
  • abc123: ~30.8
  • L33tSp34k: ~53.4
  • CamelCase: ~51.3
  • lowUP1#:: ~45.9
  • lowUP1#\u2764: ~78
"},{"location":"modules/mod_roster/","title":"mod_roster","text":""},{"location":"modules/mod_roster/#module-description","title":"Module Description","text":"

The module implements roster support, specified in RFC 6121. Includes support for XEP-0237: Roster Versioning. It can sometimes become quite a heavyweight feature, so there is an option to disable it.

"},{"location":"modules/mod_roster/#options","title":"Options","text":""},{"location":"modules/mod_roster/#modulesmod_rosteriqdisctype","title":"modules.mod_roster.iqdisc.type","text":"
  • Syntax: string, one of \"one_queue\", \"no_queue\", \"queues\", \"parallel\"
  • Default: \"one_queue\"

Strategy to handle incoming stanzas. For details, please refer to IQ processing policies.

"},{"location":"modules/mod_roster/#modulesmod_rosterversioning","title":"modules.mod_roster.versioning","text":"
  • Syntax: boolean
  • Default: false
  • Example: versioning = true

Turn on/off support for Roster Versioning.

"},{"location":"modules/mod_roster/#modulesmod_rosterstore_current_id","title":"modules.mod_roster.store_current_id","text":"
  • Syntax: boolean
  • Default: false
  • Example: store_current_id = true

Stores the last roster hash in DB (used in Roster Versioning). Improves performance but should be disabled, when shared rosters are used.

"},{"location":"modules/mod_roster/#modulesmod_rosterbackend","title":"modules.mod_roster.backend","text":"
  • Syntax: string, one of \"mnesia\", \"rdbms\"
  • Default: \"mnesia\"
  • Example: backend = \"mnesia\"
"},{"location":"modules/mod_roster/#example-configuration","title":"Example configuration","text":"
[modules.mod_roster]\n  versioning = true\n  store_current_id = true\n
"},{"location":"modules/mod_roster/#metrics","title":"Metrics","text":"

If you'd like to learn more about metrics in MongooseIM, please visit MongooseIM metrics page.

Backend action Description (when it gets incremented) read_roster_version Version of a user's roster is retrieved. write_roster_version Version of a user's roster is stored. get_roster A user's roster is fetched. get_roster_entry A specific roster entry is fetched. get_roster_entry_t A specific roster entry is fetched inside a transaction. get_subscription_lists A subscription list of a user is retrieved. roster_subscribe_t A subscription status between users is updated inside a transaction. update_roster_t A roster entry is updated in a transaction. del_roster_t A roster entry is removed inside a transaction."},{"location":"modules/mod_sasl2/","title":"mod_sasl2","text":""},{"location":"modules/mod_sasl2/#module-description","title":"Module Description","text":"

Implements XEP-0388: Extensible SASL Profile.

"},{"location":"modules/mod_shared_roster_ldap/","title":"mod_shared_roster_ldap","text":""},{"location":"modules/mod_shared_roster_ldap/#module-description","title":"Module Description","text":"

This module injects roster entries fetched from LDAP. It might get quite complicated to configure it properly, so proceed with caution.

Warning

This module does not support dynamic domains.

"},{"location":"modules/mod_shared_roster_ldap/#options-general","title":"Options: general","text":""},{"location":"modules/mod_shared_roster_ldap/#modulesmod_shared_roster_ldappool_tag","title":"modules.mod_shared_roster_ldap.pool_tag","text":""},{"location":"modules/mod_shared_roster_ldap/#modulesmod_shared_roster_ldapbase","title":"modules.mod_shared_roster_ldap.base","text":""},{"location":"modules/mod_shared_roster_ldap/#modulesmod_shared_roster_ldapderef","title":"modules.mod_shared_roster_ldap.deref","text":"

These 3 options are the same as for the LDAP authentication module.

"},{"location":"modules/mod_shared_roster_ldap/#options-attributes","title":"Options: attributes","text":""},{"location":"modules/mod_shared_roster_ldap/#modulesmod_shared_roster_ldapgroupattr","title":"modules.mod_shared_roster_ldap.groupattr","text":"
  • Syntax: string
  • Default: \"cn\"
  • Example: groupattr = \"cn\"

Provides a group name.

"},{"location":"modules/mod_shared_roster_ldap/#modulesmod_shared_roster_ldapgroupdesc","title":"modules.mod_shared_roster_ldap.groupdesc","text":"
  • Syntax: string
  • Default: the value of groupattr
  • Example: groupdesc = \"cn\"

Provides a group description.

"},{"location":"modules/mod_shared_roster_ldap/#modulesmod_shared_roster_ldapuserdesc","title":"modules.mod_shared_roster_ldap.userdesc","text":"
  • Syntax: string
  • Default: \"cn\"
  • Example: userdesc = \"cn\"

Provides a human-readable user name.

"},{"location":"modules/mod_shared_roster_ldap/#modulesmod_shared_roster_ldapuseruid","title":"modules.mod_shared_roster_ldap.useruid","text":"
  • Syntax: string
  • Default: \"cn\"
  • Example: useruid = \"cn\"

Provides a username.

"},{"location":"modules/mod_shared_roster_ldap/#modulesmod_shared_roster_ldapmemberattr","title":"modules.mod_shared_roster_ldap.memberattr","text":"
  • Syntax: string
  • Default: \"memberUid\"
  • Example: memberattr = \"memberUid\"

Holds group members' IDs.

"},{"location":"modules/mod_shared_roster_ldap/#modulesmod_shared_roster_ldapmemberattr_format","title":"modules.mod_shared_roster_ldap.memberattr_format","text":"
  • Syntax: string
  • Default: \"%u\"
  • Example: memberattr_format = \"%u\"

Simple LDAP expression for extracting a user ID.

"},{"location":"modules/mod_shared_roster_ldap/#modulesmod_shared_roster_ldapmemberattr_format_re","title":"modules.mod_shared_roster_ldap.memberattr_format_re","text":"
  • Syntax: string
  • Default: \"\"
  • Example: memberattr_format_re = \"\"

Allows extracting the user ID with a regular expression.

"},{"location":"modules/mod_shared_roster_ldap/#options-parameters","title":"Options: parameters","text":""},{"location":"modules/mod_shared_roster_ldap/#modulesmod_shared_roster_ldapauth_check","title":"modules.mod_shared_roster_ldap.auth_check","text":"
  • Syntax: boolean
  • Default: true
  • Example: auth_check = true

Enables checking if a shared roster entry actually exists in the XMPP database.

"},{"location":"modules/mod_shared_roster_ldap/#modulesmod_shared_roster_ldapuser_cache_validity","title":"modules.mod_shared_roster_ldap.user_cache_validity","text":"
  • Syntax: positive integer
  • Default: 300
  • Example: user_cache_validity = 300

Specifies in seconds how long are the roster entries kept in the cache.

"},{"location":"modules/mod_shared_roster_ldap/#modulesmod_shared_roster_ldapgroup_cache_validity","title":"modules.mod_shared_roster_ldap.group_cache_validity","text":"
  • Syntax: positive integer
  • Default: 300
  • Example: group_cache_validity = 300

Specifies in seconds how long is the user's membership in a group kept in the cache.

"},{"location":"modules/mod_shared_roster_ldap/#modulesmod_shared_roster_ldapuser_cache_size","title":"modules.mod_shared_roster_ldap.user_cache_size","text":"
  • Syntax: positive integer
  • Default: 1000
  • Example: user_cache_size = 1000

Specifies how many shared roster items are kept in the cache.

"},{"location":"modules/mod_shared_roster_ldap/#modulesmod_shared_roster_ldapgroup_cache_size","title":"modules.mod_shared_roster_ldap.group_cache_size","text":"
  • Syntax: positive integer
  • Default: 1000
  • Example: group_cache_size = 1000

Specifies how many roster group entries are kept in cache.

"},{"location":"modules/mod_shared_roster_ldap/#options-ldap-filters","title":"Options: LDAP filters","text":""},{"location":"modules/mod_shared_roster_ldap/#modulesmod_shared_roster_ldaprfilter","title":"modules.mod_shared_roster_ldap.rfilter","text":"
  • Syntax: string
  • Default: \"\"
  • Example: rfilter = \"\"

Used to find names of all shared roster groups.

"},{"location":"modules/mod_shared_roster_ldap/#modulesmod_shared_roster_ldapgfilter","title":"modules.mod_shared_roster_ldap.gfilter","text":"
  • Syntax: string
  • Default: \"\"
  • Example: gfilter = \"\"

Used for retrieving the human-readable name and the members of a group.

"},{"location":"modules/mod_shared_roster_ldap/#modulesmod_shared_roster_ldapufilter","title":"modules.mod_shared_roster_ldap.ufilter","text":"
  • Syntax: string
  • Default: \"\"
  • Example: ufilter = \"\"

Used for retrieving the human-readable name of the roster entries.

"},{"location":"modules/mod_shared_roster_ldap/#modulesmod_shared_roster_ldapfilter","title":"modules.mod_shared_roster_ldap.filter","text":"
  • Syntax: string
  • Default: \"\"
  • Example: filter = \"(objectClass=inetOrgPerson)\"

Filter AND-ed with previous filters.

"},{"location":"modules/mod_shared_roster_ldap/#example-configuration","title":"Example Configuration","text":"
[modules.mod_shared_roster_ldap]\n  base = \"ou=Users,dc=ejd,dc=com\"\n  groupattr = \"ou\"\n  memberattr = \"cn\"\n  userdesc = \"cn\"\n  filter = \"(objectClass=inetOrgPerson)\"\n  rfilter = \"(objectClass=inetOrgPerson)\"\n  group_cache_validity = 1\n  user_cache_validity = 1\n
"},{"location":"modules/mod_sic/","title":"mod_sic","text":""},{"location":"modules/mod_sic/#module-description","title":"Module Description","text":"

This module implements XEP-0279: Server IP Check. It allows clients to ask the server what the client's IP and port are from the server's perspective.

"},{"location":"modules/mod_sic/#options","title":"Options","text":""},{"location":"modules/mod_sic/#modulesmod_siciqdisctype","title":"modules.mod_sic.iqdisc.type","text":"
  • Syntax: string, one of \"one_queue\", \"no_queue\", \"queues\", \"parallel\"
  • Default: \"one_queue\"

Strategy to handle incoming stanzas. For details, please refer to IQ processing policies.

"},{"location":"modules/mod_sic/#example-configuration","title":"Example Configuration","text":"
[modules.mod_sic]\n
"},{"location":"modules/mod_smart_markers/","title":"mod_smart_markers","text":""},{"location":"modules/mod_smart_markers/#module-description","title":"Module Description","text":"

Smart markers are an experimental feature, described in detail as our Open XMPP Extension for markers.

"},{"location":"modules/mod_smart_markers/#options","title":"Options","text":""},{"location":"modules/mod_smart_markers/#modulesmod_smart_markersiqdisc","title":"modules.mod_smart_markers.iqdisc","text":"
  • Syntax: string, one of \"one_queue\", \"no_queue\", \"queues\", \"parallel\"
  • Default: \"no_queue\"

Strategy to handle incoming IQ requests. For details, please refer to IQ processing policies.

"},{"location":"modules/mod_smart_markers/#modulesmod_smart_markersbackend","title":"modules.mod_smart_markers.backend","text":"
  • Syntax: string, one of \"rdbms\", \"rdbms_async\"
  • Default: \"rdbms\"
  • Example: backend = \"rdbms_async\"

Only RDBMS storage is supported, but rdbms means flushes to DB are synchronous with each message, while rdbms_async is instead asynchronous.

Regular rdbms has worse performance characteristics, but it has better consistency properties, as events aren't lost nor reordered. rdbms_async processes events asynchronously, potentially unloading a lot of aggregation from the DB. Like the case of the asynchronous workers for MAM, it is the preferred method, with the risk of messages being lost on an ungraceful shutdown.

"},{"location":"modules/mod_smart_markers/#modulesmod_smart_markerskeep_private","title":"modules.mod_smart_markers.keep_private","text":"
  • Syntax: boolean
  • Default: false
  • Example: keep_private = true

This indicates if markers are meant to be private to the sender of the marker (setting keep_private as true), or if they can be public.

By default markers are public to the conversation where they are sent, so they'll be routed to all recipients, and anyone in the chat can see where its peers are at any time, i.e., the Facebook Messenger model; but they can be configured private, so markers won't be routed to anyone, and a user who fetches their status will only receive information for markers they have sent alone, i.e., the Slack model.

"},{"location":"modules/mod_smart_markers/#example-configuration","title":"Example configuration","text":"
[modules.mod_smart_markers]\n  backend = \"rdbms\"\n  iqdisc = \"parallel\"\n
"},{"location":"modules/mod_smart_markers/#implementation-details","title":"Implementation details","text":"

The current implementation has some limitations:

  • It does not verify that markers only move forwards, hence a user can, intentionally or accidentally, send a marker to an older message, and this would override newer ones.
  • It stores markers sent only for users served on a local domain. It does not store received markers, so if the peer is reached across federation, this module won't track markers for federated users. Therefore extensions that desire seeing not only the sender's markers but also the peer's markers, won't work with the current implementation across federated users.
"},{"location":"modules/mod_stream_management/","title":"mod_stream_management","text":""},{"location":"modules/mod_stream_management/#module-description","title":"Module Description","text":"

Enables XEP-0198: Stream Management. Implements logic regarding session resumption and acknowledgement as well as the management of the session tables and configuration.

"},{"location":"modules/mod_stream_management/#options","title":"Options","text":""},{"location":"modules/mod_stream_management/#modulesmod_stream_managementbackend","title":"modules.mod_stream_management.backend","text":"
  • Syntax: string: \"mnesia\" or \"cets\"
  • Default: \"mnesia\"
  • Example: backend = \"mnesia\"

Backend for in-memory session data stored by this module.

Warning

The corresponding internal database has to be enabled.

"},{"location":"modules/mod_stream_management/#modulesmod_stream_managementbuffer","title":"modules.mod_stream_management.buffer","text":"
  • Syntax: boolean
  • Default: true
  • Example: buffer = false

Enables buffer for messages to be acknowledged.

"},{"location":"modules/mod_stream_management/#modulesmod_stream_managementbuffer_max","title":"modules.mod_stream_management.buffer_max","text":"
  • Syntax: positive integer or string \"infinity\"
  • Default: 100
  • Example: buffer_max = 500

Buffer size for messages yet to be acknowledged.

"},{"location":"modules/mod_stream_management/#modulesmod_stream_managementack","title":"modules.mod_stream_management.ack","text":"
  • Syntax: boolean
  • Default: true
  • Example: ack = false

Enables ack requests to be sent from the server to the client.

"},{"location":"modules/mod_stream_management/#modulesmod_stream_managementack_freq","title":"modules.mod_stream_management.ack_freq","text":"
  • Syntax: positive integer
  • Default: 1
  • Example: ack_freq = 3

Frequency of ack requests sent from the server to the client, e.g. 1 means a request after each stanza, 3 means a request after each 3 stanzas.

"},{"location":"modules/mod_stream_management/#modulesmod_stream_managementresume_timeout","title":"modules.mod_stream_management.resume_timeout","text":"
  • Syntax: positive integer, value given in seconds
  • Default: 600
  • Example: resume_timeout = 600

Timeout for the session resumption. Sessions will be removed after the specified number of seconds.

"},{"location":"modules/mod_stream_management/#stale_h-options","title":"Stale_h options","text":"

Enables keeping old server's <h> values after the resumption timed out. Disabled by default. When enabled, parameters for the garbage collection of these tables should be provided.

"},{"location":"modules/mod_stream_management/#modulesmod_stream_managementstale_henabled","title":"modules.mod_stream_management.stale_h.enabled","text":"
  • Syntax: boolean
  • Default: false
  • Example: enabled = true

Enables stale_h configuration

"},{"location":"modules/mod_stream_management/#modulesmod_stream_managementstale_hrepeat_after","title":"modules.mod_stream_management.stale_h.repeat_after","text":"
  • Syntax: positive integer, value given in seconds
  • Default: 1800 (half an hour)
  • Example: repeat_after = 1800

How often the garbage collection will run in the background to clean this table.

"},{"location":"modules/mod_stream_management/#modulesmod_stream_managementstale_hgeriatric","title":"modules.mod_stream_management.stale_h.geriatric","text":"
  • Syntax: positive integer, value given in seconds
  • Default: 3600 (one hour)
  • Example: geriatric = 3600

The maximum lifespan of a record in memory. After this, they will be chased for cleanup.

"},{"location":"modules/mod_stream_management/#example-configuration","title":"Example Configuration","text":"
[modules.mod_stream_management]\n  buffer_max = 30\n  ack_freq = 1\n  resume_timeout = 600\n  stale_h.enabled = true\n  stale_h.repeat_after = 1800\n  stale_h.geriatric = 3600\n
"},{"location":"modules/mod_stream_management/#implementation-details","title":"Implementation details","text":"

Stream management state data is stored under the mod_stream_management key in the #c2s_data.state_mod map. The state data record, sm_state, has the following fields:

  • buffer - buffered stanzas not yet acked by the user
  • buffer_size - number of stanzas buffered for the user
  • counter_in - number of stanzas received by the server (server's <h>)
  • counter_out - number of stanzas delivered to the user and acked by the user (user's <h>)
  • buffer_max - server's capacity for buffering
  • ack_freq - how often the server requests acks
  • peer - in case of stream resumption, the ejabberd_sm:sid() identifying the old session, or gen_statem:from() identifying the new session.

mod_stream_management introduces a new resume_session state to the C2S state machine, that is used by a session being closed to allow stream resumption.

This module also has a Mnesia backend keeping a table defined as follows:

-record(sm_session,\n        {smid :: smid(),\n         sid :: ejabberd_sm:sid()\n        }).\n

where smid is a unique identifier \u2014 in this case a random binary, and sid is an opaque session identifier from ejabberd_sm, which is needed to find the previous session we want to resume from. This module implements hooks that run on connection removals and session cleanups, in order to clean records from a dying session; and it also implements registration callbacks, used when a session is registered for resumption.

XEP version 1.6 requires the server to attempt giving the user the value of the server's <h> when a session timed out and cannot be resumed anymore. To be compliant with it, there's a second optional table:

-record(stream_mgmt_stale_h,\n        {smid :: smid(),\n         h :: non_neg_integer(),\n         stamp :: non_neg_integer()\n        }).\n

This table is created, together with a gen_server responsible for cleaning up the tables, when stale_h is set to true with the proper garbage collection configuration. Then, when removing a record from the sm_session table (which happens when the state of the previous session is also dropped), a new record is added to this new table with the smid and h values of the dropped session, together with a timestamp. Next, when a new session attempting resumption queries mod_stream_management for the data behind a smid, mod_stream_management can answer with one of the following:

{sid, ejabberd_sm:sid()} | {stale_h, non_neg_integer()} | {error, smid_not_found}.\n

And mod_stream_management will pattern-match and act accordingly.

"},{"location":"modules/mod_time/","title":"mod_time","text":""},{"location":"modules/mod_time/#module-description","title":"Module Description","text":"

This module enables support for communicating the local time of an entity. It reports time in UTC according to the entity as well as the offset from UTC. Protocol is described under XEP-0202: Entity Time.

"},{"location":"modules/mod_time/#options","title":"Options","text":""},{"location":"modules/mod_time/#modulesmod_timeiqdisctype","title":"modules.mod_time.iqdisc.type","text":"
  • Syntax: string, one of \"one_queue\", \"no_queue\", \"queues\", \"parallel\"
  • Default: \"one_queue\"

Strategy to handle incoming stanzas. For details, please refer to IQ processing policies.

"},{"location":"modules/mod_time/#example-configuration","title":"Example Configuration","text":"
[modules.mod_time]\n
"},{"location":"modules/mod_vcard/","title":"mod_vcard","text":""},{"location":"modules/mod_vcard/#module-description","title":"Module Description","text":"

This module provides support for vCards, as specified in XEP-0054: vcard-temp and XEP-0055: Jabber Search.

"},{"location":"modules/mod_vcard/#options","title":"Options","text":""},{"location":"modules/mod_vcard/#modulesmod_vcardiqdisctype","title":"modules.mod_vcard.iqdisc.type","text":"
  • Syntax: string, one of \"one_queue\", \"no_queue\", \"queues\", \"parallel\"
  • Default: \"parallel\"

Strategy to handle incoming stanzas. For details, please refer to IQ processing policies.

"},{"location":"modules/mod_vcard/#modulesmod_vcardhost","title":"modules.mod_vcard.host","text":"
  • Syntax: string
  • Default: \"vjud.@HOST@\"
  • Example: host = \"vjud.@HOST@\"

Domain of the vCard User Directory, used for searching. @HOST@ is replaced with the domain(s) supported by the cluster.

"},{"location":"modules/mod_vcard/#modulesmod_vcardsearch","title":"modules.mod_vcard.search","text":"
  • Syntax: boolean
  • Default: true
  • Example: search = false

Enables/disables the domain set in the previous option. false makes searching for users impossible.

"},{"location":"modules/mod_vcard/#modulesmod_vcardbackend","title":"modules.mod_vcard.backend","text":"
  • Syntax: string, one of \"ldap\", \"rdbms\", \"mnesia\"
  • Default: \"mnesia\"
  • Example: backend = \"rdbms\"

vCard storage backend.

Warning

LDAP backend is read-only.

"},{"location":"modules/mod_vcard/#modulesmod_vcardmatches","title":"modules.mod_vcard.matches","text":"
  • Syntax: non-negative integer or the string \"infinity\"
  • Default: 30
  • Example: matches = 10

Maximum search results to be returned to the user.

"},{"location":"modules/mod_vcard/#ldap-specific-options","title":"LDAP-specific options","text":"

The following options are the same as for the LDAP authentication module:

"},{"location":"modules/mod_vcard/#modulesmod_vcardldappool_tag","title":"modules.mod_vcard.ldap.pool_tag","text":""},{"location":"modules/mod_vcard/#modulesmod_vcardldapbase","title":"modules.mod_vcard.ldap.base","text":""},{"location":"modules/mod_vcard/#modulesmod_vcardldapuids","title":"modules.mod_vcard.ldap.uids","text":""},{"location":"modules/mod_vcard/#modulesmod_vcardldapfilter","title":"modules.mod_vcard.ldap.filter","text":""},{"location":"modules/mod_vcard/#modulesmod_vcardldapderef","title":"modules.mod_vcard.ldap.deref","text":""},{"location":"modules/mod_vcard/#modulesmod_vcardldapvcard_map","title":"modules.mod_vcard.ldap.vcard_map","text":"
  • Syntax: Array of TOML tables with the following keys: \"vcard_field\", \"ldap_pattern\", \"ldap_field\" and string values.
  • Default: see description
  • Example: vcard_map = [{vcard_field = \"FN\", ldap_pattern = \"%s\", ldap_field = \"displayName\"}]

Mappings between VCard and LDAP fields. For the default settings, please see [MongooseIM root]/src/mod_vcard_ldap.erl.

"},{"location":"modules/mod_vcard/#modulesmod_vcardldapsearch_fields","title":"modules.mod_vcard.ldap.search_fields","text":"
  • Syntax: Array of TOML tables with the following keys: \"search_field\", \"ldap_field\" and string values.
  • Default: see description
  • Example: search_fields = [{search_field = \"User\", ldap_field = \"%u\"}]

Mappings between the human-readable search fields and LDAP fields. For the default settings, please see [MongooseIM root]/src/mod_vcard_ldap.erl.

"},{"location":"modules/mod_vcard/#modulesmod_vcardldapsearch_reported","title":"modules.mod_vcard.ldap.search_reported","text":"
  • Syntax: Array of TOML tables with the following keys: \"search_field\", \"vcard_field\" and string values.
  • Default: see description
  • Example: search_reported = [{search_field = \"Full Name\", vcard_field = \"FN\"}]

Mappings between the human-readable search fields and VCard fields. For the default settings, please see [MongooseIM root]/src/mod_vcard_ldap.erl.

"},{"location":"modules/mod_vcard/#modulesmod_vcardldapsearch_operator","title":"modules.mod_vcard.ldap.search_operator","text":"
  • Syntax: string, one of \"or\", \"and\"
  • Default: \"and\"
  • Example: search_operator = \"or\"

A default operator used for search query items.

"},{"location":"modules/mod_vcard/#modulesmod_vcardldapbinary_search_fields","title":"modules.mod_vcard.ldap.binary_search_fields","text":"
  • Syntax: array of strings
  • Default: []
  • Example: binary_search_fields = [\"User\", \"Full Name\"]

An array of search fields, which values should be Base64-encoded by MongooseIM before sending to LDAP.

"},{"location":"modules/mod_vcard/#example-configuration","title":"Example Configuration","text":"
[modules.mod_vcard]\n  matches = 1\n  search = true\n  host = \"directory.example.com\"\n\n  [[modules.mod_vcard.ldap.vcard_map]]\n    vcard_field = \"FAMILY\"\n    ldap_pattern = \"%s\"\n    ldap_field = \"sn\"\n\n  [[modules.mod_vcard.ldap.vcard_map]]\n    vcard_field = \"FN\"\n    ldap_pattern = \"%s\"\n    ldap_field = \"displayName\"\n\n  [[modules.mod_vcard.ldap.search_fields]]\n    search_field = \"User\"\n    ldap_field = \"%u\"\n\n  [[modules.mod_vcard.ldap.search_fields]]\n    search_field = \"Full Name\"\n    ldap_field = \"displayName\"\n\n  [[modules.mod_vcard.ldap.search_reported]]\n    search_field = \"Full Name\"\n    vcard_field = \"FN\"\n\n  [[modules.mod_vcard.ldap.search_reported]]\n    search_field = \"Given Name\"\n    vcard_field = \"FIRST\"\n
"},{"location":"modules/mod_vcard/#metrics","title":"Metrics","text":"

If you'd like to learn more about metrics in MongooseIM, please visit MongooseIM metrics page.

Backend action Description (when it gets incremented) set_vcard A vCard is set in a DB. get_vcard A specific vCard is retrieved from a DB. search A vCard search is performed."},{"location":"modules/mod_version/","title":"mod_version","text":""},{"location":"modules/mod_version/#module-description","title":"Module description","text":"

This module provides the functionality specified in XEP-0092: Software Version.

"},{"location":"modules/mod_version/#options","title":"Options","text":""},{"location":"modules/mod_version/#modulesmod_versioniqdisctype","title":"modules.mod_version.iqdisc.type","text":"
  • Syntax: string, one of \"one_queue\", \"no_queue\", \"queues\", \"parallel\"
  • Default: \"no_queue\"

Strategy to handle incoming stanzas. For details, please refer to IQ processing policies.

"},{"location":"modules/mod_version/#modulesmod_versionos_info","title":"modules.mod_version.os_info","text":"
  • Syntax: boolean
  • Default: false
  • Example: os_info = true

Determines whether information about the operating system will be included.

"},{"location":"modules/mod_version/#example-configuration","title":"Example configuration","text":"
[modules.mod_version]\n  os_info = true\n
"},{"location":"open-extensions/inbox/","title":"Inbox","text":"

When a messaging client starts, it typically builds a UI showing a list of recent chats, with metadata attached to them like, whether any chat has new messages and how many, or if it is fully read, or if they are for example muted and until when. In MongooseIM this functionality is provided by mod_inbox.

"},{"location":"open-extensions/inbox/#terminology","title":"Terminology:","text":""},{"location":"open-extensions/inbox/#the-inbox","title":"The Inbox","text":"

It is personal to a given user and represents the current status of the conversations of that user. It's the front-page of the chat feature.

"},{"location":"open-extensions/inbox/#inbox-entry","title":"Inbox entry","text":"

It is a specific conversation, that the user can identify by the recipient jid, that is, the user jid in case of a one-to-one chat, or the room jid in case of a group-chat.

"},{"location":"open-extensions/inbox/#box-also-referred-to-as-folder","title":"Box (also referred to as \"folder\")","text":"

A category where entries can be classified. The default box is the active box, simply called inbox. There is a second box, called archive, where entries can be thrown into and not displayed by default. More boxes can be created through configuration.

"},{"location":"open-extensions/inbox/#entity-use-cases","title":"Entity Use Cases","text":""},{"location":"open-extensions/inbox/#discovering-inbox-services","title":"Discovering Inbox Services","text":"

An entity can discover the inbox service via a Features Discovery request:

<!-- Client -->\n<iq type='get' id='a96d4244760853af7b3ae84faa1a40fb' to='localhost'>\n    <query xmlns='http://jabber.org/protocol/disco#info'/>\n</iq>\n\n<!-- Server -->\n<iq from='localhost' to='alice@localhost/res1' id='a96d4244760853af7b3ae84faa1a40fb' type='result'>\n    <query xmlns='http://jabber.org/protocol/disco#info'>\n        <identity category='server' type='im' name='MongooseIM'/>\n        <feature var='erlang-solutions.com:xmpp:inbox:0'/>\n    </query>\n</iq>\n

"},{"location":"open-extensions/inbox/#fetching-the-inbox","title":"Fetching the inbox","text":""},{"location":"open-extensions/inbox/#querying","title":"Querying","text":"

The inbox is fetched using regular XMPP Data Forms. To request the supported form, the client should send:

<!-- Client -->\n<iq type='get' id='some_unique_id'>\n  <query xmlns='erlang-solutions.com:xmpp:inbox:0'/>\n</iq>\n\n<!-- Server -->\n<iq from='alice@localhost' to='alice@localhost/res1' id='some_unique_id' type='result'>\n  <query xmlns='erlang-solutions.com:xmpp:inbox:0'>\n    <x xmlns='jabber:x:data' type='form'>\n      <field type='hidden' var='FORM_TYPE'><value>erlang-solutions.com:xmpp:inbox:0</value></field>\n      <field var='start' type='text-single'/>\n      <field var='end' type='text-single'/>\n      <field var='order' type='list-single'>\n        <value>desc</value>\n        <option label='Ascending by timestamp'><value>asc</value></option>\n        <option label='Descending by timestamp'><value>desc</value></option>\n      </field>\n      <field var='hidden_read' type='text-single' value='false'/>\n      <field var='box' type='list-simple' value='all'>\n        <option label='all'><value>all</value></option>\n        <option label='inbox'><value>inbox</value></option>\n        <option label='archive'><value>archive</value></option>\n        <option label='bin'><value>bin</value></option>\n      </field>\n      <field var='archive' type='boolean'/>\n    </x>\n  </query>\n</iq>\n

To fetch the inbox, the client should send:

<iq type='set' id='10bca'>\n  <inbox xmlns='erlang-solutions.com:xmpp:inbox:0' queryid='b6'/>\n</iq>\n

Then the client should receive:

<message from=\"alice@localhost\" to=\"alice@localhost/res1\" id=\"9b759\">\n  <result xmlns=\"erlang-solutions.com:xmpp:inbox:0\" unread=\"0\" queryid=\"b6\">\n    <forwarded xmlns=\"urn:xmpp:forward:0\">\n      <delay xmlns=\"urn:xmpp:delay\" stamp=\"2018-07-10T23:08:25.123456Z\"/>\n      <message xml:lang=\"en\" type=\"chat\" to=\"bob@localhost/res1\" from=\"alice@localhost/res1\" id=\u201d123\u201d>\n        <body>Hello</body>\n      </message>\n    </forwarded>\n    <box>inbox</box>\n    <archive>false</archive>\n    <mute>0</mute>\n  </result>\n</message>\n\n<iq from=\"alice@localhost\" to=\"alice@localhost/res1\" id=\"b6\" type=\"result\">\n  <fin xmlns='erlang-solutions.com:xmpp:inbox:0'>\n    <count>1</count>\n    <unread-messages>0</unread-messages>\n    <active-conversations>0</active-conversations>\n  </fin>\n</iq>\n
where none-or-many message stanzas are sent to the requesting resource describing each inbox entry, and a final iq-fin stanza marks the end of the inbox query. The inbox query result IQ stanza returns the following values:

  • count: the total number of conversations (if hidden_read value was set to true, this value will be equal to active-conversations)
  • unread-messages: total number of unread messages from all conversations
  • active-conversations: the number of conversations with unread message(s)

Note that the queryid field is optional, and if not provided, the answers will fall back to the id field of the IQ query.

"},{"location":"open-extensions/inbox/#filtering-and-ordering","title":"Filtering and ordering","text":"

Inbox query results may be filtered by time range and box, and sorted by timestamp. By default, mod_inbox returns all conversations, listing the ones updated most recently first.

A client may specify the following parameters:

  • variable start: Start date for the result set (value: ISO timestamp)
  • variable end: End date for the result set (value: ISO timestamp)
  • variable order: Order by timestamp (values: asc, desc)
  • variable hidden_read: Show only conversations with unread messages (values: true, false)
  • variable box: Indicate which box is desired. Supported are all, inbox, archive and bin. More boxes can be implemented, see mod_inbox \u2013 Boxes. If not provided, all except the bin are returned.
  • variable archive [deprecated, prefer box]: whether to query the archive inbox. true means querying only the archive box, false means querying only the active box. If the flag is not set, it is assumed all entries are requested. This is kept for backwards compatibility reasons, use the box flag instead.

They are encoded inside a standard XMPP Data Forms format. Dates must be formatted according to XMPP Date and Time Profiles. It is not mandatory to add an empty data form if a client prefers to use default values (<inbox/> element may be empty). However, the IQ type must be \"set\", even when the data form is missing.

"},{"location":"open-extensions/inbox/#limiting-the-query","title":"Limiting the query","text":"

It can happen that the amount of inbox entries is too big for a given user, even after filtering by start and end as already available in mod_inbox. Hence, we need to set a fixed limit of the number of entries that are requested. For this, we can use a <max> attribute as defined in XEP-0059: #2.1 Limiting the Number of Items:

<iq type='set' id='10bca'>\n  <inbox xmlns='erlang-solutions.com:xmpp:inbox:0' queryid='b6'>\n    <x xmlns='jabber:x:data' type='form'>\n      <field type='hidden' var='FORM_TYPE'><value>erlang-solutions.com:xmpp:inbox:0</value></field>\n      <field type='list-single' var='order'><value>asc</value></field>\n      <field type='text-single' var='hidden_read'><value>true</value></field>\n      <field type='list-single' var='box'><value>inbox</value></field>\n    </x>\n    <set xmlns='http://jabber.org/protocol/rsm'>\n      <max>Max</max>\n    </set>\n  </inbox>\n</iq>\n
where Max is a non-negative integer.

Inbox also has partial support for pagination as described in XEP-0059. Note that therefore there are two ways to denote pages, the standard RSM mechanism and the custom inbox form. If both are used, the RSM marker will override the respective inbox form, as in, before will override start, and after will override end.

Note

Inbox pagination does not support total count nor indexes as described in XEP-0059: #2.6 Retrieving a Page Out of Order.

"},{"location":"open-extensions/inbox/#properties-of-an-entry","title":"Properties of an entry","text":"

Given an entry, certain properties are defined for such an entry:

"},{"location":"open-extensions/inbox/#box","title":"Box","text":"

Clients usually have two different boxes for the inbox: the regular one, simply called the inbox (or the active inbox), and an archive box, where clients can manually throw conversations they don't want displayed in the default UI. A third box is the trash bin, where deleted entries go and are cleaned up in regular intervals.

It is expected that entries will reside in the archive until they're either manually moved back to the active box, or they receive a new message: in such case the entry should jump back to the active box automatically.

More boxes can be implemented, see mod_inbox#boxes. Movement between boxes can be achieved through the right XMPP IQ, no more automatic movements are developed as in the case of inbox-archive.

"},{"location":"open-extensions/inbox/#read","title":"Read","text":"

Entries keep a count of unread messages that is incremented automatically upon receiving a new message, and (in the current implementation) set to zero upon receiving either a message by one-self, or an appropriate chat marker as defined in XEP-0333 (which markers reset the count is a matter of configuration, see doc).

This property can also be manually set to zero or to one using the appropriate requests as explained below.

"},{"location":"open-extensions/inbox/#muted","title":"Muted","text":"

Entries can be muted for given periods of time, and likewise, unmuted. This changes the UI representation, and also, means that the user won't get PNs (Push Notifications) for this entry, until the time set expires, or the user sets otherwise. Knowledge of this is necessary to help build the UI.

Expected times can be extended before the period has expired, without the need to first unmuting. When muting a conversation, the final timestamp will be calculated by the server as the current time plus the requested period, in seconds, to centralise knowledge of UTC clocks. When muting an already muted conversation, the timestamp is simply overridden following the previous specification.

"},{"location":"open-extensions/inbox/#other-properties","title":"Other properties","text":"

No more properties are expected, but one could envisage notions of flagging conversations with different colours, for example according to their urgency, or a client-specific category (work, personal, fitness, and whatnot), or pins to denote an entry should be always displayed (possibly in a special format, like on top of the box). The design of the protocol, and the implementation, aims to leave room for future extensions.

"},{"location":"open-extensions/inbox/#getting-properties","title":"Getting properties","text":"

To fetch all supported properties, a classic Data Form is used. Upon the client sending an iq-get without a jid:

<iq id='some_unique_id' type='get'>\n  <query xmlns='erlang-solutions.com:xmpp:inbox:0#conversation'/>\n</iq>\n
The server would respond with:
<iq from='alice@localhost' to='alice@localhost/res1' id='some_unique_id' type='result'>\n  <query xmlns='erlang-solutions.com:xmpp:inbox:0#conversation'>\n    <x xmlns='jabber:x:data' type='form'>\n      <field type='hidden' var='FORM_TYPE'><value>erlang-solutions.com:xmpp:inbox:0</value></field>\n      <field var='archive' type='boolean' value='false'/>\n      <field var='read' type='boolean' value='false'/>\n      <field var='mute' type='text-single' value='0'/>\n      <field var='box' type='list-simple' value='all'>\n        <option label='all'><value>all</value></option>\n        <option label='inbox'><value>inbox</value></option>\n        <option label='archive'><value>archive</value></option>\n        <option label='bin'><value>bin</value></option>\n      </field>\n    </x>\n  </query>\n</iq>\n

If the properties of a certain entry were to be fetched, it can easily be done with:

<iq id='some_unique_id' type='get'>\n  <query xmlns='erlang-solutions.com:xmpp:inbox:0#conversation' jid='bob@localhost'/>\n</iq>\n
To which the server will reply, just like before, with:
<iq id='some_unique_id' to='alice@localhost/res1' type='result'>\n  <query xmlns='erlang-solutions.com:xmpp:inbox:0#conversation'>\n    <box>inbox</box>\n    <archive>false</archive>\n    <mute>0</mute>\n    <read>true</read>\n  </query>\n</iq>\n

If an entire entry wanted to be queried, and not only its attributes, a complete='true' can be provided:

<iq id='some_unique_id' type='get'>\n  <query xmlns='erlang-solutions.com:xmpp:inbox:0#conversation' jid='bob@localhost' complete='true'/>\n</iq>\n
To which the server will reply, just like before, with:
<iq id='some_unique_id' to='alice@localhost/res1' type='result'>\n  <query xmlns='erlang-solutions.com:xmpp:inbox:0#conversation'>\n    <forwarded xmlns=\"urn:xmpp:forward:0\">\n      <delay xmlns=\"urn:xmpp:delay\" stamp=\"2018-07-10T23:08:25.123456Z\"/>\n      <message xml:lang=\"en\" type=\"chat\" to=\"bob@localhost/res1\" from=\"alice@localhost/res1\" id=\u201d123\u201d>\n        <body>Hello</body>\n      </message>\n    </forwarded>\n    <archive>false</archive>\n    <mute>0</mute>\n    <read>true</read>\n  </query>\n</iq>\n

"},{"location":"open-extensions/inbox/#setting-properties","title":"Setting properties","text":"

Setting properties is done using the standard XMPP pattern of iq-query and iq-result, as below:

<iq id='some_unique_id' type='set'>\n  <query xmlns='erlang-solutions.com:xmpp:inbox:0#conversation' jid='bob@localhost'>\n    <Property>Value</Property>\n    <!-- Possibly other properties -->\n  </query>\n</iq>\n
where Property and Value are a list of key-value pairs as follows:

  • box: inbox, archive, or a custom value if this has been extended.
  • archive: true or false
  • mute: number of seconds to mute for. Choose 0 for unmuting.
  • read (adjective, not verb): true or false. Setting to true essentially sets the unread-count to 0, false sets the unread-count to 1 (if it was equal to 0, otherwise it leaves it unchanged). No other possibilities are offered, to reduce the risk of inconsistencies or problems induced by a faulty client.

Note that resetting the inbox count will not be forwarded. While a chat marker will be forwarded to the interlocutor(s), (including the case of a big groupchat with thousands of participants), this reset stanza will not.

If the query was successful, the server will answer with two stanzas, following the classic pattern of broadcasting state changes. First, it would send a message with an <x> child containing all the new configuration, to the bare-jid of the user: this facilitates broadcasting to all online resources to successfully synchronise their interfaces.

<message from='alice@localhost' to='alice@localhost' id='some_unique_id'>\n  <x xmlns='erlang-solutions.com:xmpp:inbox:0#conversation' jid='bob@localhost'>\n    <box>inbox</box>\n    <archive>false</archive>\n    <mute>0</mute>\n    <read>true</read>\n  </x>\n</message>\n
where <mute> may contain either a zero, to denote unmuted, or an RFC3339 timestamp, as in 2021-02-25T08:44:14.323836Z.

To the requesting resource, a simple iq-result would be then sent to notify of success, as required by the iq directives of the XMPP RFCs:

<iq id='some_unique_id' to='alice@localhost/res1' type='result'/>\n

If the request was not successful, the server would then answer as in:

<iq to='alice@localhost/res1' type='error'>\n  <error type='Type'>\n    <Condition xmlns='urn:ietf:params:xml:ns:xmpp-stanzas'/>\n  </error>\n</iq>\n
Where Type will usually be modify or cancel, as explained in https://xmpp.org/rfcs/rfc6120.html#stanzas-error-syntax, and Condition is as explained in https://xmpp.org/rfcs/rfc6120.html#stanzas-error-conditions, bad-request being the most common.

This final syntax for the protocol has been chosen as it allows for better pipelining of requests, and it remains consistent with how, for example, rooms are configured for MUC-Light.

"},{"location":"open-extensions/inbox/#examples-archiving-an-entry","title":"Examples: archiving an entry","text":"

To put an entry into the archived box, the client can send:

<iq id='some_unique_id' type='set'>\n  <query xmlns='erlang-solutions.com:xmpp:inbox:0#conversation' jid='bob@localhost'>\n    <box>archive</box>\n  </query>\n</iq>\n
On success, the server would return (considering the entry has no unread messages and is not muted):
<iq id='some_unique_id' to='alice@localhost/res1' type='result'>\n  <query xmlns='erlang-solutions.com:xmpp:inbox:0#conversation' jid='bob@localhost'>\n    <box>archive</box>\n    <archive>true</archive>\n    <mute>0</mute>\n    <read>true</read>\n  </query>\n</iq>\n
If the client had sent an invalid number (negative, or NaN), the server would answer:
<iq to='alice@localhost/res1' type='error'>\n  <error type='modify'>\n    <bad-request xmlns='urn:ietf:params:xml:ns:xmpp-stanzas'/>\n  </error>\n</iq>\n

"},{"location":"open-extensions/inbox/#examples-emptying-the-trash-bin","title":"Examples: emptying the trash bin","text":"

A user can empty his trash bin, through the following request:

<iq id='some_unique_id' type='set'>\n  <empty-bin xmlns='erlang-solutions.com:xmpp:inbox:0'/>\n</iq>\n
On success, the server would return how many entries were dropped, as in:
<iq id='some_unique_id' to='alice@localhost/res1' type='result'>\n  <empty-bin xmlns='erlang-solutions.com:xmpp:inbox:0'>\n    <num>2</num>\n  </empty-bin>\n</iq>\n
The server might answer with a corresponding error message, should anything go wrong.

"},{"location":"open-extensions/inbox/#examples-muting-an-entry","title":"Examples: muting an entry","text":"

To mute an entry for a full day (86400 seconds in a day, 604800 in a week, for example), a client can send:

<iq id='some_unique_id' type='set'>\n  <query xmlns='erlang-solutions.com:xmpp:inbox:0#conversation' jid='bob@localhost'>\n    <mute>86400</mute>\n  </query>\n</iq>\n
On success, the server would return (considering the server receives the timestamp on \"2021-02-26T09:11:05.634232Z\", and the entry is on the active box and completely read):
<iq id='some_unique_id' to='alice@localhost/res1' type='result'>\n  <query xmlns='erlang-solutions.com:xmpp:inbox:0#conversation' jid='bob@localhost'>\n    <box>inbox</box>\n    <archive>false</archive>\n    <mute>2021-02-27T09:11:05.634232Z</mute>\n    <read>true</read>\n  </query>\n</iq>\n
If the client had sent an invalid number (negative, or NaN), the server would answer:
<iq to='alice@localhost/res1' type='error'>\n  <error type='modify'>\n    <bad-request xmlns='urn:ietf:params:xml:ns:xmpp-stanzas'/>\n  </error>\n</iq>\n
To unmute, similarly, the client can send:
<iq id='some_unique_id' type='set'>\n  <query xmlns='erlang-solutions.com:xmpp:inbox:0#conversation' jid='bob@localhost'>\n    <mute>0</mute>\n  </query>\n</iq>\n
And server responses will be similar.

"},{"location":"open-extensions/inbox/#examples-reading-an-entry","title":"Examples: reading an entry","text":"

To set an entry as read, the client can send:

<iq id='some_unique_id' type='set'>\n  <query xmlns='erlang-solutions.com:xmpp:inbox:0#conversation' jid='bob@localhost'>\n    <read>true</read>\n  </query>\n</iq>\n
On success, the server would return (considering the entry is not archived and not muted):
<iq id='some_unique_id' to='alice@localhost/res1' type='result'>\n  <query xmlns='erlang-solutions.com:xmpp:inbox:0#conversation' jid='bob@localhost'>\n    <box>inbox</box>\n    <archive>false</archive>\n    <mute>0</mute>\n    <read>true</read>\n  </query>\n</iq>\n
On error, as usual, the client would get:
<iq to='alice@localhost/res1' type='error'>\n  <error type='modify'>\n    <bad-request xmlns='urn:ietf:params:xml:ns:xmpp-stanzas'/>\n  </error>\n</iq>\n
And similarly, to set a conversation as unread:
<iq id='some_unique_id' type='set'>\n  <query xmlns='erlang-solutions.com:xmpp:inbox:0#conversation' jid='bob@localhost'>\n    <read>false</read>\n  </query>\n</iq>\n

"},{"location":"open-extensions/inbox/#deprecated-reset-entry-stanza","title":"Deprecated reset entry stanza:","text":"

You can reset the inbox with the following stanza:

<iq type='set'>\n    <reset xmlns='erlang-solutions.com:xmpp:inbox:0#conversation' jid='interlocutor_bare_jid'/>\n</iq>\n
Here jid is the bare jid of the user whose inbox we want to reset. This action does not change the last message stored in inbox; meaning that neither this stanza nor anything given within will be stored; the only change is that the inbox unread_count is set to zero.

"},{"location":"open-extensions/inbox/#example-request","title":"Example request","text":"
<!-- Alice sends: -->\n<message type=\"chat\" to=\"bob@localhost/res1\" id=\u201d123\u201d>\n  <body>Hello</body>\n</message>\n\n<!-- Bob receives: -->\n<message from=\"alice@localhost/res1\" to=\"bob@localhost/res1\" id=\u201c123\u201d xml:lang=\"en\" type=\"chat\">\n  <body>Hello</body>\n</message>\n\n<!-- Alice sends: -->\n<iq type=\"set\" id=\"10bca\">\n  <inbox xmlns=\"erlang-solutions.com:xmpp:inbox:0\" queryid=\"b6\">\n    <x xmlns='jabber:x:data' type='form'>\n      <field type='hidden' var='FORM_TYPE'><value>erlang-solutions.com:xmpp:inbox:0</value></field>\n      <field type='text-single' var='start'><value>2018-07-10T12:00:00Z</value></field>\n      <field type='text-single' var='end'><value>2018-07-11T12:00:00Z</value></field>\n      <field type='list-single' var='order'><value>asc</value></field>\n    </x>\n  </inbox>\n</iq>\n\n<!-- Alice receives: -->\n<message from=\"alice@localhost\" to=\"alice@localhost\" id=\"9b759\">\n  <result xmlns=\"erlang-solutions.com:xmpp:inbox:0\" unread=\"0\" queryid=\"b6\">\n    <forwarded xmlns=\"urn:xmpp:forward:0\">\n      <delay xmlns=\"urn:xmpp:delay\" stamp=\"2018-07-10T23:08:25.123456Z\"/>\n      <message xml:lang=\"en\" type=\"chat\" to=\"bob@localhost/res1\" from=\"alice@localhost/res1\" id=\u201d123\u201d>\n        <body>Hello</body>\n      </message>\n    </forwarded>\n    <box>inbox</box>\n    <archive>false</archive>\n    <mute>0</mute>\n  </result>\n</message>\n\n<iq from=\"alice@localhost\" to=\"alice@localhost/res1\" id=\"10bca\" type=\"result\">\n  <fin xmlns='erlang-solutions.com:xmpp:inbox:0'>\n    <count>1</count>\n    <unread-messages>0</unread-messages>\n    <active-conversations>0</active-conversations>\n  </fin>\n</iq>\n
"},{"location":"open-extensions/inbox/#example-error-response","title":"Example error response","text":"
<!--Alice sends request with invalid value of start field: -->\n<iq type='set' id='a78478f20103ff8354d7834d0ba2fdb2'>\n  <inbox xmlns='erlang-solutions.com:xmpp:inbox:0'>\n    <x xmlns='jabber:x:data' type='submit'>\n      <field type='text-single' var='start'>\n        <value>invalid</value>\n      </field>\n    </x>\n  </inbox>\n</iq>\n\n<!--Alice receives an error with description of the first encountered invalid value: -->\n<iq from='alice@localhost' to='alice@localhost/res1'\n    id='a78478f20103ff8354d7834d0ba2fdb2' type='error'>\n  <error code='400' type='modify'>\n    <bad-request xmlns='urn:ietf:params:xml:ns:xmpp-stanzas'/>\n    <text xmlns='urn:ietf:params:xml:ns:xmpp-stanzas'>\n      Invalid inbox form field value, field=start, value=invalid\n    </text>\n  </error>\n</iq>\n
"},{"location":"open-extensions/mam/","title":"Message Archive Management extensions","text":""},{"location":"open-extensions/mam/#new-mam-filtering-fields","title":"New MAM filtering fields","text":"

The new fields make it possible to improve the performance of the counting queries for very big archives by changing how the count and index functions work.

  • from-id - returns and counts messages with ids id >= from-id only (from-id is included into the set).
  • to-id - returns and counts messages with ids id <= to-id only (to-id is included into the set).
  • after-id - returns and counts messages with ids id > after-id only (after-id is not included into the set).
  • before-id - returns and counts messages with ids id < before-id only (before-id is not included into the set).
  • simple - do not return count and offset fields in the result.

The fields could be combined together. If two filters are provided, both would be applied to the result.

"},{"location":"open-extensions/mam/#get-new-messages-oldest-first","title":"Get new messages, oldest first","text":"

Example from pagination_first_page_after_id4 testcase:

The client has downloaded his archive and got disconnected. He knows that the last message he has on his device has id=BO7CH1JOF801. He wants to receive new messages that were sent while he has been disconnected, using a page size of 5.

In this mode, the client would get the oldest messages first.

Testcase: the client has messages 1-15 in his archive.

<!-- Client sends -->\n<iq type='set' id='req1'>\n    <query xmlns='urn:xmpp:mam:1' queryid='first_page_after_id4'>\n        <x xmlns='jabber:x:data'>\n            <field var='after-id'>\n                <value>BO7CH1JOF801</value> <!-- id of the Message #4 -->\n            </field>\n        </x>\n        <set>\n            <max>5</max>\n        </set>\n    </query>\n</iq>\n\n<!-- Server sends -->\n<message from='alice@localhost' to='alice@localhost/res1' id='323372af-7d69-4f36-803d-110272066373'>\n    <result queryid='first_page_after_id4' xmlns='urn:xmpp:mam:1' id='BO7CH1JQR9O1'>\n        <forwarded xmlns='urn:xmpp:forward:0'>\n            <delay xmlns='urn:xmpp:delay' stamp='2022-06-08T09:43:08.952999Z' from='alice@localhost/res1'/>\n            <message from='alice@localhost/res1' xmlns='jabber:client' xml:lang='en' to='bob@localhost/res1' type='chat'>\n                <body>Message #5</body>\n            </message>\n        </forwarded>\n    </result>\n</message>\n<!-- ... Messages 6, 7, 8  ... 
-->\n<message from='alice@localhost' to='alice@localhost/res1' id='a44d83f3-de47-4e71-a1e6-62100437fe2c'>\n    <result queryid='first_page_after_id4' xmlns='urn:xmpp:mam:1' id='BO7CH1K3TU01'>\n        <forwarded xmlns='urn:xmpp:forward:0'>\n            <delay xmlns='urn:xmpp:delay' stamp='2022-06-08T09:43:08.990200Z' from='alice@localhost/res1'/>\n            <message from='alice@localhost/res1' xmlns='jabber:client' xml:lang='en' to='bob@localhost/res1' type='chat'>\n                <body>Message #9</body>\n            </message>\n        </forwarded>\n    </result>\n</message>\n\n<iq from='alice@localhost' to='alice@localhost/res1' id='req1' type='result'>\n    <fin xmlns='urn:xmpp:mam:1'>\n        <set xmlns='http://jabber.org/protocol/rsm'>\n            <first index='0'>BO7CH1JQR9O1</first> <!-- Id of the message #5 -->\n            <last>BO7CH1K3TU01</last> <!-- Id of the message #9 -->\n            <count>11</count> <!-- messages 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 -->\n        </set>\n    </fin>\n</iq>\n

Messages 1-4 are completely ignored in the count and in the index fields. If the client asked for 5 messages, but count is 11, he should ask for more messages.

<!-- Client sends -->\n<iq type='set' id='req2'>\n    <query xmlns='urn:xmpp:mam:1' queryid='first_page_after_id9'>\n        <x xmlns='jabber:x:data'>\n            <field var='after-id'>\n                <value>BO7CH1K3TU01</value> <!-- id of the Message #9 -->\n            </field>\n        </x>\n        <set>\n            <max>5</max>\n        </set>\n    </query>\n</iq>\n...\n
"},{"location":"open-extensions/mam/#get-new-messages-newest-first","title":"Get new messages, newest first","text":"

Sometimes we want to render the newest messages as fast as possible.

Though, if the client caches messages, he has to track which pages still need to be requested when using this method.

Example pagination_last_page_after_id4.

<!-- Client sends -->\n<iq type='set' id='req3'>\n    <query xmlns='urn:xmpp:mam:1' queryid='last_page_after_id4'>\n        <x xmlns='jabber:x:data'>\n            <field var='after-id'>\n                <value>BO7CUCVVS6O1</value>\n            </field>\n        </x>\n        <set>\n            <max>5</max>\n            <before/>\n        </set>\n    </query>\n</iq>\n\n<!-- Server sends -->\n<message from='alice@localhost' to='alice@localhost/res1' id='4917656e-a5cb-4f4a-9718-ed525a1202ee'>\n    <result queryid='last_page_after_id4' xmlns='urn:xmpp:mam:1' id='BO7CUD0L8B81'>\n        <forwarded xmlns='urn:xmpp:forward:0'>\n            <delay xmlns='urn:xmpp:delay' stamp='2022-06-08T10:13:01.601837Z' from='alice@localhost/res1'/>\n            <message from='alice@localhost/res1' xmlns='jabber:client' xml:lang='en' to='bob@localhost/res1' type='chat'>\n                <body>Message #11</body>\n            </message>\n        </forwarded>\n    </result>\n</message>\n\n...\n\n<message from='alice@localhost' to='alice@localhost/res1' id='09987901-d53d-4b57-8b3c-5f3aaa2de99b'>\n    <result queryid='last_page_after_id4' xmlns='urn:xmpp:mam:1' id='BO7CUD0U4301'>\n        <forwarded xmlns='urn:xmpp:forward:0'>\n            <delay xmlns='urn:xmpp:delay' stamp='2022-06-08T10:13:01.638156Z' from='alice@localhost/res1'/>\n            <message from='alice@localhost/res1' xmlns='jabber:client' xml:lang='en' to='bob@localhost/res1' type='chat'>\n                <body>Message #15</body>\n            </message>\n        </forwarded>\n    </result>\n</message>\n<iq from='alice@localhost' to='alice@localhost/res1' id='req3' type='result'>\n    <fin xmlns='urn:xmpp:mam:1'>\n        <set xmlns='http://jabber.org/protocol/rsm'>\n            <first index='6'>BO7CUD0L8B81</first> <!-- id of the message 11 -->\n            <last>BO7CUD0U4301</last> <!-- id of the message 15 -->\n            <count>11</count> <!-- messages 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 -->\n        </set>\n    
</fin>\n</iq>\n

Because index is not zero, the client would have to send more queries to get all missing messages.

"},{"location":"open-extensions/mam/#disable-message-counting","title":"Disable message counting","text":"

Sometimes, we don't want to count messages at all. It would improve performance.

For example, if we want to request another page of the result set, we already would know the total number of messages from the first query.

Sometimes, total and offset values are not visible in the UI.

<!-- Client sends -->\n<iq type='set' id='req5'>\n    <query xmlns='urn:xmpp:mam:1' queryid='before10'>\n        <x xmlns='jabber:x:data'>\n            <field var='simple'>\n                <value>true</value>\n            </field>\n        </x>\n        <set>\n            <max>5</max>\n            <before>BO7DD6KDP0O1</before>\n        </set>\n    </query>\n</iq>\n\n...skip messages...\n<!-- Server returns messages and the final IQ -->\n<iq from='alice@localhost' to='alice@localhost/res1' id='req5' type='result'>\n    <fin xmlns='urn:xmpp:mam:1'>\n        <set xmlns='http://jabber.org/protocol/rsm'>\n            <first>BO7DD6K1E8G1</first>\n            <last>BO7DD6KBAAG1</last>\n        </set>\n    </fin>\n</iq>\n
"},{"location":"open-extensions/muc_light/","title":"MUC light","text":""},{"location":"open-extensions/muc_light/#1-introduction","title":"1. Introduction","text":"

Classic Multi-User chat, as described in XEP-0045, adds an IRC-like functionality to XMPP. It distinguishes between the affiliation list and the occupant list, where the latter is based on presences routed to the room from the client resource. While perfectly sufficient for desktop applications and relatively stable network connections, it does not exactly meet the challenges the mobile world is facing. Modern mobile applications do not rely on presence information, as it can frequently change. The expected user experience not only differs from the IRC model, but also uses only a small subset of XEP-0045 features. The service described in this specification attempts to provide a complete solution for all common use cases of mobile group chats.

"},{"location":"open-extensions/muc_light/#2-requirements","title":"2. Requirements","text":"

Here are some high-level features required from a new variant of MUC

  1. The service allows any user to create a room for group communication.
  2. Users cannot join rooms on their own. They have to be added by the room owner or (if configured by service administrator) any other occupant.
  3. Only the owner can remove other occupants from the room.
  4. Every occupant can leave the room.
  5. A user may block the attempts of being added to the specific room or by specific user.
  6. The message sent in the room is always broadcasted to every occupant.
  7. The full occupant list is always available to all occupants.
  8. The occupant is always visible on the list, even if they do not have any resources online.
  9. Occupants can only have two affiliations: owner and member.
  10. There MUST be at most one owner in the room (the service can choose to treat all users equally).
  11. If the room becomes empty, it is destroyed.
  12. Occupants cannot hide behind nicks. Their real bare JID is always visible to everyone.
  13. No exchange of any <presence/> stanza inside the room.
  14. The user MUST be able to retrieve the list of rooms they occupy.
  15. The owner can modify the room configuration at any time; members may also be allowed to set configuration.
  16. All occupants can get the full room configuration at any time.
  17. Room history is available only in Message Archive Management.
"},{"location":"open-extensions/muc_light/#3-entity-use-cases","title":"3. Entity Use Cases","text":""},{"location":"open-extensions/muc_light/#31-discovering-a-muc-light-service","title":"3.1. Discovering a MUC Light Service","text":"

An entity often discovers a MUC service by sending a Service Discovery items (\"disco#items\") request to its own server.

Entity Queries the Server for Associated Services

<iq from='hag66@shakespeare.lit/pda'\n    id='h7ns81g'\n    to='shakespeare.lit'\n    type='get'>\n    <query xmlns='http://jabber.org/protocol/disco#items'/>\n</iq>\n

The server then returns the services that are associated with it.

Server Returns a Disco Items Result

<iq from='shakespeare.lit'\n    id='h7ns81g'\n    to='hag66@shakespeare.lit/pda'\n    type='result'>\n    <query xmlns='http://jabber.org/protocol/disco#items'>\n        <item jid='muclight.shakespeare.lit' name='MUC Light Service'/>\n    </query>\n</iq>\n
"},{"location":"open-extensions/muc_light/#32-discovering-the-features-supported-by-a-muc-light-service","title":"3.2. Discovering the Features Supported by a MUC Light Service","text":"

An entity may wish to discover if a service implements the Multi-User Chat protocol; in order to do so, it sends a service discovery information (\"disco#info\") query to the MUC service's JID.

Entity Queries Chat Service for MUC Light Support via Disco

<iq from='hag66@shakespeare.lit/pda'\n    id='lx09df27'\n    to='muclight.shakespeare.lit' type='get'>\n    <query xmlns='http://jabber.org/protocol/disco#info'/>\n</iq>\n

The service MUST return its identity and the features it supports.

Service Returns a Disco Info Result

<iq from='muclight.shakespeare.lit'\n    id='lx09df27'\n    to='hag66@shakespeare.lit/pda'\n    type='result'>\n    <query xmlns='http://jabber.org/protocol/disco#info'>\n        <identity category='conference' name='Shakespearean Chat Service' type='text'/>\n        <feature var='urn:xmpp:muclight:0'/>\n    </query>\n</iq>\n
"},{"location":"open-extensions/muc_light/#33-discovering-occupied-rooms","title":"3.3. Discovering Occupied Rooms","text":"

The service discovery items (\"disco#items\") protocol enables an entity to query a service for a list of associated items, which in the case of a chat service would consist of the specific chat rooms the entity occupies.

Entity Queries Chat Service for Rooms

<iq from='hag66@shakespeare.lit/pda'\n    id='zb8q41f4'\n    to='muclight.shakespeare.lit'\n    type='get'>\n    <query xmlns='http://jabber.org/protocol/disco#items'/>\n</iq>\n

The service MUST return a full list of the rooms the entity occupies. The server SHOULD include room name and version in each item.

Service Returns a Disco Items Result

<iq from='muclight.shakespeare.lit'\n    id='zb8q41f4'\n    to='hag66@shakespeare.lit/pda'\n    type='result'>\n    <query xmlns='http://jabber.org/protocol/disco#items'>\n        <item jid='heath@muclight.shakespeare.lit' name='A Lonely Heath' version='1'/>\n        <item jid='coven@muclight.shakespeare.lit' name='A Dark Cave' version='2'/>\n        <item jid='forres@muclight.shakespeare.lit' name='The Palace' version='3'/>\n        <item jid='inverness@muclight.shakespeare.lit'\n              name='Macbeth&apos;s Castle'\n              version='4'/>\n    </query>\n</iq>\n

If the full list of rooms is large (see XEP-0030 for details), the service MAY return only a partial list of rooms. If it does, it MUST include a <set/> element qualified by the 'http://jabber.org/protocol/rsm' namespace (as defined in Result Set Management (XEP-0059) [1]) to indicate that the list is not the full result set.

Service Returns a Limited List of Disco Items Result

<iq from='muclight.shakespeare.lit'\n    id='hx51v49s'\n    to='hag66@shakespeare.lit/pda'\n    type='result'>\n    <query xmlns='http://jabber.org/protocol/disco#items'>\n        <item jid='alls-well-that-ends-well@muclight.shakespeare.lit'\n              name='Everybody dies'\n              version='1'/>\n        <item jid='as-you-like-it@muclight.shakespeare.lit'\n              name='As you like it'\n              version='2'/>\n        <item jid='cleopatra@muclight.shakespeare.lit' name='Cleo fans' version='3'/>\n        <item jid='comedy-of-errors@muclight.shakespeare.lit'\n              name='404 Comedy not found'\n              version='4'/>\n        <item jid='coriolanus@muclight.shakespeare.lit'\n              name='What is Coriolanus?'\n              version='5'/>\n        <item jid='cymbeline@muclight.shakespeare.lit' name='Music room' version='6'/>\n        <item jid='hamlet@muclight.shakespeare.lit'\n              name='To chat or not to chat?'\n              version='7'/>\n        <item jid='henry-the-fourth-one@muclight.shakespeare.lit'\n              name='Royal Room 1'\n              version='8'/>\n        <item jid='henry-the-fourth-two@muclight.shakespeare.lit'\n              name='Royal Room 2'\n              version='9'/>\n        <item jid='henry-the-fifth@muclight.shakespeare.lit'\n              name='Royal Room Prime'\n              version='10'/>\n        <set xmlns='http://jabber.org/protocol/rsm'>\n            <first index='0'>alls-well-that-ends-well@muclight.shakespeare.lit</first>\n            <last>henry-the-fifth@muclight.shakespeare.lit</last>\n            <count>37</count>\n        </set>\n    </query>\n</iq>\n
"},{"location":"open-extensions/muc_light/#4-occupant-use-cases","title":"4. Occupant Use Cases","text":""},{"location":"open-extensions/muc_light/#41-sending-a-message-to-a-room","title":"4.1. Sending a message to a room","text":"

Every occupant in the room MAY broadcast messages to other occupants. In order to do so, the client MUST send a groupchat message to the room bare JID.

The room automatically assumes that occupants' nicks are equal to their bare JIDs. MUC light is designed for applications where it is not important to hide behind nicknames. On the contrary - it is up to the client to replace pure JIDs with user-friendly names like phone numbers or full names if necessary.

The room MUST route all messages of the 'groupchat' type.

Client sends a message to the room

<message from='hag66@shakespeare.lit/pda'\n         id='msg111'\n         to='coven@muclight.shakespeare.lit'\n         type='groupchat'>\n    <body>Harpier cries: 'tis time, 'tis time.</body>\n</message>\n

Server broadcasts a groupchat message

<message id='msg111' type='groupchat'\n    from='coven@muclight.shakespeare.lit/hag66@shakespeare.lit'\n    to='crone1@shakespeare.lit'>\n    <body>Harpier cries: 'tis time, 'tis time.</body>\n</message>\n
<message id='msg111' type='groupchat'\n    from='coven@muclight.shakespeare.lit/hag66@shakespeare.lit'\n    to='crone2@shakespeare.lit'>\n    <body>Harpier cries: 'tis time, 'tis time.</body>\n</message>\n

Note the message is sent to all the room occupants including the original sender.

<message id='msg111' type='groupchat'\n    from='coven@muclight.shakespeare.lit/hag66@shakespeare.lit'\n    to='hag66@shakespeare.lit'>\n    <body>Harpier cries: 'tis time, 'tis time.</body>\n</message>\n
"},{"location":"open-extensions/muc_light/#42-changing-a-room-subject","title":"4.2. Changing a room subject","text":"

The service MAY allow room occupants to set the room subject by changing the \"subject\" configuration field. A standard configuration stanza is used in this case. Subject change is announced like an ordinary configuration change.

Client sends a message to the room

<iq from='hag66@shakespeare.lit/pda'\n    id='subject1'\n    to='coven@muclight.shakespeare.lit'\n    type='set'>\n    <query xmlns='urn:xmpp:muclight:0#configuration'>\n        <subject>To be or not to be?</subject>\n    </query>\n</iq>\n
<message from='coven@muclight.shakespeare.lit'\n         to='crone1@shakespeare.lit'\n         type='groupchat'\n         id='newsubject'>\n    <x xmlns='urn:xmpp:muclight:0#configuration'>\n        <prev-version>asdfghj000</prev-version>\n        <version>asdfghj</version>\n        <subject>To be or not to be?</subject>\n    </x>\n    <body />\n</message>\n
<message from='coven@muclight.shakespeare.lit'\n         to='hag66@shakespeare.lit'\n         type='groupchat'\n         id='newsubject'>\n    <x xmlns='urn:xmpp:muclight:0#configuration'>\n        <prev-version>asdfghj000</prev-version>\n        <version>asdfghj</version>\n        <subject>To be or not to be?</subject>\n    </x>\n    <body />\n</message>\n
<iq to='hag66@shakespeare.lit/pda'\n    id='subject1'\n    from='coven@muclight.shakespeare.lit'\n    type='result' />\n
"},{"location":"open-extensions/muc_light/#43-requesting-room-information","title":"4.3. Requesting room information","text":"

Room occupants may request room information (configuration and/or occupants list) by an information version. It is up to the service to define the version string, the only requirement for it, is to be unique per room. Please note there are no separate versions for configuration and occupant list alone.

If the server side version does not match the one provided by the client (or if the client does not provide one, i.e. the 'version' element is empty), the service MUST respond with a current version string and full configuration and/or occupant list.

If the version strings match, server MUST reply with an empty result.

Only room occupants can get room information.

Matching versions

<iq from='crone1@shakespeare.lit/desktop'\n    id='config0'\n    to='coven@muclight.shakespeare.lit'\n    type='get'>\n    <query xmlns='urn:xmpp:muclight:0#configuration'>\n        <version>abcdefg</version>\n    </query>\n</iq>\n
<iq from='coven@muclight.shakespeare.lit'\n    id='config0'\n    to='crone1@shakespeare.lit/desktop'\n    type='result' />\n
"},{"location":"open-extensions/muc_light/#431-getting-the-room-configuration","title":"4.3.1. Getting the room configuration","text":"

Client gets configuration from the server

<iq from='crone1@shakespeare.lit/desktop'\n    id='getconfig1'\n    to='coven@muclight.shakespeare.lit'\n    type='get'>\n    <query xmlns='urn:xmpp:muclight:0#configuration'>\n        <version>abcdefg</version>\n    </query>\n</iq>\n
<iq from='coven@muclight.shakespeare.lit'\n    id='getconfig1'\n    to='crone1@shakespeare.lit/desktop'\n    type='result'>\n    <query xmlns='urn:xmpp:muclight:0#configuration'>\n        <version>123456</version>\n        <roomname>A Dark Cave</roomname>\n    </query>\n</iq>\n
"},{"location":"open-extensions/muc_light/#432-requesting-a-user-list","title":"4.3.2. Requesting a user list","text":"

Client requests a user list

<iq from='crone1@shakespeare.lit/desktop'\n    id='getmembers'\n    to='coven@muclight.shakespeare.lit'\n    type='get'>\n    <query xmlns='urn:xmpp:muclight:0#affiliations'>\n        <version>abcdefg</version>\n    </query>\n</iq>\n
<iq from='coven@muclight.shakespeare.lit'\n    id='getmembers'\n    to='crone1@shakespeare.lit/desktop'\n    type='result'>\n    <query xmlns='urn:xmpp:muclight:0#affiliations'>\n        <version>123456</version>\n        <user affiliation='owner'>user1@shakespeare.lit</user>\n        <user affiliation='member'>user2@shakespeare.lit</user>\n        <user affiliation='member'>user3@shakespeare.lit</user>\n    </query>\n</iq>\n
"},{"location":"open-extensions/muc_light/#433-requesting-full-room-information","title":"4.3.3. Requesting full room information","text":"

Room occupants may request both lists (configuration + occupants) with a single request.

Client requests room information

<iq from='crone1@shakespeare.lit/desktop'\n    id='getinfo1'\n    to='coven@muclight.shakespeare.lit'\n    type='get'>\n    <query xmlns='urn:xmpp:muclight:0#info'>\n        <version>abcdefg</version>\n    </query>\n</iq>\n
<iq from='coven@muclight.shakespeare.lit'\n    id='getinfo1'\n    to='crone1@shakespeare.lit/desktop'\n    type='result'>\n    <query xmlns='urn:xmpp:muclight:0#info'>\n        <version>123456</version>\n        <configuration>\n            <roomname>A Dark Cave</roomname>\n        </configuration>\n        <occupants>\n            <user affiliation='owner'>user1@shakespeare.lit</user>\n            <user affiliation='member'>user2@shakespeare.lit</user>\n            <user affiliation='member'>user3@shakespeare.lit</user>\n        </occupants>\n    </query>\n</iq>\n
"},{"location":"open-extensions/muc_light/#44-leaving-the-room","title":"4.4. Leaving the room","text":"

Every occupant is allowed to leave the room at any time. It is done by modifying their own affiliation.

Occupant leaves the room

<iq from='crone1@shakespeare.lit/desktop'\n    id='leave1'\n    to='coven@muclight.shakespeare.lit'\n    type='set'>\n    <query xmlns='urn:xmpp:muclight:0#affiliations'>\n        <user affiliation='none'>crone1@shakespeare.lit</user>\n    </query>\n</iq>\n
<message from='coven@muclight.shakespeare.lit'\n         to='crone1@shakespeare.lit'\n         type='groupchat'\n         id='leave1'>\n    <x xmlns='urn:xmpp:muclight:0#affiliations'>\n        <user affiliation='none'>crone1@shakespeare.lit</user>\n    </x>\n    <body />\n</message>\n
<message from='coven@muclight.shakespeare.lit'\n         to='hag77@shakespeare.lit'\n         type='groupchat'\n         id='leave1'>\n    <x xmlns='urn:xmpp:muclight:0#affiliations'>\n        <prev-version>1111111</prev-version>\n        <version>aaaaaaa</version>\n        <user affiliation='none'>crone1@shakespeare.lit</user>\n    </x>\n    <body />\n</message>\n
<message from='coven@muclight.shakespeare.lit'\n         to='hag88@shakespeare.lit'\n         type='groupchat'\n         id='leave1'>\n    <x xmlns='urn:xmpp:muclight:0#affiliations'>\n        <prev-version>1111111</prev-version>\n        <version>aaaaaaa</version>\n        <user affiliation='none'>crone1@shakespeare.lit</user>\n    </x>\n    <body />\n</message>\n
<iq to='crone1@shakespeare.lit/desktop'\n    id='leave1'\n    from='coven@muclight.shakespeare.lit'\n    type='result' />\n
"},{"location":"open-extensions/muc_light/#45-blocking-functionality","title":"4.5. Blocking functionality","text":"

A user MAY choose to automatically deny being added to the room. All stanzas must be directed to MUC Light service. User MAY send more than one item in a single request and mix both 'user' and 'room' elements.

If the occupant tries to add another user to the room, and this user has set a blocking policy, the server MUST ignore the attempt. No error is returned, this user is simply skipped when processing affiliation change query.

Service denies adding blocking user

<iq from='crone2@shakespeare.lit/desktop'\n    id='blocked1'\n    to='coven@muclight.shakespeare.lit'\n    type='set'>\n    <query xmlns='urn:xmpp:muclight:0#affiliations'>\n        <user affiliation='member'>crone1@shakespeare.lit</user>\n        <user affiliation='member'>crone3@shakespeare.lit</user>\n    </query>\n</iq>\n
<message from='coven@muclight.shakespeare.lit'\n         to='crone2@shakespeare.lit'\n         type='groupchat'\n         id='blockedadd1'>\n    <x xmlns='urn:xmpp:muclight:0#affiliations'>\n        <user affiliation='member'>crone3@shakespeare.lit</user>\n    </x>\n    <body />\n</message>\n
<message from='coven@muclight.shakespeare.lit'\n         to='hag88@shakespeare.lit'\n         type='groupchat'\n         id='blockedadd1'>\n    <x xmlns='urn:xmpp:muclight:0#affiliations'>\n        <user affiliation='member'>crone3@shakespeare.lit</user>\n    </x>\n    <body />\n</message>\n
<iq to='crone2@shakespeare.lit/desktop'\n    id='blocked1'\n    from='coven@muclight.shakespeare.lit'\n    type='result' />\n
"},{"location":"open-extensions/muc_light/#451-requesting-a-blocking-list","title":"4.5.1. Requesting a blocking list","text":"

In order to get the current blocking list in the MUC Light service, the client sends an empty IQ get query with a proper namespace.

The list includes only items with a 'deny' action, since the 'allow' behaviour is default for MUC Light and is only used for the list modification.

User retrieves a blocking list

<iq from='crone1@shakespeare.lit/desktop'\n    id='getblock1'\n    to='muclight.shakespeare.lit'\n    type='get'>\n    <query xmlns='urn:xmpp:muclight:0#blocking'>\n    </query>\n</iq>\n
<iq type='result'\n    id='getblock1'\n    to='crone1@shakespeare.lit/desktop'\n    from='muclight.shakespeare.lit'>\n    <query xmlns='urn:xmpp:muclight:0#blocking'>\n        <room action='deny'>coven@muclight.shakespeare.lit</room>\n        <user action='deny'>hag77@shakespeare.lit</user>\n    </query>\n</iq>\n
"},{"location":"open-extensions/muc_light/#452-blocking-a-room","title":"4.5.2. Blocking a room","text":"

In order to block a room, a query must contain at least one 'room' item with a 'deny' action and a room bare JID in the content.

User blocks a room

<iq from='crone1@shakespeare.lit/desktop'\n    id='block1'\n    to='muclight.shakespeare.lit'\n    type='set'>\n    <query xmlns='urn:xmpp:muclight:0#blocking'>\n        <room action='deny'>coven@muclight.shakespeare.lit</room>\n        <room action='deny'>chapel@shakespeare.lit</room>\n    </query>\n</iq>\n
<iq type='result'\n    id='block1'\n    to='crone1@shakespeare.lit/desktop'\n    from='muclight.shakespeare.lit' />\n
"},{"location":"open-extensions/muc_light/#453-blocking-a-user","title":"4.5.3. Blocking a user","text":"

In order to block a user, a query must contain at least one 'user' item with a 'deny' action and a user bare JID in the content.

User blocks another user

<iq from='crone1@shakespeare.lit/desktop'\n    id='block2'\n    to='muclight.shakespeare.lit'\n    type='set'>\n    <query xmlns='urn:xmpp:muclight:0#blocking'>\n        <user action='deny'>hag66@shakespeare.lit</user>\n        <user action='deny'>hag77@shakespeare.lit</user>\n    </query>\n</iq>\n
<iq type='result'\n    id='block2'\n    to='crone1@shakespeare.lit/desktop'\n    from='muclight.shakespeare.lit' />\n
"},{"location":"open-extensions/muc_light/#454-unblocking","title":"4.5.4. Unblocking","text":"

In order to cancel a blocking, a query must contain at least one 'room' or 'user' item with an 'allow' action and an appropriate bare JID in the content.

Unblocking a JID that is not blocked does not trigger any error. The server MUST return an empty IQ result in such case.

User cancels blocking

<iq from='crone1@shakespeare.lit/desktop'\n    id='unblock1'\n    to='muclight.shakespeare.lit'\n    type='set'>\n    <query xmlns='urn:xmpp:muclight:0#blocking'>\n        <room action='allow'>coven@muclight.shakespeare.lit</room>\n        <user action='allow'>hag66@shakespeare.lit</user>\n    </query>\n</iq>\n
<iq type='result'\n    id='unblock1'\n    to='crone1@shakespeare.lit/desktop'\n    from='muclight.shakespeare.lit' />\n
"},{"location":"open-extensions/muc_light/#5-owner-use-cases","title":"5. Owner Use Cases","text":""},{"location":"open-extensions/muc_light/#51-creating-a-new-room","title":"5.1. Creating a new room","text":"

A room is created by submitting a dedicated stanza. The client application should pick a random room node name, since a human-readable room name is in configuration.

For rules that apply to the configuration options, please see \"Setting room configuration\" chapter.

The client MAY include initial configuration and occupant list (the list MUST NOT include the creator). The server MAY allow sending an incomplete configuration form. In such case the server MUST use the default values for missing fields. The server MAY enforce a minimal occupant list length.

The service MAY either give the creator the 'owner' or 'member' status. In the latter case all users are equal.

Upon room creation success, the service MUST reply with an empty IQ result.

The following rules (similar to the ones relevant to the affiliation change request) apply to the occupant list:

  • 'none' affiliation cannot be used.
  • All user bare JIDs must be unique
  • At most one owner can be chosen. If none is chosen, the room creator will become \"just\" a 'member'.

After the room is created (but before receiving IQ result), new occupants (including the creator) receive <message/> from the room with their affiliations (the stanza MUST include only recipient's affiliation) and the initial room version. <prev-version/> element MUST NOT be included.

Client requests room creation

<iq from='crone1@shakespeare.lit/desktop'\n    id='create1'\n    to='coven@muclight.shakespeare.lit'\n    type='set'>\n    <query xmlns='urn:xmpp:muclight:0#create'>\n        <configuration>\n            <roomname>A Dark Cave</roomname>\n        </configuration>\n        <occupants>\n            <user affiliation='member'>user1@shakespeare.lit</user>\n            <user affiliation='member'>user2@shakespeare.lit</user>\n        </occupants>\n    </query>\n</iq>\n
<message from='coven@muclight.shakespeare.lit'\n         to='crone1@shakespeare.lit'\n         type='groupchat'\n         id='createnotif'>\n    <x xmlns='urn:xmpp:muclight:0#affiliations'>\n        <version>aaaaaaa</version>\n        <user affiliation='owner'>crone1@shakespeare.lit</user>\n    </x>\n    <body />\n</message>\n
<message from='coven@muclight.shakespeare.lit'\n         to='user1@shakespeare.lit'\n         type='groupchat'\n         id='createnotif'>\n    <x xmlns='urn:xmpp:muclight:0#affiliations'>\n        <version>aaaaaaa</version>\n        <user affiliation='member'>user1@shakespeare.lit</user>\n    </x>\n    <body />\n</message>\n
<message from='coven@muclight.shakespeare.lit'\n         to='user2@shakespeare.lit'\n         type='groupchat'\n         id='createnotif'>\n    <x xmlns='urn:xmpp:muclight:0#affiliations'>\n        <version>aaaaaaa</version>\n        <user affiliation='member'>user2@shakespeare.lit</user>\n    </x>\n    <body />\n</message>\n
<iq to='crone1@shakespeare.lit/desktop'\n    id='create1'\n    from='coven@muclight.shakespeare.lit'\n    type='result' />\n
"},{"location":"open-extensions/muc_light/#511-requesting-a-new-room-with-a-unique-name","title":"5.1.1. Requesting a new room with a unique name","text":"

If a client would like to avoid a room JID conflict, it MAY request creating a new room with a server-side generated name, that is verified to be unique. In order to do so, the client MUST send a creation request to the service JID, not the room bare JID. The IQ result will originate from the new room bare JID.

The messages with affiliation change notifications MUST have the same ID as IQ set and result.

Client requests room creation

<iq from='crone1@shakespeare.lit/desktop'\n    id='createrandom'\n    to='muclight.shakespeare.lit'\n    type='set'>\n    <query xmlns='urn:xmpp:muclight:0#create'>\n        <configuration>\n            <roomname>Random Cave</roomname>\n        </configuration>\n    </query>\n</iq>\n
<message from='randomcave@muclight.shakespeare.lit'\n         to='crone1@shakespeare.lit'\n         type='groupchat'\n         id='createrandom'>\n    <x xmlns='urn:xmpp:muclight:0#affiliations'>\n        <version>aaaaaaa</version>\n        <user affiliation='owner'>crone1@shakespeare.lit</user>\n    </x>\n    <body />\n</message>\n
<iq to='crone1@shakespeare.lit/desktop'\n    id='createrandom'\n    from='muclight.shakespeare.lit'\n    type='result' />\n
"},{"location":"open-extensions/muc_light/#512-room-already-exists","title":"5.1.2. Room already exists","text":"

If the chosen room name already exists, the service MUST return a 'conflict' error.

Client requests room creation with existing name

<iq from='crone1@shakespeare.lit/desktop'\n    id='conflict1'\n    to='castle@muclight.shakespeare.lit'\n    type='set'>\n    <query xmlns='urn:xmpp:muclight:0#create'>\n        <configuration>\n            <roomname>A Dark Cave</roomname>\n        </configuration>\n    </query>\n</iq>\n
<iq to='crone1@shakespeare.lit/desktop'\n    id='conflict1'\n    from='castle@muclight.shakespeare.lit'\n    type='error'>\n    <error type='cancel'>\n        <conflict xmlns='urn:ietf:params:xml:ns:xmpp-stanzas'/>\n    </error>\n</iq>\n
"},{"location":"open-extensions/muc_light/#52-destroying-a-room","title":"5.2. Destroying a room","text":"

A room is automatically destroyed when its occupant list becomes empty or the room owner explicitly sends an IQ with a room destroy request.

Before sending an IQ result, every occupant is notified that its affiliation has changed to 'none'. These notifications include an <x/> element qualified with a \"urn:xmpp:muclight:0#destroy\" namespace.

Only the room owner is allowed to destroy it.

Room destruction notification SHOULD NOT contain \"version\" (or \"prev-version\") information.

Client requests room destruction

<iq from='crone1@shakespeare.lit/desktop'\n    id='destroy1'\n    to='coven@muclight.shakespeare.lit'\n    type='set'>\n    <query xmlns='urn:xmpp:muclight:0#destroy' />\n</iq>\n
<message from='coven@muclight.shakespeare.lit'\n         to='crone1@shakespeare.lit'\n         type='groupchat'\n         id='destroynotif'>\n    <x xmlns='urn:xmpp:muclight:0#affiliations'>\n        <user affiliation='none'>crone1@shakespeare.lit</user>\n    </x>\n    <x xmlns='urn:xmpp:muclight:0#destroy' />\n    <body />\n</message>\n
<message from='coven@muclight.shakespeare.lit'\n         to='hag77@shakespeare.lit'\n         type='groupchat'\n         id='destroynotif'>\n    <x xmlns='urn:xmpp:muclight:0#affiliations'>\n        <user affiliation='none'>hag77@shakespeare.lit</user>\n    </x>\n    <x xmlns='urn:xmpp:muclight:0#destroy' />\n    <body />\n</message>\n
<message from='coven@muclight.shakespeare.lit'\n         to='hag88@shakespeare.lit'\n         type='groupchat'\n         id='destroynotif'>\n    <x xmlns='urn:xmpp:muclight:0#affiliations'>\n        <user affiliation='none'>hag88@shakespeare.lit</user>\n    </x>\n    <x xmlns='urn:xmpp:muclight:0#destroy' />\n    <body />\n</message>\n
<iq to='crone1@shakespeare.lit/desktop'\n    id='destroy1'\n    from='coven@muclight.shakespeare.lit'\n    type='result' />\n
"},{"location":"open-extensions/muc_light/#53-setting-room-configuration","title":"5.3. Setting room configuration","text":"

Only room owners can modify the room configuration but the service MAY allow members to change it too.

All room occupants MUST be notified about a configuration change and both the new and old room version string (<version /> and <prev-version /> respectively).

\"version\" and \"prev-version\" configuration field names are NOT ALLOWED - they are reserved for room versioning.

The service MAY allow the client to set the configuration fields with any name but it is NOT RECOMMENDED.

The Data Forms are not used for the configuration. Instead, the config fields are encoded in XML elements with names equal to the key and content equal to the value.

Client configuration request to the server

<iq from='crone1@shakespeare.lit/desktop'\n    id='conf2'\n    to='coven@muclight.shakespeare.lit'\n    type='set'>\n    <query xmlns='urn:xmpp:muclight:0#configuration'>\n        <roomname>A Darker Cave</roomname>\n    </query>\n</iq>\n
<message from='coven@muclight.shakespeare.lit'\n         to='crone1@shakespeare.lit'\n         type='groupchat'\n         id='configchange'>\n    <x xmlns='urn:xmpp:muclight:0#configuration'>\n        <prev-version>zaqwsx</prev-version>\n        <version>zxcvbnm</version>\n        <roomname>A Darker Cave</roomname>\n    </x>\n    <body />\n</message>\n
<message from='coven@muclight.shakespeare.lit'\n         to='hag66@shakespeare.lit'\n         type='groupchat'\n         id='configchange'>\n    <x xmlns='urn:xmpp:muclight:0#configuration'>\n        <prev-version>zaqwsx</prev-version>\n        <version>zxcvbnm</version>\n        <roomname>A Darker Cave</roomname>\n    </x>\n    <body />\n</message>\n
<iq to='crone1@shakespeare.lit/desktop'\n    id='conf2'\n    from='coven@muclight.shakespeare.lit'\n    type='result' />\n

The server SHOULD accept incomplete (i.e. delta) configuration forms. In such case, values of the missing fields SHOULD be preserved.

"},{"location":"open-extensions/muc_light/#54-changing-the-occupant-list","title":"5.4. Changing the occupant list","text":"

The occupant list is modified by a direct affiliation change. The following rules apply:

  1. There are only 3 affiliations.
    • owner - can do everything in the room
    • member - can send messages to the room and if the service allows it, can also change configuration or change others' affiliations
    • none - not in the room; it's a keyword for marking a user for removal from a room
  2. Every occupant can change its own affiliation to none in order to leave the room.
  3. The only way to join the room is to be added by another occupant.
  4. The owner can change affiliations at will.
  5. If the owner leaves, the server MAY use any strategy to choose a new one.
  6. The room can have at most one owner. Giving someone else the 'owner' status effectively causes the current one to lose it.
  7. The owner can choose a new owner when leaving by including both 'none' and 'owner' items in affiliation change request.
  8. Every user JID can be used in the request at most once.
  9. A single request MAY change multiple affiliations.
  10. All changes must be meaningful, e.g. setting member's affiliation to 'member' is considered a bad request.
  11. Server MAY allow members to add new members but they still cannot make anyone an 'owner' or remove other users from the room.
  12. On success the server will reply with a result IQ with all the changed items. BEFORE returning the IQ result, the service MUST route a message with the affiliation change to all relevant users.

Newcomers, i.e. users that were not occupants before the change, SHOULD receive only their own affiliation and SHOULD NOT receive a <prev-version /> element.

The notifications must include both the new and old room version (<version /> and <prev-version /> respectively) string (except for the ones directed to users that have been removed from the room).

The notifications contain a list of items. The item list may be different from the list in the IQ set, because some of the changes may require additional operations, e.g. choosing a new owner when the old one leaves. Users that are still in the room after the change will receive the full change list. Users that have been removed from the room with the request will get only one item: themselves with affiliation 'none'.

Affiliations change request

Let's consider a room coven with following members:

  • crone1 - owner
  • hag77 - member
  • hag88 - member

hag66 is not in the room yet.

User crone1 wants to add hag66 to the room, kick hag88 out and make hag77 the room owner.

<iq from='crone1@shakespeare.lit/desktop'\n    id='member1'\n    to='coven@muclight.shakespeare.lit'\n    type='set'>\n    <query xmlns='urn:xmpp:muclight:0#affiliations'>\n        <user affiliation='member'>hag66@shakespeare.lit</user>\n        <user affiliation='owner'>hag77@shakespeare.lit</user>\n        <user affiliation='none'>hag88@shakespeare.lit</user>\n    </query>\n</iq>\n

Now each user will receive an update. As you can see, affiliations have changed according to crone1's request. However, this request implies one more update. Since hag77 has been promoted to owner, crone1 is automatically demoted to member.

<message from='coven@muclight.shakespeare.lit'\n         to='crone1@shakespeare.lit'\n         type='groupchat'\n         id='memberchange'>\n    <x xmlns='urn:xmpp:muclight:0#affiliations'>\n        <prev-version>njiokm</prev-version>\n        <version>qwerty</version>\n        <user affiliation='member'>crone1@shakespeare.lit</user>\n        <user affiliation='member'>hag66@shakespeare.lit</user>\n        <user affiliation='owner'>hag77@shakespeare.lit</user>\n        <user affiliation='none'>hag88@shakespeare.lit</user>\n    </x>\n    <body></body>\n</message>\n

Because hag66 was not a member of this room before, they only receive their own affiliation and no prev-version element.

<message from='coven@muclight.shakespeare.lit'\n         to='hag66@shakespeare.lit'\n         type='groupchat'\n         id='memberchange'>\n    <x xmlns='urn:xmpp:muclight:0#affiliations'>\n        <version>qwerty</version>\n        <user affiliation='member'>hag66@shakespeare.lit</user>\n    </x>\n    <body></body>\n</message>\n

hag77 receives an ordinary update, just like crone1.

<message from='coven@muclight.shakespeare.lit'\n         to='hag77@shakespeare.lit'\n         type='groupchat'\n         id='memberchange'>\n    <x xmlns='urn:xmpp:muclight:0#affiliations'>\n        <prev-version>njiokm</prev-version>\n        <version>qwerty</version>\n        <user affiliation='member'>crone1@shakespeare.lit</user>\n        <user affiliation='member'>hag66@shakespeare.lit</user>\n        <user affiliation='owner'>hag77@shakespeare.lit</user>\n        <user affiliation='none'>hag88@shakespeare.lit</user>\n    </x>\n    <body></body>\n</message>\n

hag88 has been kicked out of the room and therefore gets only their own affiliation change of type 'none'.

<message from='coven@muclight.shakespeare.lit'\n         to='hag88@shakespeare.lit'\n         type='groupchat'\n         id='memberchange'>\n    <x xmlns='urn:xmpp:muclight:0#affiliations'>\n        <user affiliation='none'>hag88@shakespeare.lit</user>\n    </x>\n    <body></body>\n</message>\n

crone1 gets the result IQ after the change.

<iq to='crone1@shakespeare.lit/desktop'\n    id='member1'\n    from='coven@muclight.shakespeare.lit'\n    type='result' />\n

"},{"location":"open-extensions/muc_light/#6-interactions-with-rfcs-and-other-xeps","title":"6. Interactions with RFCs and other XEPs","text":""},{"location":"open-extensions/muc_light/#61-user-rosters","title":"6.1. User rosters","text":"

The service MAY add user's rooms to its roster. It allows the client to skip the separate Disco request to the service. Roster items with rooms MUST belong to the group \"urn:xmpp:muclight:0\" (MUC Light namespace) and include the <version/> element. Their subscription type MUST be 'to'.

Entity requests the roster and receives a reply that includes a room item

<iq type='get' id='roster1' to='shakespeare.lit'>\n    <query xmlns='jabber:iq:roster'/>\n</iq>\n
<iq id='roster1' to='hag66@shakespeare.lit/tablet' type='result'>\n    <query xmlns='jabber:iq:roster' ver='ver7'>\n        <item jid='hag77@shakespeare.lit' subscription='both'/>\n        <item jid='hag88@shakespeare.lit' subscription='both'/>\n        <item jid='coven@muclight.shakespeare.lit' name='The Coven' subscription='to'>\n            <group>urn:xmpp:muclight:0</group>\n            <version>1234345</version>\n        </item>\n    </query>\n</iq>\n
"},{"location":"open-extensions/muc_light/#62-xep-0313-message-archive-management","title":"6.2. XEP-0313 Message Archive Management","text":"

This section defines the rules for archiving MUC Light events and messages. Stanzas described in the subsections below MUST be archived by the server. The stanzas not included here MUST NOT be archived.

The <message/> element inside <forwarded/> MUST include a \"from\" attribute and MUST NOT include a \"to\" attribute. \"id\" SHOULD be archived as well.

In case of regular groupchat messages, the \"from\" attribute MUST consist of a room full JID with a sender bare JID in the resource part. As for room notification, e.g. create event, \"from\" MUST be equal to room bare JID.

Examples below use MAM v0.4 protocol. The archive can be fetched only from a specific room, the client MUST NOT query MUC Light service directly.

"},{"location":"open-extensions/muc_light/#621-groupchat-message-from-occupant","title":"6.2.1. Groupchat message from occupant","text":"

Message from a user MUST be archived with all child elements.

Occupant queries MAM and receives regular groupchat message

<iq type='set' id='mamget1' to='coven@muclight.shakespeare.lit'>\n    <query xmlns='urn:xmpp:mam:1' queryid='f27' />\n</iq>\n
<message id='aeb213' to='hag66@shakespeare.lit/pda'>\n    <result xmlns='urn:xmpp:mam:1' queryid='f27' id='28482-98726-73623'>\n        <forwarded xmlns='urn:xmpp:forward:0'>\n            <delay xmlns='urn:xmpp:delay' stamp='2010-07-10T23:08:25Z'/>\n            <message from=\"coven@muclight.shakespeare.lit/hag77@shakespeare.lit\"\n                     id=\"msgid11\">\n                <body>Welcome!</body>\n                <x xmlns=\"elixir:ingredient\">bat-wing</x>\n            </message>\n        </forwarded>\n    </result>\n</message>\n
<iq type='result' id='mamget1' from='coven@muclight.shakespeare.lit'/>\n
"},{"location":"open-extensions/muc_light/#622-affiliation-change","title":"6.2.2. Affiliation change","text":"

Every archived affiliation change notification MUST include the <version/> element and MUST NOT contain the <prev-version/> element.

Occupant queries MAM and receives an affiliation change notification

<iq type='set' id='mamget2' to='muclight.shakespeare.lit'>\n    <query xmlns='urn:xmpp:mam:1' queryid='f37' />\n</iq>\n
<message id='aef2133' to='hag66@shakespeare.lit/pda'>\n    <result xmlns='urn:xmpp:mam:1' queryid='f37' id='21482-98726-71623'>\n        <forwarded xmlns='urn:xmpp:forward:0'>\n            <delay xmlns='urn:xmpp:delay' stamp='2013-07-10T21:08:25Z'/>\n            <message from=\"coven@muclight.shakespeare.lit\" id=\"notifid11\">\n                <x xmlns='urn:xmpp:muclight:0#affiliations'>\n                    <version>b9uf13h98f13</version>\n                    <user affiliation='owner'>hag66@shakespeare.lit</user>\n                    <user affiliation='member'>user1@shakespeare.lit</user>\n                    <user affiliation='member'>user2@shakespeare.lit</user>\n                </x>\n            </message>\n        </forwarded>\n    </result>\n</message>\n
<iq type='result' id='mamget2'/>\n
"},{"location":"open-extensions/muc_light/#623-room-creation","title":"6.2.3. Room creation","text":"

Room creation is archived as an affiliation change that includes ALL initial occupants (including the room creator).

"},{"location":"open-extensions/muc_light/#7-general-error-cases","title":"7. General Error Cases","text":""},{"location":"open-extensions/muc_light/#71-client-sends-an-unauthorized-stanza-to-a-room","title":"7.1. Client sends an unauthorized stanza to a room","text":"

If a client sends a stanza to a room that it does not occupy, the service MUST reply with an 'item-not-found' error.

Unauthorized IQ

<iq from='crone1@shakespeare.lit/desktop'\n    id='member1'\n    to='coven@muclight.shakespeare.lit'\n    type='set'>\n    <query xmlns='urn:xmpp:muclight:0#affiliations'>\n        <user affiliation='member'>hag66@shakespeare.lit</user>\n    </query>\n</iq>\n
<iq to='crone1@shakespeare.lit/desktop'\n    id='member1'\n    from='coven@muclight.shakespeare.lit'\n    type='error'>\n    <error type='cancel'>\n        <item-not-found xmlns='urn:ietf:params:xml:ns:xmpp-stanzas'/>\n    </error>\n</iq>\n

Unauthorized message

<message from='hag66@shakespeare.lit/pda'\n         id='unauth2'\n         to='coven@muclight.shakespeare.lit'\n         type='groupchat'>\n    <body>Harpier cries: 'tis time, 'tis time.</body>\n</message>\n
<message to='hag66@shakespeare.lit/pda'\n         id='unauth2'\n         from='coven@muclight.shakespeare.lit'\n         type='error'>\n    <error type='cancel'>\n        <item-not-found xmlns='urn:ietf:params:xml:ns:xmpp-stanzas'/>\n    </error>\n</message>\n
"},{"location":"open-extensions/muc_light/#72-client-sends-a-presence-stanza-to-the-service","title":"7.2. Client sends a <presence/> stanza to the service","text":"

The service MUST ignore all <presence/> stanzas sent by the client.

"},{"location":"open-extensions/muc_light/#73-client-sends-an-invalid-stanza-to-the-service","title":"7.3. Client sends an invalid stanza to the service","text":"

If the service receives an invalid stanza, it MUST reply with a 'bad-request' error.

Invalid IQ

<iq from='crone1@shakespeare.lit/desktop'\n    id='bad1'\n    to='coven@muclight.shakespeare.lit'\n    type='set'>\n    <query xmlns='urn:xmpp:muclight:0#affiliations'>\n        <item role='participant'>hag66@shakespeare.lit</item>\n    </query>\n</iq>\n
<iq to='crone1@shakespeare.lit/desktop'\n    id='bad1'\n    from='coven@muclight.shakespeare.lit'\n    type='error'>\n    <error type='modify'>\n        <bad-request xmlns='urn:ietf:params:xml:ns:xmpp-stanzas'/>\n    </error>\n</iq>\n

Invalid message

<message from='hag66@shakespeare.lit/pda'\n         id='bad2'\n         to='coven@muclight.shakespeare.lit'\n         type='chat'>\n    <body>Harpier cries: 'tis time, 'tis time.</body>\n</message>\n
<message to='hag66@shakespeare.lit/pda'\n         id='bad2'\n         from='coven@muclight.shakespeare.lit'\n         type='error'>\n    <error type='modify'>\n        <bad-request xmlns='urn:ietf:params:xml:ns:xmpp-stanzas'/>\n    </error>\n</message>\n
"},{"location":"open-extensions/muc_light/#74-request-sender-has-insufficient-privileges","title":"7.4. Request sender has insufficient privileges","text":"

If the request sender does not have sufficient privileges (but is a room occupant), the service MUST reply with a 'not-allowed' error.

It occurs in the following cases:

  • A member tries to change the configuration but the service is not configured to allow it. It does not apply to the subject change, although it has to be performed by sending <message/> with <subject/>, not configuration <iq/>.
  • A member tries to change anyone's affiliation to 'none' or 'owner'.
  • A member tries to change someone's affiliation to 'member' but the service is not configured to allow it.

Prohibited IQ

<iq from='minion@shakespeare.lit/desktop'\n    id='privileges1'\n    to='coven@muclight.shakespeare.lit'\n    type='set'>\n    <query xmlns='urn:xmpp:muclight:0#affiliations'>\n        <user role='owner'>minion@shakespeare.lit</user>\n    </query>\n</iq>\n
<iq to='minion@shakespeare.lit/desktop'\n    id='privileges1'\n    from='coven@muclight.shakespeare.lit'\n    type='error'>\n    <error type='cancel'>\n        <not-allowed xmlns='urn:ietf:params:xml:ns:xmpp-stanzas'/>\n    </error>\n</iq>\n
"},{"location":"open-extensions/muc_light/#8-implementation-notes","title":"8. Implementation Notes","text":""},{"location":"open-extensions/muc_light/#81-xep-0045-mappings-aka-legacy-mode","title":"8.1. XEP-0045 mappings a.k.a. legacy mode","text":"

Some client-side developers might choose to use existing XEP-0045 Multi-User Chat implementations to interface with the new MUC Light. There may be various reasons to do so: using a familiar protocol, avoiding additional implementation, quick prototyping etc. This section provides suggestions of mappings between XEP-0045 stanzas and the new ones described in this document. These mappings are ONLY available to use in the legacy mode, which allows using a subset of classic MUC stanzas but comes with the drawback that some of the functions are limited.

Operations not described here SHOULD remain unmodified.

"},{"location":"open-extensions/muc_light/#811-discovering-the-features-supported-by-a-muc-service","title":"8.1.1. Discovering the Features Supported by a MUC Service","text":"

A Disco result MAY either include a new <feature/> element with an \"http://jabber.org/protocol/muc\" namespace next to MUC Light one, or completely replace it, which is the RECOMMENDED behaviour.

Returning a MUC namespace in Disco

<iq from='hag66@shakespeare.lit/pda'\n    id='lx09df27'\n    to='muclight.shakespeare.lit'\n    type='get'>\n    <query xmlns='http://jabber.org/protocol/disco#info'/>\n</iq>\n
<iq from='muclight.shakespeare.lit'\n    id='lx09df27'\n    to='hag66@shakespeare.lit/pda'\n    type='result'>\n    <query xmlns='http://jabber.org/protocol/disco#info'>\n        <identity category='conference'\n                  name='Shakespearean Chat Service'\n                  type='text'/>\n        <feature var='http://jabber.org/protocol/muc'/>\n    </query>\n</iq>\n
"},{"location":"open-extensions/muc_light/#812-discovering-occupied-rooms","title":"8.1.2. Discovering Occupied Rooms","text":"

The room list MUST NOT include room versions.

Service Returns Disco Items Result

<iq from='muclight.shakespeare.lit'\n    id='zb8q41f4'\n    to='hag66@shakespeare.lit/pda'\n    type='result'>\n    <query xmlns='http://jabber.org/protocol/disco#items'>\n        <item jid='heath@muclight.shakespeare.lit'\n              name='A Lonely Heath'/>\n        <item jid='coven@muclight.shakespeare.lit'\n              name='A Dark Cave'/>\n        <item jid='forres@muclight.shakespeare.lit'\n              name='The Palace'/>\n        <item jid='inverness@muclight.shakespeare.lit'\n              name='Macbeth&apos;s Castle'/>\n    </query>\n</iq>\n
"},{"location":"open-extensions/muc_light/#813-changing-a-room-subject","title":"8.1.3. Changing a room subject","text":"

Instead of distributing the configuration change notifications, the room MUST route <message/> with a <subject/> like a classic MUC would. The client MUST send a classic message <subject/> as well. The room SHOULD save a new subject in the room configuration.

New subject is routed as an ordinary message

<message from='hag66@shakespeare.lit/pda'\n         id='compsubject'\n         to='coven@muclight.shakespeare.lit'\n         type='groupchat'>\n    <subject>To be or not to be?</subject>\n</message>\n
<message from='coven@muclight.shakespeare.lit'\n         to='crone1@shakespeare.lit'\n         type='groupchat'\n         id='compsubject'>\n    <subject>To be or not to be?</subject>\n</message>\n
<message from='coven@muclight.shakespeare.lit'\n         to='hag66@shakespeare.lit'\n         type='groupchat'\n         id='compsubject'>\n    <subject>To be or not to be?</subject>\n</message>\n
"},{"location":"open-extensions/muc_light/#814-getting-a-room-configuration","title":"8.1.4. Getting a room configuration","text":"

Room configuration is encoded in a Data Form, that simulates the XEP-0045 config form.

Getting the room configuration does not benefit from room versioning.

Requesting room configuration

<iq from='crone1@shakespeare.lit/desktop'\n    id='comp-config'\n    to='coven@muclight.shakespeare.lit'\n    type='get'>\n    <query xmlns='http://jabber.org/protocol/muc#owner'/>\n</iq>\n
<iq from='coven@muclight.shakespeare.lit'\n    id='comp-config'\n    to='crone1@shakespeare.lit/desktop'\n    type='result'>\n    <query xmlns='http://jabber.org/protocol/muc#owner'>\n        <x xmlns='jabber:x:data' type='form'>\n            <title>Configuration for \"coven\" Room</title>\n            <field type='hidden' var='FORM_TYPE'>\n                <value>http://jabber.org/protocol/muc#roomconfig</value>\n            </field>\n            <field label='Natural-Language Room Name'\n                   type='text-single'\n                   var='muc#roomconfig_roomname'>\n                <value>A Dark Cave</value>\n            </field>\n            <field label='Room subject'\n                   type='text-single'\n                   var='muc#roomconfig_subject'>\n                <value>To be or not to be?</value>\n            </field>\n        </x>\n    </query>\n</iq>\n
"},{"location":"open-extensions/muc_light/#815-requesting-a-user-list","title":"8.1.5. Requesting a user list","text":"

A user list is retrieved with an affiliation IQ get.

Requesting affiliation list

<iq from='crone1@shakespeare.lit/desktop'\n    id='comp-getaff'\n    to='coven@muclight.shakespeare.lit' type='get'>\n    <query xmlns='http://jabber.org/protocol/muc#admin'>\n        <item affiliation='owner'/>\n        <item affiliation='member'/>\n    </query>\n</iq>\n
<iq from='coven@muclight.shakespeare.lit'\n    id='comp-getaff'\n    to='crone1@shakespeare.lit/desktop'\n    type='result'>\n    <query xmlns='http://jabber.org/protocol/muc#admin'>\n        <item affiliation='owner'\n              jid='crone1@shakespeare.lit'\n              nick='crone1@shakespeare.lit'\n              role='moderator'/>\n        <item affiliation='member'\n              jid='hag66@shakespeare.lit'\n              nick='hag66@shakespeare.lit'\n              role='participant'/>\n    </query>\n</iq>\n
"},{"location":"open-extensions/muc_light/#816-requesting-room-information","title":"8.1.6. Requesting room information","text":"

There is no XEP-0045 equivalent for getting full room information.

"},{"location":"open-extensions/muc_light/#817-leaving-the-room","title":"8.1.7. Leaving the room","text":"

Leaving the room is performed by setting the own affiliation to 'none'. The service uses <presence/> to notify all occupants (and former occupant) about the change. <presence/> to the leaving occupant MUST be of the type \"unavailable\" and MUST include a status code 321 (i.e. user leaving due to affiliation change).

Leaving the room

<iq from='crone1@shakespeare.lit/desktop'\n    id='comp-leave'\n    to='coven@muclight.shakespeare.lit'\n    type='set'>\n    <query xmlns='http://jabber.org/protocol/muc#admin'>\n        <item affiliation='none' jid='crone1@shakespeare.lit'/>\n    </query>\n</iq>\n
<presence from='coven@muclight.shakespeare.lit/crone1@shakespeare.lit'\n          to='crone1@shakespeare.lit'\n          type='unavailable'>\n    <x xmlns='http://jabber.org/protocol/muc#user'>\n        <item affiliation='none' jid='crone1@shakespeare.lit/pda' role='none'/>\n        <status code='321'/>\n    </x>\n</presence>\n
<presence from='coven@muclight.shakespeare.lit/crone1@shakespeare.lit'\n          to='hag66@shakespeare.lit/desktop'>\n    <x xmlns='http://jabber.org/protocol/muc#user'>\n        <item affiliation='none' jid='crone1@shakespeare.lit/pda' role='none'/>\n        <status code='321'/>\n    </x>\n</presence>\n
<iq from='coven@muclight.shakespeare.lit'\n    id='comp-leave'\n    to='crone1@shakespeare.lit/desktop'\n    type='result'/>\n
"},{"location":"open-extensions/muc_light/#818-blocking-functionality","title":"8.1.8. Blocking functionality","text":"

The blocking functionality uses a small subset of the Privacy Lists protocol. Stanzas MUST be addressed to the sender's bare JID (the to attribute may be skipped). The privacy list name MUST be equal to \"urn:xmpp:muclight:0\". Obviously, this method won't work properly in XMPP Server Federation, because privacy stanzas are handled by sender's server and the MUC Light Blocking functionality is handled by a MUC Light service server. As opposed to XEP-0016, it is allowed to send \"delta\" privacy lists.

"},{"location":"open-extensions/muc_light/#8181-request-blocking-list","title":"8.1.8.1. Request blocking list","text":"

Retrieving blocking list

<iq from='crone1@shakespeare.lit/desktop' type='get' id='comp-getlist'>\n    <query xmlns='jabber:iq:privacy'>\n        <list name='urn:xmpp:muclight:0'/>\n    </query>\n</iq>\n
<iq type='result' id='comp-getlist' to='crone1@shakespeare.lit/desktop'>\n    <query xmlns='jabber:iq:privacy'>\n        <list name='urn:xmpp:muclight:0'>\n            <item type='jid'\n                  value='coven@muclight.shakespeare.lit'\n                  action='deny'\n                  order='1'/>\n            <item type='jid'\n                  value='muclight.shakespeare.lit/hag66@shakespeare.lit'\n                  action='deny'\n                  order='1'/>\n        </list>\n    </query>\n</iq>\n
"},{"location":"open-extensions/muc_light/#8182-blocking-a-room","title":"8.1.8.2. Blocking a room","text":"

In order to block a room, the client MUST deny a room bare JID in privacy list.

Blocking a room

<iq from='crone1@shakespeare.lit/desktop' type='set' id='comp-blockroom'>\n    <query xmlns='jabber:iq:privacy'>\n        <list name='urn:xmpp:muclight:0'>\n            <item type='jid'\n                  value='coven@muclight.shakespeare.lit'\n                  action='deny'\n                  order='1'/>\n        </list>\n    </query>\n</iq>\n
<iq type='result' id='comp-blockroom' to='crone1@shakespeare.lit/desktop' />\n
"},{"location":"open-extensions/muc_light/#8183-blocking-a-user","title":"8.1.8.3. Blocking a user","text":"

In order to block a user, the client MUST deny the service JID with the user's bare JID in the resource.

Blocking a user

<iq from='crone1@shakespeare.lit/desktop' type='set' id='comp-blockuser'>\n    <query xmlns='jabber:iq:privacy'>\n        <list name='urn:xmpp:muclight:0'>\n            <item type='jid'\n                  value='muclight.shakespeare.lit/hag66@shakespeare.lit'\n                  action='deny'\n                  order='1'/>\n        </list>\n    </query>\n</iq>\n
<iq type='result' id='comp-blockuser' to='crone1@shakespeare.lit/desktop' />\n
"},{"location":"open-extensions/muc_light/#8184-unblocking","title":"8.1.8.4. Unblocking","text":"

Unblocking

<iq from='crone1@shakespeare.lit/desktop' type='set' id='comp-getlist'>\n    <query xmlns='jabber:iq:privacy'>\n        <list name='urn:xmpp:muclight:0'>\n            <item type='jid'\n                  value='coven@muclight.shakespeare.lit'\n                  action='allow'\n                  order='1'/>\n            <item type='jid'\n                  value='muclight.shakespeare.lit/hag66@shakespeare.lit'\n                  action='allow'\n                  order='1'/>\n        </list>\n    </query>\n</iq>\n
<iq type='result' id='comp-getlist' to='crone1@shakespeare.lit/desktop' />\n
"},{"location":"open-extensions/muc_light/#819-creating-a-room","title":"8.1.9. Creating a room","text":"

The room is created in a standard XEP-0045 way. Client MUST use a nick equal to their own bare JID.

Compatibility mode MUST NOT support a unique room name generation.

Creating a room

<presence from='crone1@shakespeare.lit/desktop'\n          to='coven@muclight.shakespeare.lit/crone1@shakespeare.lit'>\n    <x xmlns='http://jabber.org/protocol/muc'/>\n</presence>\n
<presence from='coven@muclight.shakespeare.lit/crone1@shakespeare.lit'\n          to='crone1@shakespeare.lit/desktop'>\n    <x xmlns='http://jabber.org/protocol/muc#user'>\n        <item affiliation='owner' role='moderator'/>\n        <status code='110'/>\n        <status code='201'/>\n    </x>\n</presence>\n
"},{"location":"open-extensions/muc_light/#8191-room-already-exists","title":"8.1.9.1. Room already exists","text":"

If the client attempts to create a room that is already used, it will receive an error <presence/> informing that registration is required (like in the case of members-only rooms in XEP-0045).

Creating a room

<presence from='coven@muclight.shakespeare.lit/crone1@shakespeare.lit'\n          to='crone1@shakespeare.lit/desktop'\n          type='error'>\n    <x xmlns='http://jabber.org/protocol/muc'/>\n    <error by='coven@muclight.shakespeare.lit' type='auth'>\n        <registration-required xmlns='urn:ietf:params:xml:ns:xmpp-stanzas'/>\n    </error>\n</presence>\n
"},{"location":"open-extensions/muc_light/#8110-destroying-the-room","title":"8.1.10. Destroying the room","text":"

A classic XEP-0045 method is used but the service SHOULD NOT forward reason and alternate venue JID.

Destroying the room

<iq from='crone1@shakespeare.lit/desktop'\n    id='begone'\n    to='heath@muclight.shakespeare.lit'\n    type='set'>\n    <query xmlns='http://jabber.org/protocol/muc#owner'>\n        <destroy jid='coven@muclight.shakespeare.lit'>\n            <reason>Some reason.</reason>\n        </destroy>\n    </query>\n</iq>\n
<presence from='heath@muclight.shakespeare.lit/crone1@shakespeare.lit'\n    to='crone1@shakespeare.lit/desktop' type='unavailable'>\n    <x xmlns='http://jabber.org/protocol/muc#user'>\n        <item affiliation='none' role='none'/>\n        <destroy />\n    </x>\n</presence>\n
<presence\n    from='heath@muclight.shakespeare.lit/wiccarocks@shakespeare.lit'\n    to='wiccarocks@shakespeare.lit/laptop' type='unavailable'>\n    <x xmlns='http://jabber.org/protocol/muc#user'>\n        <item affiliation='none' role='none'/>\n        <destroy />\n    </x>\n</presence>\n
<presence\n    from='heath@muclight.shakespeare.lit/hag66@shakespeare.lit'\n    to='hag66@shakespeare.lit/pda'\n    type='unavailable'>\n    <x xmlns='http://jabber.org/protocol/muc#user'>\n        <item affiliation='none' role='none'/>\n        <destroy />\n    </x>\n</presence>\n
<iq from='heath@muclight.shakespeare.lit'\n    id='begone'\n    to='crone1@shakespeare.lit/desktop'\n    type='result'/>\n
"},{"location":"open-extensions/muc_light/#8111-setting-room-configuration","title":"8.1.11. Setting room configuration","text":"

Room occupants can use a standard XEP-0045 configuration modification method. The service MUST broadcast only the notification about the configuration change with a status code 104, so every occupant can retrieve the new room configuration in a separate request. The client is allowed to send a config delta in a form.

Setting room configuration

<iq to='coven@muclight.shakespeare.lit'\n    id='comp-setconfig'\n    from='crone1@shakespeare.lit/desktop'\n    type='set'>\n    <query xmlns='http://jabber.org/protocol/muc#owner'>\n        <x xmlns='jabber:x:data' type='form'>\n            <field type='hidden' var='FORM_TYPE'>\n                <value>http://jabber.org/protocol/muc#roomconfig</value>\n            </field>\n            <field label='Natural-Language Room Name'\n                   type='text-single'\n                   var='muc#roomconfig_roomname'>\n                <value>A Darker Cave</value>\n            </field>\n            <field label='Room subject'\n                   type='text-single'\n                   var='muc#roomconfig_subject'>\n                <value>To be!</value>\n            </field>\n        </x>\n    </query>\n</iq>\n
<message from='coven@muclight.shakespeare.lit'\n         id='comp-confchange'\n         to='crone1@shakespeare.lit/desktop'\n         type='groupchat'>\n    <x xmlns='http://jabber.org/protocol/muc#user'>\n        <status code='104'/>\n    </x>\n</message>\n
<message from='coven@muclight.shakespeare.lit'\n         id='comp-confchange'\n         to='crone2@shakespeare.lit/desktop'\n         type='groupchat'>\n    <x xmlns='http://jabber.org/protocol/muc#user'>\n        <status code='104'/>\n    </x>\n</message>\n
<iq from='coven@muclight.shakespeare.lit'\n    id='comp-setconfig'\n    to='crone1@shakespeare.lit/desktop'\n    type='result'/>\n
"},{"location":"open-extensions/muc_light/#8112-changing-occupant-list","title":"8.1.12. Changing occupant list","text":"

The service MUST send an affiliation change notification to all participants. Leaving users MUST NOT receive any information except for their own \"none\" affiliation. New users MUST receive an invitation message.

Changing occupant list

<iq from='crone1@shakespeare.lit/desktop'\n    id='comp-setaff'\n    to='coven@muclight.shakespeare.lit'\n    type='set'>\n    <query xmlns='http://jabber.org/protocol/muc#admin'>\n        <item affiliation='none' jid='hag66@shakespeare.lit'/>\n        <item affiliation='member' jid='hecate@shakespeare.lit'/>\n    </query>\n</iq>\n
<presence from='coven@chat.shakespeare.lit/hag66@shakespeare.lit'\n          to='hag66@shakespeare.lit'\n          type='unavailable'>\n    <x xmlns='http://jabber.org/protocol/muc#user'>\n        <item affiliation='none' jid='hag66@shakespeare.lit' role='none'/>\n        <status code='321'/>\n    </x>\n</presence>\n
<message from='coven@muclight.shakespeare.lit'\n         id='comp-invite0'\n         to='hecate@shakespeare.lit'>\n    <x xmlns='http://jabber.org/protocol/muc#user'>\n        <invite from='crone1@shakespeare.lit'/>\n    </x>\n</message>\n
<presence from='coven@chat.shakespeare.lit/hag66@shakespeare.lit'\n          to='crone1@shakespeare.lit'\n          type='unavailable'>\n    <x xmlns='http://jabber.org/protocol/muc#user'>\n        <item affiliation='none' jid='hag66@shakespeare.lit' role='none'/>\n        <status code='321'/>\n    </x>\n</presence>\n
<presence from='coven@chat.shakespeare.lit/hecate@shakespeare.lit'\n          to='crone1@shakespeare.lit'>\n    <x xmlns='http://jabber.org/protocol/muc#user'>\n        <item affiliation='member'\n              jid='hecate@shakespeare.lit'\n              role='participant'\n              nick='hecate@shakespeare.lit'/>\n    </x>\n</presence>\n
<iq from='coven@muclight.shakespeare.lit'\n    id='comp-setaff'\n    to='crone1@shakespeare.lit/desktop'\n    type='result'/>\n
"},{"location":"open-extensions/muc_light/#82-service-limits-and-configuration","title":"8.2. Service limits and configuration","text":"

The MUC Light service may be abused by malicious users, e.g. due to replicating a single message for every room occupant. The list below contains suggested configurable limits that SHOULD be implemented.

The service features that might vary depending on a specific application are included as well.

  • Maximum number of rooms the user occupies.
  • Blocking feature enabled/disabled.
  • XEP-0045 compatibility mode enabled/disabled.
  • Room creator's initial affiliation: owner/member.
  • Room configuration may be changed by owner/occupants.
  • New members can be invited by owner/occupants.
  • Maximal room size.
"},{"location":"open-extensions/smart-markers/","title":"Smart Markers","text":"

This module allows the client to query for the most recent chat markers.

When a client enters a conversation after being offline for a while, such a client might want to know what was the last message-id that was marked according to the rules defined in XEP-0333 - Chat Markers, in order to know where they left off, and build an enhanced UI.

MongooseIM provides such functionality, using mod_smart_markers

"},{"location":"open-extensions/smart-markers/#namespace","title":"Namespace","text":"
esl:xmpp:smart-markers:0\n
"},{"location":"open-extensions/smart-markers/#fetching-a-conversations-latest-markers","title":"Fetching a conversation's latest markers","text":"

Given a peer, i.e., another user or a muc room, we can fetch the marker we last sent, to the main thread or any other sub-thread, with an IQ like the following:

<iq id='iq-unique-id' type='get'>\n  <query xmlns='esl:xmpp:smart-markers:0' peer='<peer-bare-jid>' [thread='<thread-id>' after='<RFC3339-timestamp>'] />\n</iq>\n
where:

  • <peer-bare-jid> MUST be the bare jid of the peer whose last marker wants to be checked. It can be the bare jid of a user, or of MUC room.
  • <thread> is an optional attribute that indicates if the check refers to a specific thread in the conversation. If not provided, defaults to the main conversation thread.
  • <after> is an optional attribute indicating whether markers sent only after a certain timestamp are desired. This most often makes sense for big groupchats, as a potential filter to reduce the amount of markers that will be returned.

Then the following would be received, were there to be any marker:

<iq from='user-bare-jid' to='user-jid' id='iq-unique-id' type='result'>\n  <query xmlns='esl:xmpp:smart-markers:0' peer='peer-bare-jid'>\n    <marker from='<sender-bare-jid>' id='<message-id>' type='<type>' timestamp='<RFC3339>' [thread='<thread-id>']/>\n  </query>\n</iq>\n
where peer-bare-jid matches the requested bare jid and the subelements are marker xml payloads with the following attributes:

  • <id> is the message id associated to this marker.
  • <type> is a marker as described in XEP-0333.
  • <timestamp> contains an RFC3339 timestamp indicating when the marker was sent
  • <thread> is an optional attribute that indicates if the marker refers to a specific thread in the conversation, or the main conversation if absent.
  • <sender-bare-jid> is the bare jid of the peer who sent the marker, which can be the requester itself, or any peer in the conversation, for both 1:1 chats or groupchats.
"},{"location":"open-extensions/smart-markers/#example-11","title":"Example: 1:1","text":"
<!-- Alice fetches markers in her conversation with Bob -->\n<iq id='iq-unique-id' type='get'>\n  <query xmlns='esl:xmpp:smart-markers:0' peer='bob@localhost' />\n</iq>\n\n<!-- She receives as an answer -->\n<iq from='alice@localhost' to='alice@localhost/res1' id='iq-unique-id' type='result'>\n  <query xmlns='esl:xmpp:smart-markers:0' peer='bob@localhost'>\n    <marker from='alice@localhost' id='ABCDEFGHIJ' type='displayed' timestamp='2022-02-26T09:11:05.634232Z'/>\n    <marker from='bob@localhost' id='KLMNOPQRST' type='displayed' timestamp='2022-02-26T09:11:07.382923Z'/>\n  </query>\n</iq>\n
"},{"location":"open-extensions/smart-markers/#example-groupchats","title":"Example: groupchats","text":"
<!-- Alice fetches markers in a groupchat -->\n<iq id='iq-unique-id' type='get'>\n  <query xmlns='esl:xmpp:smart-markers:0' peer='room@muc.localhost' />\n</iq>\n\n<!-- She receives as an answer -->\n<iq from='alice@localhost' to='alice@localhost/res1' id='iq-unique-id' type='result'>\n  <query xmlns='esl:xmpp:smart-markers:0' peer='room@muc.localhost'>\n    <marker from='alice@localhost' id='XOLWEMUNTO' type='displayed' timestamp='2022-02-26T09:11:05.634232Z'/>\n    <marker from='bob@localhost' id='NNTMWMKSOE' type='displayed' timestamp='2022-02-26T09:11:07.382923Z'/>\n    <marker from='mike@localhost' id='OSNTETNHUR' type='displayed' timestamp='2022-02-26T09:13:07.382923Z'/>\n    <marker from='kate@localhost' id='SNWMENSTUH' type='displayed' timestamp='2022-02-26T09:12:07.382923Z'/>\n  </query>\n</iq>\n
"},{"location":"open-extensions/token-reconnection/","title":"Token-based reconnection","text":""},{"location":"open-extensions/token-reconnection/#introduction","title":"Introduction","text":"

Automatic reconnection after spurious disconnection is a must-have feature in modern IM applications. One way of providing this feature is storing the user login information on the disk. Here you need to balance two values - security and convenience for the end-user. To put it simply: storing passwords in plaintext is inherently insecure while protecting the XMPP password with a master-password damages the user experience. With a token-based authentication mechanism, the user has to provide login information only once, for the initial connection to the XMPP server, and can later rely on the application's automatic use of tokens for subsequent reconnections.

Reconnecting to the XMPP server usually means that the client has to go through the same long process of SASL challenge-response exchange which may cause noticeable lags, especially while using SCRAM-based mechanisms. Providing a token to the XMPP server is secure and doesn't require multiple challenge-response roundtrips, therefore might significantly speed up reconnection times.

"},{"location":"open-extensions/token-reconnection/#requirements","title":"Requirements","text":"

This extension requires the client application to authenticate to the XMPP server using a regular XMPP authentication mechanism like SCRAM-SHA-1 at least once.

After that, the following authentications may be done using X-OAUTH SASL mechanism with a token obtained from the server.

To enable the feature, modules mod_auth_token and mod_keystore have to be enabled on the server. For more details regarding the configuration see mod_auth_token documentation and mod_keystore.

"},{"location":"open-extensions/token-reconnection/#token-types","title":"Token types","text":"Token Type Description Access token These are short lived tokens whose grants aren't tracked by the server (i.e. there's no need to store anything in a database). Access tokens can be used as a payload for the X-OAUTH authentication mechanism and grant access to the system. Access tokens can't be revoked. An access token is valid only until its expiry date is reached. Refresh token These are longer lived tokens which are tracked by the server, and therefore require persistent storage. Refresh tokens can be used as a payload for the X-OAUTH authentication mechanism and grant access to the system, as well as result in a new set of tokens being returned upon successful authentication. Refresh tokens can be revoked. A refresh token is valid until it has expired, unless it has been revoked. On revocation, it immediately becomes invalid. As the server stores information about granted tokens, it can also persistently mark them as revoked.

While only two token types have been described above, implementations might use other token types for specific purposes. For example, a particular token type could limit the access privileges of a user logged into the system or denote an affiliation with a Multi User Chat room. None of such capability grants are a subject of this specification though.

"},{"location":"open-extensions/token-reconnection/#use-cases","title":"Use cases","text":""},{"location":"open-extensions/token-reconnection/#obtaining-a-token","title":"Obtaining a token","text":"

After authenticating with some other mechanism like SCRAM-SHA-1, a client may request a token from the server by sending the following iq get to its own bare JID:

Client requests tokens

<iq type='get' to='alice@wonderland.com' id='123'>\n    <query xmlns='erlang-solutions.com:xmpp:token-auth:0'/>\n</iq>\n

Server responds with tokens

<iq from=\"alice@wonderland.com\" type=\"result\" to=\"alice@wonderland.com/resource\" id=\"123\">\n  <items xmlns=\"erlang-solutions.com:xmpp:token-auth:0\">\n    <access_token>YWNjZXNzAGFsaWNlQHdvbmRlcmxhbmQuY29tL01pY2hhbC1QaW90cm93c2tpcy1NYWNCb29rLVBybwA2MzYyMTg4Mzc2NAA4M2QwNzNiZjBkOGJlYzVjZmNkODgyY2ZlMzkyZWM5NGIzZjA4ODNlNDI4ZjQzYjc5MGYxOWViM2I2ZWJlNDc0ODc3MDkxZTIyN2RhOGMwYTk2ZTc5ODBhNjM5NjE1Zjk=</access_token>\n    <refresh_token>cmVmcmVzaABhbGljZUB3b25kZXJsYW5kLmNvbS9NaWNoYWwtUGlvdHJvd3NraXMtTWFjQm9vay1Qcm8ANjM2MjMwMDYxODQAMQAwZGQxOGJjODhkMGQ0N2MzNTBkYzAwYjcxZjMyZDVmOWIwOTljMmI1ODU5MmNhN2QxZGFmNWFkNGM0NDQ2ZGU2MWYxYzdhNTJjNDUyMGI5YmIxNGIxNTMwMTE4YTM1NTc=</refresh_token>\n  </items>\n</iq>\n
"},{"location":"open-extensions/token-reconnection/#authentication-with-an-access-token","title":"Authentication with an access token","text":"

Client authenticates with an access token

<auth xmlns=\"urn:ietf:params:xml:ns:xmpp-sasl\" mechanism=\"X-OAUTH\">\nYWNjZXNzAGFsaWNlQHdvbmRlcmxhbmQuY29tL01pY2hhbC1QaW90cm93c2tpcy1NYWNCb29rLVBybwA2MzYyMTg4Mzc2NAA4M2QwNzNiZjBkOGJlYzVjZmNkODgyY2ZlMzkyZWM5NGIzZjA4ODNlNDI4ZjQzYjc5MGYxOWViM2I2ZWJlNDc0ODc3MDkxZTIyN2RhOGMwYTk2ZTc5ODBhNjM5NjE1Zjk=\n</auth>\n
Server responds with a success

<success xmlns=\"urn:ietf:params:xml:ns:xmpp-sasl\"/>\n
"},{"location":"open-extensions/token-reconnection/#authentication-with-a-refresh-token","title":"Authentication with a refresh token","text":"

In this situation server will respond with a new refresh token which SHOULD be used in future authentication.

Client authenticates with a refresh token

<auth xmlns=\"urn:ietf:params:xml:ns:xmpp-sasl\" mechanism=\"X-OAUTH\">\ncmVmcmVzaABhbGljZUB3b25kZXJsYW5kLmNvbS9NaWNoYWwtUGlvdHJvd3NraXMtTWFjQm9vay1Qcm8ANjM2MjMwMDYxODQAMQAwZGQxOGJjODhkMGQ0N2MzNTBkYzAwYjcxZjMyZDVmOWIwOTljMmI1ODU5MmNhN2QxZGFmNWFkNGM0NDQ2ZGU2MWYxYzdhNTJjNDUyMGI5YmIxNGIxNTMwMTE4YTM1NTc=\n</auth>\n

Server responds with a success and a new refresh token

<success xmlns=\"urn:ietf:params:xml:ns:xmpp-sasl\">\ncmVmcmVzaABhbGljZUB3b25kZXJsYW5kLmNvbS9NaWNoYWwtUGlvdHJvd3NraXMtTWFjQm9vay1Qcm8ANjM2MjMwMDYxODQAMgAwZGQxOGJjODhkMGQ0N2MzNTBkYzAwYjcxZjMyZDVmOWIwOTljMmI1ODU5MmNhN2QxZGFmNWFkNGM0NDQ2ZGU2MWYxYzdhNTJjNDUyMGI5YmIxNGIxNTMwMTE4YTM1NTc=\n</success>\n
"},{"location":"open-extensions/token-reconnection/#token-format","title":"Token format","text":"

All tokens are exchanged as Base64 encoded binary data. Serialization format of the token before encoding with Base64 is dependent on its type. Common parts in every token are BARE_JID and EXPIRES_AT. EXPIRES_AT is a timestamp saying when a given token will expire. \\0 stands for the ASCII null character (i.e. byte 0). Text in single quotes ('example') is literal. ALL_CAPS denote parameters.

"},{"location":"open-extensions/token-reconnection/#access-token-format","title":"Access token format","text":"
BASE64_encode\n        ('access', \\0, BARE_JID, \\0, EXPIRES_AT, \\0, DATA)\n

Example (please note the line break was added only for readability):

'access' \\0 Q8@wonderland.com \\0 64875466454\n    \\0 0acd0a66d06934791d046060cf9f1ad3c2abb3274cc7e7d7b2bc7e2ac4453ed774b6c6813b40ebec2bbc3774d59d4087\n
"},{"location":"open-extensions/token-reconnection/#refresh-token-format","title":"Refresh token format","text":"
BASE64_encode\n        ('refresh', \\0, BARE_JID, \\0, EXPIRES_AT, \\0, SEQUENCE_NO, \\0, DATA)\n

Example (please note the line break was added only for readability):

'refresh' \\0 qp@wonderland.com \\0 64875466457 \\0 6\n    \\0 8f57cb019cd6dc6e7779be165b9558611baf71ee4a40d03e77b78b069f482f96c9d23b1ac1ef69f64c1a1db3d36a96ad\n
"},{"location":"operation-and-maintenance/Cluster-configuration-and-node-management/","title":"Cluster configuration and node management","text":""},{"location":"operation-and-maintenance/Cluster-configuration-and-node-management/#environment-configuration","title":"Environment configuration","text":""},{"location":"operation-and-maintenance/Cluster-configuration-and-node-management/#file-descriptors","title":"File descriptors","text":"

To handle large traffic, some of the system variables need to be tuned. Number one on that list is the maximum number of file descriptors which often is set to 1024. Each MongooseIM connection consumes ~1 file descriptor, so the default value will not suffice for larger installations - when it is exceeded, emfile errors will appear in logs.

To check the current limit execute: ulimit -n.

To list all limits execute: ulimit -a.

In the example below we set limits for a mongooseim user. To increase the limit the following entries should be added in /etc/security/limits.conf:

mongooseim   soft   nofile   1000000\nmongooseim   hard   nofile   1000000\n

If you are using Ubuntu, all /etc/pam.d/common-session* files should include session required pam_limits.so.

"},{"location":"operation-and-maintenance/Cluster-configuration-and-node-management/#vmargs-file","title":"vm.args file","text":"

This file contains Erlang options used when starting the VM. It is located in REL_ROOT/etc/vm.args where REL_ROOT is the path to a MongooseIM release (ie. _build/prod/rel/mongooseim if you build MongooseIM from source).

When using an SSL/TLS connection we advise to increase ERL_MAX_PORTS to 350000. This value specifies how many ports (files, drivers, sockets etc) can be used by the Erlang VM. Be cautious - it preallocates some structures inside the VM and will have impact on the memory usage. We suggest 350000 for 100\u00a0k users when using an SSL/TLS connection or 250000 in other cases.

To check how memory consumption changes depending on ERL_MAX_PORTS, use the following command:

env ERL_MAX_PORTS=[given value] erl -noinput -eval 'io:format(\"~p~n\",[erlang:memory(system)]).' -s erlang halt\n

Another change you need to make when building a MongooseIM cluster is setting the -sname. To do it, just set the -sname option in vm.args with node's hostname, which must be resolvable on other nodes in the cluster.

"},{"location":"operation-and-maintenance/Cluster-configuration-and-node-management/#port-range","title":"Port range","text":"

To connect to other nodes, a freshly started node uses a port from the range inet_dist_listen_min to inet_dist_listen_max.

To enable this, add the following line to the vm.args file:

-kernel inet_dist_listen_min 50000 inet_dist_listen_max 50010\n

Make sure that the range you set provides enough ports for all the nodes in the cluster.

Remember to keep an epmd port open (port 4369) if any firewall restrictions are required. Epmd keeps track of which Erlang node is using which ports on the local machine.

"},{"location":"operation-and-maintenance/Cluster-configuration-and-node-management/#connecting-nodes","title":"Connecting nodes","text":"

Checklist:

  • working directory rel/mongooseim (root of a MongooseIM release or installation)
  • the same cookie across all nodes (vm.args -setcookie parameter)
  • each node should be able to ping other nodes using its sname (ex. net_adm:ping('mongoose@localhost'))
  • RDBMS backend is configured, so CETS could discover nodes
"},{"location":"operation-and-maintenance/Cluster-configuration-and-node-management/#initial-node","title":"Initial node","text":"CETSMnesia

Clustering is automatic. There is no difference between nodes.

There is no action required on the initial node.

Just start MongooseIM using mongooseim start or mongooseim live.

"},{"location":"operation-and-maintenance/Cluster-configuration-and-node-management/#new-node-joining-cluster","title":"New node - joining cluster","text":"CETSMnesia

Clustering is automatic.

mongooseimctl start\nmongooseimctl started #waits until MongooseIM starts\nmongooseimctl join_cluster ClusterMember\n

ClusterMember is the name of a running node set in vm.args file, for example mongooseim@localhost. This node has to be part of the cluster we'd like to join.

First, MongooseIM will display a warning and a question if the operation should proceed:

Warning. This will drop all current connections and will discard all persistent data from Mnesia. Do you want to continue? (yes/no)\n

If you type yes MongooseIM will start joining the cluster. Successful output may look like the following:

You have successfully joined the node mongooseim2@localhost to the cluster with node member mongooseim@localhost\n

In order to skip the question you can add option -f which will perform the action without displaying the warning and waiting for the confirmation.

"},{"location":"operation-and-maintenance/Cluster-configuration-and-node-management/#leaving-cluster","title":"Leaving cluster","text":"CETSMnesia

Stopping the node is enough to leave the cluster. If you want to avoid the node joining the cluster again, you have to specify a different cluster_name option in the CETS backend configuration. A different Erlang cookie is a good idea too.

To leave a running node from the cluster, call:

mongooseimctl leave_cluster\n

It only makes sense to use it if the node is part of a cluster, e.g. join_cluster was called on that node before.

Similarly to join_cluster a warning and a question will be displayed unless the option -f is added to the command.

The successful output from the above command may look like the following:

The node mongooseim2@localhost has successfully left the cluster\n
"},{"location":"operation-and-maintenance/Cluster-configuration-and-node-management/#removing-a-node-from-the-cluster","title":"Removing a node from the cluster","text":"CETSMnesia

A stopped node would be automatically removed from the node discovery table in RDBMS database after some time. It is needed so other nodes would not try to connect to the stopped node.

To remove another node from the cluster, call the following command from one of the cluster members:

mongooseimctl remove_from_cluster RemoteNodeName\n

where RemoteNodeName is the name of the node that we'd like to remove from our cluster. This command could be useful when the node is dead and not responding and we'd like to remove it remotely. The successful output from the above command may look like the following:

The node mongooseim2@localhost has been removed from the cluster\n
"},{"location":"operation-and-maintenance/Cluster-configuration-and-node-management/#cluster-status","title":"Cluster status","text":"CETSMnesia

Run the command:

mongooseimctl cets systemInfo\n

joinedNodes should contain a list of properly joined nodes:

\"joinedNodes\" : [\n  \"mongooseim@node1\",\n  \"mongooseim@node2\"\n]\n

It should generally be equal to the list of discoveredNodes.

If it is not equal, you could have some configuration or networking issues. You can check the unavailableNodes, remoteNodesWithUnknownTables, and remoteNodesWithMissingTables lists for more information (generally, these lists should be empty). You can read the description for other fields of systemInfo in the GraphQL API reference.

For a properly configured 2 nodes cluster the metrics would show something like that:

mongooseimctl metric getMetrics --name '[\"global\", \"cets\", \"system\"]'\n{\n  \"data\" : {\n    \"metric\" : {\n      \"getMetrics\" : [\n        {\n          \"unavailable_nodes\" : 0,\n          \"type\" : \"cets_system\",\n          \"remote_unknown_tables\" : 0,\n          \"remote_nodes_without_disco\" : 0,\n          \"remote_nodes_with_unknown_tables\" : 0,\n          \"remote_nodes_with_missing_tables\" : 0,\n          \"remote_missing_tables\" : 0,\n          \"name\" : [\n            \"global\",\n            \"cets\",\n            \"system\"\n          ],\n          \"joined_nodes\" : 2,\n          \"discovery_works\" : 1,\n          \"discovered_nodes\" : 2,\n          \"conflict_tables\" : 0,\n          \"conflict_nodes\" : 0,\n          \"available_nodes\" : 2\n        }\n      ]\n    }\n  }\n}\n

You can use the following commands on any of the running nodes to examine the cluster or to see if a newly added node is properly clustered:

mongooseimctl mnesia info | grep \"running db nodes\"\n

This command shows all running nodes. A healthy cluster should contain all nodes here. For example:

running db nodes = [mongooseim@node1, mongooseim@node2]\n
To see stopped or misbehaving nodes the following command can be useful:

mongooseimctl mnesia info | grep \"stopped db nodes\"\n

This command shows which nodes are considered stopped. This does not necessarily indicate that they are down but might be a symptom of a communication problem.

"},{"location":"operation-and-maintenance/Cluster-configuration-and-node-management/#load-balancing","title":"Load Balancing","text":""},{"location":"operation-and-maintenance/Cluster-configuration-and-node-management/#elastic-load-balancer-elb","title":"Elastic Load Balancer (ELB)","text":"

When using ELB please be advised that some warm-up time may be needed before the load balancer works efficiently for a big load.

"},{"location":"operation-and-maintenance/Cluster-configuration-and-node-management/#software-load-balancer","title":"Software load balancer","text":"

Good examples of load balancing on the application layer are HAProxy and Nginx.

"},{"location":"operation-and-maintenance/Cluster-configuration-and-node-management/#dns-based-load-balancing","title":"DNS-based load balancing","text":"

Load balancing can be performed on a DNS level. A DNS response can have a number of IP addresses that can be returned to the client side in a random order.

On the AWS stack this type of balancing is provided by Route53. The description of their service can be found in the Route53 Developer's Guide.

"},{"location":"operation-and-maintenance/Cluster-configuration-and-node-management/#other","title":"Other","text":"

The approaches described above can be mixed - we can use DNS load balancing to pick a software load balancer which will select one of the nodes.

"},{"location":"operation-and-maintenance/Cluster-management-considerations/","title":"Cluster management considerations","text":"

These apply to bare metal, virtualization, hypervisor, containers and other technologies.

"},{"location":"operation-and-maintenance/Cluster-management-considerations/#single-node-mongooseim","title":"Single-node MongooseIM","text":"

With a single-node MongooseIM, one can set up a vertically scalable system, that is a function of the server resources. MongooseIM can scale from hundreds to tens of thousands of concurrent users.

Note that in a single-node MongooseIM, there is no load distribution, and no fallback or failover in case of a failure.

This architecture is suitable for low-scale deployments, such as testing and development environments on embedded devices, personal computers, or servers.

"},{"location":"operation-and-maintenance/Cluster-management-considerations/#dual-node-mongooseim","title":"Dual-node MongooseIM","text":"

With a dual-node MongooseIM, one can set up a vertically scalable system, that is a function of the servers' resources. We recommend that servers with the same power are used. Both nodes can handle different sets of services, given that these non-MongooseIM services consume roughly the same resources on both servers. In this setup, MongooseIM can scale up to hundreds of thousands of concurrent users.

In a dual-node MongooseIM, there is a 50-50 load distribution - there is a possible fallback or failover in case of a node failure. Please keep in mind that to avoid degrading the service the remaining node should be able to handle the full load when necessary.

This setup is applicable to low to mid-scale deployments used f.e. for functional and load testing. We recommend using real dedicated servers, although MongooseIM could run in a cluster mode with low-power machines, such as embedded devices.

This setup provides better fault tolerance and robustness than the single-node but it's not recommended for production environments. The minimum recommended production setup is 3 nodes.

"},{"location":"operation-and-maintenance/Cluster-management-considerations/#multi-node-mongooseim","title":"Multi-node MongooseIM","text":"

With a multi-node MongooseIM, one can set up a system that is highly scalable both vertically and horizontally and that is still a function of the servers' resources. We recommend that servers with the same power are used. We also recommend that all the nodes handle the same set of services. In this setup, MongooseIM can scale up to tens of millions of concurrent users.

In a multi-node MongooseIM, with n nodes, there is a 1/n load distribution - there is a possible fallback or failover in case of a node failure. To avoid degrading the service the remaining nodes should be able to handle 1/(n-1) load when necessary.

This setup fits mid and large-scale deployments, such as production environments. We recommend using real dedicated, powerful servers.

"},{"location":"operation-and-maintenance/Cluster-management-considerations/#multi-datacenter-mongooseim","title":"Multi-datacenter MongooseIM","text":"

With a multi-datacenter MongooseIM, one can set up a system that is highly scalable both vertically and horizontally. The MongooseIM clusters are simply distributed across continents for local, low-lag client connections, and the clusters are interconnected via high-speed links. In this setup, MongooseIM can scale up to hundreds of millions of concurrent users.

This applies to large and very large-scale deployments.

We advise contacting us in case of such a big deployment.

"},{"location":"operation-and-maintenance/Cluster-management-considerations/#summary-table","title":"Summary table","text":"

Setup: reflects the number of nodes in your cluster. Purpose: is the goal and use of this cluster. Low-end: number of concurrent users on low-power machines, such as laptops, embedded devices, entry-level cloud or bare metal. High-end: number of concurrent users on powerful machines, with lots of memory, multi-core CPU, whether in cloud or bare metal.

Setup Purpose Low-end High-end Single-node Functional testing, development 100 to 10k 100k to 500k Dual-node Low-end system, load testing 1k to 100k 1M to 3M Multi-node High-end production system 10k to 1M 2M to 10M Multi-datacenter Very large scale production system 100k to 10M 10M to 100M"},{"location":"operation-and-maintenance/Cluster-management-considerations/#important-notes","title":"Important notes","text":"

Scalability highly depends on variables such as:

  • machine's power, such as memory, CPU, I/O
  • the type of concurrent users:
    • most iOS apps are not connected in the background, they use APNS to push info to the device
    • web clients use websockets, with fallback on BOSH (HTTP long-polling)
    • client-side and backend-side REST API
  • how much archiving is needed and the latency for storage and querying, which depends a lot on storage backend architecture
  • message throughput:
    • one-to-one
    • MUC
    • MUC light
    • PubSub
    • Presences
    • HTTP notifications (may include queuing systems such as RabbitMQ or Kafka)
  • latency of messaging, both real-time and archived messages
"},{"location":"operation-and-maintenance/Cluster-management-considerations/#os-configuration","title":"OS configuration","text":"

To achieve high scalability you have to adjust the configuration of your operating system.

First, set some network related parameters - this is what we use for load testing:

Parameter Value net.ipv4.ip_local_port_range 1024 65535 net.ipv4.tcp_mem 16777216 16777216 16777216 net.ipv4.tcp_wmem 4096 87380 16777216 net.ipv4.tcp_rmem 4096 87380 16777216

Then, you have to increase the number of file descriptors allowed for the user running your MongooseIM server process. In Linux, this is most commonly done in /etc/security/limits.conf. You should remember, though, that there is a limit to it \u2014 you can't increase it above an upper bound which is set by the fs.file-max kernel parameter. And there is a limit to a possible increase in fs.file-max as well \u2014 you can't increase it beyond 1048576, which is 2^20 and is set by another kernel parameter, fs.nr_open. Once you increase that one, you are good to go.

"},{"location":"operation-and-maintenance/Cluster-restart/","title":"Cluster restart","text":"

When you are using a MongooseIM cluster that uses the Mnesia backend for any extensions, an issue related to the distributed Mnesia nodes can occur.

"},{"location":"operation-and-maintenance/Cluster-restart/#how-to-restart-a-cluster","title":"How to restart a cluster:","text":"

Having Node A and Node B, the cluster restart procedure should occur in the following way:

Start the nodes in the opposite order to the one in which they were stopped. The first node you restart should be the last one to go down. For a cluster with 3 nodes, after stopping the nodes ABC, they should be started in CBA order.

"},{"location":"operation-and-maintenance/Cluster-restart/#how-not-to-restart-a-cluster","title":"How NOT to restart a cluster:","text":"

Having Node A and Node B.

When the nodes are stopped in AB order, starting the node A first can result in issues related to the distributed Mnesia nodes and not bring up a node that is fully operational.

Changing the order of the restarted nodes can cause issues with distributed Mnesia. Make sure to follow the recommendations if you are using Mnesia backend for any of the extensions. Please note that for some of the extensions, the Mnesia backend is set by default without having that configured explicitly in the configuration file.

For more information related to the cluster configuration and maintenance, please see Cluster configuration and node management section.

"},{"location":"operation-and-maintenance/Humio/","title":"Humio and MongooseIM","text":""},{"location":"operation-and-maintenance/Humio/#getting-humios-ingest-token","title":"Getting Humio's ingest token","text":"

Visit this url to create a new sandbox's ingest token.

The URL is:

https://cloud.humio.com/YOUR_REPOSITORY_NAME_HERE/settings/ingest-tokens\n
"},{"location":"operation-and-maintenance/Humio/#configure-filebeat","title":"Configure Filebeat","text":"

Configure Filebeat, using this config file priv/filebeat.mongooseim.humio.yml.

We recommend using the Filebeat docker container. You have to use an open-source version of Filebeat, which has the oss suffix.

This example mounts a log directory $(pwd)/_build/mim1/rel/mongooseim/log as a volume for Filebeat. It also mounts a configuration file $(pwd)/priv/filebeat.mongooseim.humio.yml. Most likely these paths would be different on your machine.

Pass your Humio ingest token as a password argument. Or uncomment and change it inside the filebeat.mongooseim.humio.yml file.

docker run -d \\\n    --name mongooseim-filebeat \\\n    -v \"$(pwd)/_build/mim1/rel/mongooseim/log:/usr/lib/mongooseim/log\" \\\n    -v \"$(pwd)/priv/filebeat.mongooseim.humio.yml:/usr/share/filebeat/filebeat.yml:ro\" \\\n    docker.elastic.co/beats/filebeat-oss:7.9.2 \\\n    filebeat -e -E output.elasticsearch.password=\"abc12345-xxxx-yyyy-zzzz-123456789abc\"\n

Argument -e enables debugging information for Filebeat that can be visible using the docker logs mongooseim-filebeat command.

"},{"location":"operation-and-maintenance/Humio/#viewing-logs","title":"Viewing logs","text":"

Navigate to https://cloud.humio.com/sandbox/search to see the Sandbox's dashboard.

A list of log messages:

Structured log message:

"},{"location":"operation-and-maintenance/Logging-%26-monitoring/","title":"Logging & monitoring","text":""},{"location":"operation-and-maintenance/Logging-%26-monitoring/#logs","title":"Logs","text":"

We strongly recommend storing logs in one centralized place when working in a clustered environment. MongooseIM uses the standard OTP logging framework: Logger. Its handlers can be replaced and customised, according to Logger's documentation.

"},{"location":"operation-and-maintenance/Logging-%26-monitoring/#syslog-integration","title":"Syslog integration","text":"

MongooseIM uses syslogger as a Logger handler for syslog. To activate it you have to add syslogger to the applications section in src/mongooseim/app.src:

%% syslogger, % uncomment to enable a logger handler for syslog\n

You also need to edit rel/files/app.config and uncomment the lines:

 % Uncomment these lines to enable logging to syslog.\n % Remember to add syslogger as a dependency in mongooseim.app.src.\n%% {syslogger, [\n%%     {ident, \"mongooseim\"},\n%%     {logger, [\n%%         {handler, sys_log, syslogger,\n%%          #{formatter => {logger_formatter, #{single_line => true}}}}]}]\n%% },\n

You can provide different parameters to change the handler's behaviour as described in the syslogger's GitHub page:

  • ident - a string to tag all the syslog messages with. The default is mongooseim.
  • facility - the facility to log to (see the syslog documentation).
  • log_opts - see the syslog documentation for the description.

Depending on the system you use, remember to also add the appropriate line in the syslog config file. For example, if the facility local0 is set:

local0.info                     /var/log/mongooseim.log\n

All the logs of level info should be passed to the /var/log/mongooseim.log file.

Example log (e.g tail -f /var/log/mongooseim.log):

Apr  1 12:36:49 User.local mongooseim[6068]: [info] <0.7.0> Application mnesia started on node mongooseim@localhost\n
"},{"location":"operation-and-maintenance/Logging-%26-monitoring/#further-multiserver-integration","title":"Further / multiserver integration","text":"

For more advanced processing and analysis of logs, including gathering logs from multiple machines, you can use one of the many available systems (e.g. logstash/elasticsearch/kibana, graylog, splunk), by redirecting mongoose logs to such service with an appropriate Logger's handler.

Check Logging for more information.

"},{"location":"operation-and-maintenance/Logging-%26-monitoring/#monitoring","title":"Monitoring","text":""},{"location":"operation-and-maintenance/Logging-%26-monitoring/#wombatoam","title":"WombatOAM","text":"

WombatOAM is an operations and maintenance framework for Erlang based systems. Its Web Dashboard displays this data in an aggregated manner. Additionally, WombatOAM provides interfaces to feed the data to other OAM tools such as Graphite, Nagios or Zabbix.

For more information see: WombatOAM.

"},{"location":"operation-and-maintenance/Logging-%26-monitoring/#graphite-collectd","title":"graphite-collectd","text":"

To monitor MongooseIM during load testing, we recommend the following open source applications:

  • Grafana is used for data presentation.
  • Graphite is a server used for metrics storage.
  • collectd is a daemon running on the monitored nodes capturing data related to CPU and Memory usage, IO etc.
"},{"location":"operation-and-maintenance/Logging-%26-monitoring/#plug-in-exometer-reporters","title":"Plug-in Exometer reporters","text":"

MongooseIM uses a fork of Exometer library for collecting metrics. Exometer has many plug-in reporters that can send metrics to external services. We maintain exometer_report_graphite and exometer_report_statsd for Graphite and StatsD respectively. It is possible to enable them in MongooseIM via the app.config file. The file sits next to the mongooseim.toml file in the rel/files and _REL_DIR_/etc directories.

Below you can find a sample configuration. It shows setting up a reporter connecting to graphite running on localhost.

You can see an additional option not listed in the Exometer docs - mongooseim_report_interval, which sets the metrics' resolution, i.e. how often Exometer gathers and sends metrics through reporters. By default, the resolution is set to 60 seconds.

...\n{exometer_core, [\n    {mongooseim_report_interval, 60000}, %% 60 seconds\n    {report, [\n        {reporters, [\n                     {exometer_report_graphite, [\n                                                 {prefix, \"mongooseim\"},\n                                                 {connect_timeout, 5000},\n                                                 {host, \"127.0.0.1\"},\n                                                 {port, 2003},\n                                                 {api_key, \"\"}\n                                                ]}\n                    ]}\n    ]}\n  ]}\n...\n
"},{"location":"operation-and-maintenance/Logging-%26-monitoring/#run-graphite-grafana-in-docker-quick-start","title":"Run Graphite & Grafana in Docker - quick start","text":"

The following commands will download the latest version of kamon/grafana_graphite docker image that contains both Grafana and Graphite, and start them while mounting the local directory ./docker-grafana-graphite-master/data for metric persistence:

curl -SL https://github.com/kamon-io/docker-grafana-graphite/archive/master.tar.gz | tar -xzf -\nmake -C docker-grafana-graphite-master up\n

Go to http://localhost:80 to view the Grafana dashboard that's already set up to use metrics from Graphite.

"},{"location":"operation-and-maintenance/Logging-%26-monitoring/#add-metrics-to-grafana-dashboard","title":"Add metrics to Grafana dashboard","text":"

We recommend the following metrics as a baseline for tracking your MongooseIM installation. For time-based metrics, you can choose to display multiple calculated values for a reporting period - we recommend tracking at least max, median and mean.

Session count:                   <prefix>.global.totalSessionCount.value\nXMPP messages received:          <prefix>.<domain>.xmppMessageReceived.one\nXMPP messages sent:              <prefix>.<domain>.xmppMessageSent.one\nSuccessful logins:               <prefix>.<domain>.sessionSuccessfulLogins.one\nLogouts:                         <prefix>.<domain>.sessionLogouts.one\nAuthorization time:              <prefix>.<domain>.backends.auth.authorize.<value-type>\nRDBMS \"simple\" query time:       <prefix>.<domain>.backends.mongoose_rdbms.query.<value-type>\nRDBMS prepared query time:       <prefix>.<domain>.backends.mongoose_rdbms.execute.<value-type>\nMAM lookups:                     <prefix>.<domain>.mam_lookup_messages.one\nMAM archivization time:          <prefix>.<domain>.backends.mod_mam_pm.archive.<value-type>\nMAM lookup time:                 <prefix>.<domain>.backends.mod_mam_pm.lookup.<value-type>\nMAM private messages flush time: <prefix>.<domain>.mod_mam_rdbms_async_pool_writer.flush_time.<value-type>\nMAM MUC messages flush time:     <prefix>.<domain>.mod_mam_muc_rdbms_async_pool_writer.flush_time.<value-type>\n

Note that RDBMS metrics are only relevant if MongooseIM is configured with an RDBMS backend, MAM metrics when mod_mam is enabled and MAM flush times when MAM is configured with an RDBMS backend with async_writer option (default).

"},{"location":"operation-and-maintenance/Logging-%26-monitoring/#example-graph-in-grafana","title":"Example graph in Grafana","text":"

This screenshot shows a graph plotting the RDBMS simple query time metric mentioned above. The graph is plotted for three nodes with each node having a different prefix: mongoose.node1, mongoose.node2 and mongoose.node3.

The queries take metrics for all nodes and all domains (** is a wildcard for multiple parts of the metric name) and group them per-node and per-value-type (respectively 1st and -1st part of the metric's name). Parts of the names are indexed from 0.

Time-based metrics in MongooseIM are given in microseconds, so to display human-readable values in graph's legend, the Y-axis unit has to be edited on the Axes tab.

"},{"location":"operation-and-maintenance/Logging-fields/","title":"Fields","text":"
  • reason, class, stacktrace: standard error catching fields.
  • module, function, line, timestamp, node, when, pid: reserved fields (could be used by logger itself).
  • When logging IQs, adding the acc field should be enough. If acc not available, iq can be used. If iq is not available, sub_el could be logged as a last option.
  • what: why we are logging. We often use the function name as the what field.
    • Suffixes: If something goes wrong, use a _failed suffix (instead of unable_to and _error). The most common suffixes are _starting, _started, _stopping, _stopped, and _result.
    • Prefixes: We sometimes add prefixes to what to signal where we are logging from. Such prefixes should be short. Please, don't prefix with the complete module name. Some examples for prefixes are: mam_, sm_, muc_, auth_, s2s_, pool_.

When checking the final event name, remove duplicates from it.

Bad event names Good event names Why s2s_dns_error s2s_dns_lookup_failed No _failed suffix s2s_dns_error s2s_dns_lookup_timeout More specific failure reason mod_mam_starting mam_starting Use mam_ prefix for MAM modules mongoose_wpool_mgr_pool_starting pool_starting Too long and repetitive"},{"location":"operation-and-maintenance/Logging-fields/#logger-defaults","title":"Logger defaults","text":"

Timestamp should be ordered first when possible, so that sorting is automatic.

Name Type Description Examples timestamp atom The timestamp (with timezone information) 2018-07-11T13:41:10+00:00 at string Where in code the call or log line was emitted module:function:line level enum log level according to RFC 5424 warning"},{"location":"operation-and-maintenance/Logging-fields/#generally-required","title":"Generally required","text":"Name Type Description Examples Notes what atom Event (or issue) name remove_user_failed text binary Human readable description <<\"MAM failed to contact MySQL\">> result binary Explanation of the what key failed Optional tags [atom] The subcomponent taking action and logging data. [c2s, presence], [mam, rdbms] This category should be chosen based on filtering needs, and may represent the domain of concern for some operations"},{"location":"operation-and-maintenance/Logging-fields/#http-requests","title":"HTTP requests","text":"Name Type Description Examples Notes path binary HTTP path <<\"/api/add_user\">> code integer HTTP code 200 ip tuple IP address inet:ip_address() port integer TCP/UDP port number 5222 peer tuple peer() :: {inet:ip_address(), inet:port_number()} {{127,0,0,1},5222} req map Cowboy request Provide when available reply_body binary Body reply <<\"ok\">>"},{"location":"operation-and-maintenance/Logging-fields/#xmpp","title":"XMPP","text":"Name Type Description Examples Notes acc map mongoose_acc, used to extract fields #{...} user binary Local Username <<\"alice\">> Use #jid.luser when available server binary Local Server (host) name <<\"localhost\">> Use #jid.lserver when available sub_host binary Subhost when MUC or pubsub are used <<\"muc.localhost\">> It's not the same as server remote_user binary Remote Username (usually who makes IQ requests) <<\"alice\">> Use #jid.luser when available remote_server binary Remote Server (usually who makes IQ requests) <<\"otherhost\">> Use #jid.lserver when available iq record MongooseIM IQ record #iq{} Provide when available (but it could be acc instead) 
sub_el record IQ sub element #xmlel{} Provide ONLY if iq not available c2s_state record C2S process state, that would be used by formatter #state{} from_jid binary Accumulator's from_jid <<\"alice@localhost\">> to_jid binary Accumulator's to_jid <<\"to@localhost\">> packet binary Accumulator's element <<\"<message>...\">> Encoded as XML, not erlang records exml_packet record Same as packet, but in #xmlel{} format #xmlel{} Record, formatted in formatter"},{"location":"operation-and-maintenance/Logging-fields/#other-requests","title":"Other requests","text":"Name Type Description Examples Notes duration integer Duration of some operation in milliseconds 5000 Don't use it for microseconds state_name atom State name in gen_fsm wait_for_stream state term gen_server state #state{} Consider adding a formatter call_from tuple From argument in gen_server's handle_call {Pid, Tag}"},{"location":"operation-and-maintenance/Logging-fields/#when-logging-exceptions","title":"When logging exceptions","text":"

The what key should contain an _exception suffix. The following keys should be present:

Name Type Description Examples Notes class enum catch Class:Reason:Stacktrace error reason term catch Class:Reason:Stacktrace http_timeout stacktrace term catch Class:Reason:Stacktrace [...] Formatted by formatter"},{"location":"operation-and-maintenance/Logging-fields/#macros-for-logging-unexpected-requests","title":"Macros for logging unexpected requests","text":"

gen_server processes sometimes receive messages they couldn't process. We use macros to log such events (just because you would need them in each gen_server module).

We don't need to log state or state names for such events.

%% We don't always handle unexpected calls.\nhandle_call(Request, From, State) ->\n    ?UNEXPECTED_CALL(Request, From),\n    {reply, {error, unexpected_call}, State}.\n\n%% We don't always handle unexpected casts.\nhandle_cast(Msg, State) ->\n    ?UNEXPECTED_CAST(Msg),\n    {noreply, State}.\n\n%% We SHOULD ignore all unexpected messages, because they could arrive in case\n%% of gen_server call timeouts.\nhandle_info(Msg, State) ->\n    ?UNEXPECTED_INFO(Msg),\n    {noreply, State}.\n

These macros translate into warning logs with the following keys, respectively:

#{what => unexpected_cast, msg => Msg}.\n#{what => unexpected_info, msg => Msg}.\n#{what => unexpected_call, msg => Msg, call_from => From}.\n
"},{"location":"operation-and-maintenance/Logging/","title":"Configuring logging","text":"

The main configuration for logging is in the Application Config file. You can find it in mongooseim/etc/app.config in the release directory.

"},{"location":"operation-and-maintenance/Logging/#primary-log-level","title":"Primary log level","text":"

Primary log level sets maximum log level in the system. This check is applied for any event in the system before the event is passed to any handler.

Primary log level, that is used before MongooseIM config is loaded:

[\n    {kernel, [\n         {logger_level, notice}\n    ]}\n].\n

Once MongooseIM config is loaded, the loglevel option from mongooseim.toml is used instead.

"},{"location":"operation-and-maintenance/Logging/#primary-filters","title":"Primary filters","text":"

Functions from the filters section are applied for any message once it passes the primary log level check.

Keep that configuration block as it is, unless you are planning to extend the filtering logic.

[{kernel, [\n  {logger, [\n    %% Default filters applied to all events before passing them to handlers:\n    {filters, log, [\n           %% If we want to see complete accumulator in logs\n        %  {preserve_acc_filter, {fun mongoose_log_filter:preserve_acc_filter/2, no_state}},\n           {format_packet_filter, {fun mongoose_log_filter:format_packet_filter/2, no_state}},\n           {format_acc_filter, {fun mongoose_log_filter:format_acc_filter/2, no_state}},\n           {format_c2s_state_filter, {fun mongoose_log_filter:format_c2s_state_filter/2, no_state}},\n           {format_stacktrace_filter, {fun mongoose_log_filter:format_stacktrace_filter/2, no_state}}\n        ]},\n....\n}}].\n

preserve_acc_filter filter is disabled by default, but could be enabled, if you are interested in debugging the accumulator logic (see the mongoose_acc module).

"},{"location":"operation-and-maintenance/Logging/#shell-log-handler","title":"Shell log handler","text":"
  • Controls what MongooseIM prints to the standard output.
  • Erlang OTP docs for logger_std_h
    {handler, shell_log, logger_std_h, #{\n         %% Default log level for handlers is to log everything, that\n         %% passes primary log level and module log levels\n         level => all,\n         formatter => {mongoose_flatlog_formatter, #{\n           map_depth => 3,\n           term_depth => 50\n         }}\n    }},\n
"},{"location":"operation-and-maintenance/Logging/#file-log-handler","title":"File log handler","text":"
  • Controls what and how MongooseIM prints into files.
  • Erlang OTP docs for logger_disk_log_h
  • You can have several file handlers.
  • File handlers should have different handler IDs (i.e. disk_log, disk_json_log)
  • There are two file log handlers defined by default: one that formats in JSON and one that formats in Logfmt format (key=value pairs).
  • Both JSON and Logfmt handlers are enabled by default. We recommend disabling handlers that you are not using. This could improve performance greatly. To disable them, just remove them from app.config.
  • Check information below about log formatters.
    {handler, disk_log, logger_disk_log_h, #{\n         level => all,\n         config => #{\n           file => \"{{mongooseim_log_dir}}/mongooseim.log\",\n           type => wrap,\n           max_no_files => 5,\n           max_no_bytes => 2097152,\n           sync_mode_qlen => 2000, % If sync_mode_qlen is set to the same value as drop_mode_qlen,\n           drop_mode_qlen => 2000, % synchronous mode is disabled. That is, the handler always runs\n           flush_qlen => 5000,     % in asynchronous mode, unless dropping or flushing is invoked.\n           overload_kill_enable => true\n           % Documentation about Overload protection, together with default values, can be found here:\n           % http://erlang.org/doc/apps/kernel/logger_chapter.html#protecting-the-handler-from-overload\n         },\n         formatter => ...\n    }},\n
"},{"location":"operation-and-maintenance/Logging/#logfmt-file-log-handler","title":"Logfmt file log handler","text":"

Wrapper around the flatlog library with custom template options configured by default.

Options:

  • map_depth - the maximum depth to format maps. map_depth => 3 means that the map #{one => #{two => #{three => #{four => key}}}} would be printed as one_two_three_four=.... While the map #{one => #{two => #{three => key}}} would be still printed as one_two_three=key.
  • term_depth - the maximum depth to which terms are printed. Anything below this depth is replaced with .... unlimited by default.
formatter => {mongoose_flatlog_formatter, #{\n  map_depth => 3,\n  term_depth => 50\n}}\n
"},{"location":"operation-and-maintenance/Logging/#json-file-log-handler","title":"JSON file log handler","text":"

JSON formatted file. It could be used to store messages in ELK, in Humio or in Splunk.

Check this tutorial to configure MongooseIM with Humio. Check below information to configure MongooseIM with ELK.

You can use Filebeat to send messages from the file into ELK.

Options:

  • format_depth - the maximum depth to which terms are printed. Anything below this depth is replaced with .... unlimited by default.
  • format_chars_limit - A soft limit on the number of characters when printing terms. When the number of characters is reached, remaining structures are replaced by \"...\". format_chars_limit defaults to unlimited, which means no limit on the number of characters returned.
  • depth - the maximum depth for json properties. Default is unlimited. Options deeper than the depth are replaced with the ... string.
formatter => {mongoose_json_formatter, #{\n  format_depth => 10,\n  format_chars_limit => 3000,\n  depth => 10\n}}\n
"},{"location":"operation-and-maintenance/Logging/#different-log-level-for-a-specific-module","title":"Different log level for a specific module","text":"

Motivation:

  • Sometimes we are interested in debug messages from a particular module.
  • Useful to debug new or experimental modules.

This example:

  • Changes log level for one particular module.
  • Forwards the log messages to any enabled handler.

Changes:

  • Enable module log level for mongoose_c2s.
%% Module log level\n{module_level, debug, [mongoose_c2s]},\n
"},{"location":"operation-and-maintenance/Logging/#separate-log-for-module-debugging","title":"Separate log for module debugging","text":"

Motivation:

  • Sometimes we are only interested in log messages from one particular module.
  • Useful for debugging and development.
  • Does not affect overload protection in other handlers.

This example:

  • Forwards all logging from a module mongoose_c2s to a separate file.
  • Keeps the other handlers intact.

Changes:

  • Modify any existing handler to explicitly set log level.
  • Enable module log level for mongoose_c2s.
  • Add a new custom handler into kernel.logger options.

Issues:

  • This would also disable module log level logic for other handlers.
%% Existing handlers\n{handler, shell_log, logger_std_h, #{\n     level => notice, %% was level => all\n     ...\n},\n{handler, disk_log, logger_disk_log_h, #{\n      level => notice,\n      ...\n},\n...\n%% Module log level\n{module_level, debug, [mongoose_c2s]},\n%% New handler\n{handler, disk_log_c2s, logger_disk_log_h, #{\n     level => debug,\n     config => #{\n       %% Choose destination:\n       file => \"{{mongooseim_log_dir}}/mongoose_c2s.log\",\n       %% Common options:\n       type => wrap,\n       max_no_files => 5,\n       max_no_bytes => 2097152,\n       sync_mode_qlen => 2000,\n       drop_mode_qlen => 2000,\n       flush_qlen => 5000,\n       overload_kill_enable => true\n     },\n     formatter => {mongoose_flatlog_formatter, #{\n       map_depth => 3,\n       term_depth => 50\n     }},\n     filters => [\n       %% That filter matches messages from mongoose_c2s module\n       {module_filter, {fun mongoose_log_filter:filter_module/2, [mongoose_c2s]}}\n     ]\n}}\n
"},{"location":"operation-and-maintenance/Logging/#setting-up-kibana","title":"Setting up Kibana","text":"

This example sets up ElasticSearch and Kibana for development purposes.

Create a network, so filebeat can find ELK:

docker network create logging\n

Run ELK (consult with the container docs for more options):

docker run -d -p 5601:5601 -p 9200:9200 -p 5044:5044 --network logging --name elk sebp/elk:oss-792\n

Create a volume for logs:

docker volume create mongooseim-logs\n

Run MongooseIM daemon:

docker run -d -t -h mongooseim -v mongooseim-logs:/usr/lib/mongooseim/log \\\n    --network logging --name mongooseim -p 5222:5222 mongooseim/mongooseim:latest\n

The next part is based on Filebeat's docs.

Set up Filebeat (this should be done once; it creates indexes in Elasticsearch):

docker run --network logging --rm \\\n    docker.elastic.co/beats/filebeat-oss:7.9.2 \\\n    setup -E setup.kibana.host=elk:5601 \\\n          -E output.elasticsearch.hosts='[\"elk:9200\"]'\n

Create filebeat.mongooseim.yml config file:

filebeat.inputs:\n- paths:\n   - /usr/lib/mongooseim/log/mongooseim.json.1\n  input_type: log\n  json.keys_under_root: true\n  json.add_error_key: true\n  json.overwrite_keys: true\n\nprocessors:\n  # Keep the original \"when\" field too, because of microseconds precision\n  - timestamp:\n      field: when\n      layouts:\n        # Date '2006-01-02T15:04:05.999Z' in mongoose format\n        - '2006-01-02T15:04:05.999+00:00'\n      test:\n        - '2020-09-29T11:25:51.925316+00:00'\n

Create a volume for persistent Filebeat data (so that it does not insert duplicate logs if the mongooseim-filebeat container is recreated):

docker volume create filebeat-data\n

Actually run the Filebeat daemon:

docker run -d \\\n    --network logging \\\n    --name mongooseim-filebeat \\\n    -v mongooseim-logs:/usr/lib/mongooseim/log \\\n    -v filebeat-data:/usr/share/filebeat/data \\\n    -v=\"$(pwd)/filebeat.mongooseim.yml:/usr/share/filebeat/filebeat.yml:ro\" \\\n    docker.elastic.co/beats/filebeat-oss:7.9.2 \\\n    filebeat -e -E output.elasticsearch.hosts='[\"elk:9200\"]'\n

In case you want to store and view logs from a dev server in Elasticsearch:

docker run -d \\\n    --network logging \\\n    --name mongooseim-filebeat \\\n    -v \"$(pwd)/_build/mim1/rel/mongooseim/log:/usr/lib/mongooseim/log\" \\\n    -v=\"$(pwd)/priv/filebeat.mongooseim.yml:/usr/share/filebeat/filebeat.yml:ro\" \\\n    docker.elastic.co/beats/filebeat-oss:7.9.2 \\\n    filebeat -e -E output.elasticsearch.hosts='[\"elk:9200\"]'\n
"},{"location":"operation-and-maintenance/MongooseIM-metrics/","title":"MongooseIM metrics","text":"

MongooseIM by default collects many metrics showing the user behaviour and general system statistics. They are managed by exometer. MongooseIM uses ESL's fork of this project.

All metrics are divided into the following groups:

  • Per host type metrics: Gathered separately for every host type supported by the cluster.

    Warning

    If a cluster supports many (thousands or more) host types, performance issues might occur. To avoid this, use global equivalents of the metrics with all_metrics_are_global config option.

    • Hook metrics. They are created for every hook and incremented on every call to it.
  • Global metrics: Metrics common for all host types.

    • Data metrics. These are misc. metrics related to data transfers (e.g. sent and received stanza size statistics).
    • VM metrics. Basic Erlang VM statistics.
  • Backend metrics: Histograms with timings of calls to various backends.
"},{"location":"operation-and-maintenance/MongooseIM-metrics/#metrics-types","title":"Metrics types","text":""},{"location":"operation-and-maintenance/MongooseIM-metrics/#spiral","title":"spiral","text":"

This kind of metric provides 2 values: total event count (e.g. stanzas processed) and a value in a 60s window (one value). Dividing one value by 60 provides an average per-second value over the last minute.

Example: [{total, 1000}, {one, 20}]

"},{"location":"operation-and-maintenance/MongooseIM-metrics/#value","title":"value","text":"

A simple value. It is actually a one-element proplist: [{value, N}].

Example: [{value, 256}]

"},{"location":"operation-and-maintenance/MongooseIM-metrics/#gauge","title":"gauge","text":"

It is similar to a value type but consists of two properties:

  • value
  • ms_since_reset - Time in milliseconds elapsed from the last metric update.

Example: [{value, 12}, {ms_since_reset, 91761}]

"},{"location":"operation-and-maintenance/MongooseIM-metrics/#proplist","title":"proplist","text":"

A metric which is a nonstandard proplist. You can find the lists of keys in metrics descriptions.

Example: [{total,295941736}, {processes_used,263766824}, {atom_used,640435}, {binary,1513152}, {ets,3942592}, {system,32182072}]

"},{"location":"operation-and-maintenance/MongooseIM-metrics/#histogram","title":"histogram","text":"

A histogram collects values over a sliding window of 60s and exposes the following stats:

  • n - A number of samples.
  • mean - An arithmetic mean.
  • min
  • max
  • median
  • 50, 75, 90, 95, 99, 999 - 50th, 75th, 90th, 95th, 99th and 99.9th percentile
"},{"location":"operation-and-maintenance/MongooseIM-metrics/#per-host-type-metrics","title":"Per host type metrics","text":""},{"location":"operation-and-maintenance/MongooseIM-metrics/#hook-metrics","title":"Hook metrics","text":"

There are more hook metrics than what is listed in this table, because they are automatically created for every new hook. As a result it makes more sense to maintain a list of the most relevant or useful items, rather than keeping this table fully in sync with the code.

Name Type Description (when it gets incremented) [HostType, anonymous_purge] spiral An anonymous user disconnects. [HostType, disco_info] spiral An information about the server has been requested via Disco protocol. [HostType, disco_local_features] spiral A list of server features is gathered. [HostType, disco_local_identity] spiral A list of server identities is gathered. [HostType, disco_local_items] spiral A list of server's items (e.g. services) is gathered. [HostType, disco_sm_features] spiral A list of user's features is gathered. [HostType, disco_sm_identity] spiral A list of user's identities is gathered. [HostType, disco_sm_items] spiral A list of user's items is gathered. [HostType, mam_lookup_messages] spiral An archive lookup is performed. [HostType, offline_message] spiral A message was sent to an offline user. (Except for \"error\", \"headline\" and \"groupchat\" message types.) [HostType, offline_groupchat_message] spiral A groupchat message was sent to an offline user. [HostType, privacy_updated_list] spiral User's privacy list is updated. [HostType, resend_offline_messages] spiral A list of offline messages is gathered for delivery to a user's new connection. [HostType, roster_get_subscription_lists] spiral Presence subscription lists (based on which presence updates are broadcasted) are gathered. [HostType, roster_in_subscription] spiral A presence with subscription update is processed. [HostType, roster_out_subscription] spiral A presence with subscription update is received from a client. [HostType, sm_broadcast] spiral A stanza is broadcasted to all of user's resources. [HostType, unset_presence] spiral A user disconnects or sends an unavailable presence."},{"location":"operation-and-maintenance/MongooseIM-metrics/#presences-rosters","title":"Presences & rosters","text":"Name Type Description (when it gets incremented) [HostType, modPresenceSubscriptions] spiral Presence subscription is processed. 
[HostType, modPresenceUnsubscriptions] spiral Presence unsubscription is processed. [HostType, modRosterGets] spiral User's roster is fetched. [HostType, modRosterPush] spiral A roster update is pushed to a single session. [HostType, modRosterSets] spiral User's roster is updated."},{"location":"operation-and-maintenance/MongooseIM-metrics/#privacy-lists","title":"Privacy lists","text":"Name Type Description (when it gets incremented) [HostType, modPrivacyGets] spiral IQ privacy get is processed. [HostType, modPrivacyPush] spiral Privacy list update is sent to a single session. [HostType, modPrivacySets] spiral IQ privacy set is processed. [HostType, modPrivacySetsActive] spiral Active privacy list is changed. [HostType, modPrivacySetsDefault] spiral Default privacy list is changed. [HostType, modPrivacyStanzaAll] spiral A packet is checked against the privacy list. [HostType, modPrivacyStanzaDenied] spiral Privacy list check resulted in deny. [HostType, modPrivacyStanzaBlocked] spiral Privacy list check resulted in block."},{"location":"operation-and-maintenance/MongooseIM-metrics/#other","title":"Other","text":"Name Type Description (when it gets incremented) [HostType, sessionAuthFails] spiral A client failed to authenticate. [HostType, sessionCount] counter Number of active sessions. [HostType, sessionLogouts] spiral A client session is closed. [HostType, sessionSuccessfulLogins] spiral A client session is opened. [HostType, xmppErrorIq] spiral An error IQ is sent to a client. [HostType, xmppErrorMessage] spiral An error message is sent to a client. [HostType, xmppErrorPresence] spiral An error presence is sent to a client. [HostType, xmppErrorTotal] spiral A stanza with error type is routed. [HostType, xmppMessageBounced] spiral A service-unavailable error is sent, because the message recipient if offline. [HostType, xmppIqSent] spiral An IQ is sent by a client. 
[HostType, xmppMessageSent] spiral A message is sent by a client [HostType, xmppPresenceSent] spiral A presence is sent by a client. [HostType, xmppStanzaSent] spiral A stanza is sent by a client. [HostType, xmppIqReceived] spiral An IQ is sent to a client. [HostType, xmppMessageReceived] spiral A message is sent to a client. [HostType, xmppPresenceReceived] spiral A presence is sent to a client. [HostType, xmppStanzaReceived] spiral A stanza is sent to a client. [HostType, xmppStanzaCount] spiral A stanza is sent to and by a client. [HostType, xmppStanzaDropped] spiral A stanza is dropped due to an AMP rule or a filter_packet processing flow."},{"location":"operation-and-maintenance/MongooseIM-metrics/#extension-specific-metrics","title":"Extension-specific metrics","text":"

Metrics specific to an extension, e.g. Message Archive Management, are described in respective module documentation pages.

"},{"location":"operation-and-maintenance/MongooseIM-metrics/#global-metrics","title":"Global metrics","text":"Name Type Description (when it gets incremented) [global, routingErrors] spiral It is not possible to route a stanza (all routing handlers failed). [global, nodeSessionCount] value A number of sessions connected to a given MongooseIM node. [global, totalSessionCount] value A number of sessions connected to a MongooseIM cluster. [global, uniqueSessionCount] value A number of unique users connected to a MongooseIM cluster (e.g. 3 sessions of the same user will be counted as 1 in this metric). [global, cache, unique_sessions_number] gauge A cached value of uniqueSessionCount. It is automatically updated when a unique session count is calculated. [global, nodeUpTime] value Node uptime. [global, clusterSize] value A number of nodes in a MongooseIM cluster seen by a given MongooseIM node (based on Mnesia). For CETS use global.cets.system.joined_nodes instead. [global, tcpPortsUsed] value A number of open tcp connections. This should relate to the number of connected sessions and databases, as well as federations and http requests, in order to detect connection leaks. [global, processQueueLengths] probe The number of queued messages in the internal message queue of every erlang process, and the internal queue of every fsm (ejabberd_s2s). This is sampled every 30 seconds asynchronously. It is a good indicator of an overloaded system: if too many messages are queued at the same time, the system is not able to process the data at the rate it was designed for."},{"location":"operation-and-maintenance/MongooseIM-metrics/#data-metrics","title":"Data metrics","text":"Metric name Type Description [global, data, xmpp, received, xml_stanza_size] histogram A size (in bytes) of a received stanza after decryption. [global, data, xmpp, sent, xml_stanza_size] histogram A size (in bytes) of a sent stanza before encryption. 
[global, data, xmpp, received, c2s, tcp] spiral A size (in bytes) of unencrypted data received from a client via TCP channel. [global, data, xmpp, sent, c2s, tcp] spiral A size (in bytes) of unencrypted data sent to a client via TCP channel. [global, data, xmpp, received, c2s, tls] spiral A size (in bytes) of a data received from a client via TLS channel after decryption. [global, data, xmpp, sent, c2s, tls] spiral A size (in bytes) of a data sent to a client via TLS channel before encryption. [global, data, xmpp, received, c2s, bosh] spiral A size (in bytes) of a data received from a client via BOSH connection. [global, data, xmpp, sent, c2s, bosh] spiral A size (in bytes) of a data sent to a client via BOSH connection. [global, data, xmpp, received, c2s, websocket] spiral A size (in bytes) of a data received from a client via WebSocket connection. [global, data, xmpp, sent, c2s, websocket] spiral A size (in bytes) of a data sent to a client via WebSocket connection. [global, data, xmpp, received, s2s] spiral A size (in bytes) of a data received via TCP and TLS (after decryption) Server-to-Server connections. [global, data, xmpp, sent, s2s] spiral A size (in bytes) of a data sent via TCP and TLS (before encryption) Server-to-Server connections. [global, data, xmpp, received, component] spiral A size (in bytes) of a data received from XMPP component. [global, data, xmpp, sent, component] spiral A size (in bytes) of a data sent to XMPP component. [HostType, data, xmpp, c2s, message, processing_time] histogram Processing time for incomming c2s stanzas. [global, data, dist] proplist Network stats for an Erlang distributed communication. A proplist with values: recv_oct, recv_cnt, recv_max, send_oct, send_max, send_cnt, send_pend, connections. [global, data, rdbms, PoolName] proplist For every RDBMS pool defined, an instance of this metric is available. 
It is a proplist with values workers, recv_oct, recv_cnt, recv_max, send_oct, send_max, send_cnt, send_pend."},{"location":"operation-and-maintenance/MongooseIM-metrics/#cets-system-metrics","title":"CETS system metrics","text":"Metric name Type Description [global, cets, system] proplist A proplist with a list of stats. Description is below. Stat Name Description available_nodes Available nodes (nodes that are connected to us and have the CETS disco process started). unavailable_nodes Unavailable nodes (nodes that do not respond to our pings). joined_nodes Joined nodes (nodes that have our local tables running). discovered_nodes Discovered nodes (nodes that are extracted from the discovery backend). remote_nodes_without_disco Nodes that have more tables registered than the local node. remote_nodes_with_unknown_tables Nodes with unknown tables. remote_unknown_tables Unknown remote tables. remote_nodes_with_missing_tables Nodes that are available, but do not host some of our local tables. remote_missing_tables Nodes that replicate at least one of our local tables to a different list of nodes. conflict_nodes Nodes that replicate at least one of our local tables to a different list of nodes. conflict_tables Tables that have conflicting replication destinations. discovery_works Returns 1 if the last discovery attempt is successful (otherwise returns 0)."},{"location":"operation-and-maintenance/MongooseIM-metrics/#vm-metrics","title":"VM metrics","text":"Metric name Type Description [global, erlang, memory] proplist A proplist with total, processes_used, atom_used, binary, ets and system memory stats. [global, erlang, system_info] proplist A proplist with port_count, port_limit, process_count, process_limit, ets_limit stats."},{"location":"operation-and-maintenance/MongooseIM-metrics/#backend-metrics","title":"Backend metrics","text":"

Some extension modules expose histograms with timings of calls made to their backends. Please check the documentation of modules that are enabled in your config file, in order to learn if they provide them.

All module backend metrics names use the following convention: [global, backends, Module, BackendAction] and [global, backends, Module, BackendAction, count]. The former is a histogram of operation times. However, the time is not recorded if a backend operation exits with an exception. The latter is a number of calls (spiral metric), incremented for every call (even a failed one).

Besides these, following authentication metrics are always available:

  • [HostType, backends, auth, authorize]
  • [HostType, backends, auth, check_password]
  • [HostType, backends, auth, try_register]
  • [HostType, backends, auth, does_user_exist]

These are total times of respective operations. One operation usually requires only a single call to an auth backend but sometimes with e.g. 3 backends configured, the operation may fail for first 2 backends. In such case, these metrics will be updated with combined time of 2 failed and 1 successful request.

Additionally, the RDBMS layer in MongooseIM exposes two more metrics, if RDBMS is configured:

  • [global, backends, mongoose_rdbms, query] - Execution time of a \"simple\" (not prepared) query by a DB driver.
  • [global, backends, mongoose_rdbms, execute] - Execution time of a prepared query by a DB driver.
"},{"location":"operation-and-maintenance/Rolling-upgrade/","title":"Rolling upgrade","text":""},{"location":"operation-and-maintenance/Rolling-upgrade/#rolling-upgrade","title":"Rolling upgrade","text":"

For all MongooseIM production deployments we recommend running multiple server nodes connected in a cluster behind a load-balancer. Rolling upgrade is a process of upgrading MongooseIM cluster, one node at a time. Make sure you have at least the number of nodes able to handle your traffic plus one before the rolling upgrade to guarantee the availability and minimise the downtime. Running different MongooseIM versions at the same time beyond the duration of the upgrade is not recommended and not supported.

The rolling upgrade procedure is recommended over configuration reload, which is not supported since version 4.1.

Please note that more complex upgrades that involve schema updates, customisations or have functional changes might require more specific and specially crafted migration procedure.

If you just want to make changes to the configuration file, please follow steps 1, 3, 4, 6, 7 and 8. This type of change can also be done one node at a time. It requires you to check the cluster status, modify the configuration file and restart the node.

The usual MongooseIM cluster upgrade can be achieved with the following steps:

"},{"location":"operation-and-maintenance/Rolling-upgrade/#1-check-the-cluster-status","title":"1. Check the cluster status.","text":"

Use the following command on the running nodes and examine the status of the cluster:

mongooseimctl mnesia info | grep \"running db nodes\"\n\nrunning db nodes = [mongooseim@node1, mongooseim@node2]\n

This command shows all running nodes. A healthy cluster should list all nodes that are part of the cluster.

Should you have any issues related to node clustering, please refer to Cluster configuration and node management section.

"},{"location":"operation-and-maintenance/Rolling-upgrade/#2-copy-the-configuration-file","title":"2. Copy the configuration file.","text":"

Make a copy of the configuration file before the upgrade, as some package managers might override your custom configuration with the default one. Please note that since version 4.1 *.cfg MongooseIM configuration format is no longer supported and needs to be rewritten in the new *.toml format.

"},{"location":"operation-and-maintenance/Rolling-upgrade/#3-apply-the-changes-from-the-migration-guide","title":"3. Apply the changes from the migration guide.","text":"

All modifications of the configuration file or updates of the database schema, that are required to perform version upgrade, can be found in the Migration Guide section. When upgrading more than one version, please make sure to go over all consecutive migration guides.

For example, when migrating from MongooseIM 3.7 to 4.1, please familiarize yourself with and apply all necessary changes described in the following pages of the Migration Guide section.

  • 3.7.0 to 4.0.0
  • 4.0.0 to 4.0.1
  • 4.0.1 to 4.1.0
"},{"location":"operation-and-maintenance/Rolling-upgrade/#4-stop-the-running-node","title":"4. Stop the running node.","text":"

Use the following command to stop the MongooseIM node:

mongooseimctl stop\n
"},{"location":"operation-and-maintenance/Rolling-upgrade/#5-install-new-mongooseim-version","title":"5. Install new MongooseIM version.","text":"

You can get the new version of MongooseIM by either building MongooseIM from source code or downloading and upgrading from package.

"},{"location":"operation-and-maintenance/Rolling-upgrade/#6-start-the-node","title":"6. Start the node.","text":"

Use the following command to start and check the status of the MongooseIM node and the cluster:

mongooseimctl start\nmongooseimctl status\n\nmongooseimctl mnesia info | grep \"running db nodes\"\n
"},{"location":"operation-and-maintenance/Rolling-upgrade/#7-test-the-cluster","title":"7. Test the cluster.","text":"

Please verify that the nodes are running and part of the same cluster. If the cluster is working as expected, the migration of the node is complete.

"},{"location":"operation-and-maintenance/Rolling-upgrade/#8-upgrade-the-remaining-nodes","title":"8. Upgrade the remaining nodes.","text":"

Once all the prior steps are completed successfully, repeat the process for all nodes that are part of the MongooseIM cluster.

"},{"location":"operation-and-maintenance/Rolling-upgrade/#further-cluster-upgrade-considerations","title":"Further cluster upgrade considerations","text":"

Another way to perform a cluster upgrade while minimising possible downtime is to set up a parallel MongooseIM cluster running the newer version. You can redirect the incoming traffic to the new cluster with the use of a load-balancer.

Once no connections are handled by the old cluster, it can be safely stopped and the migration is complete.

We highly recommend testing new software release in staging environment before it is deployed on production.

Should you need any help with the upgrade, deployments or load testing of your MongooseIM cluster, please reach out to us. MongooseIM consultancy and support is part of our commercial offering.

"},{"location":"operation-and-maintenance/System-Metrics-Privacy-Policy/","title":"System Metrics Privacy Policy","text":""},{"location":"operation-and-maintenance/System-Metrics-Privacy-Policy/#introduction","title":"Introduction","text":"

MongooseIM system metrics are gathered to analyse the trends and needs of our users, improve MongooseIM, and let us know where to focus our efforts. This section is devoted to explaining how to customise, read, enable and disable collecting of the system metrics.

"},{"location":"operation-and-maintenance/System-Metrics-Privacy-Policy/#consent","title":"Consent","text":"

To ensure transparency, a log message is generated on every MongooseIM node start (unless the metrics service is configured with the report option) to show that the functionality is enabled. The user is notified that metrics are gathered and has the right to withdraw consent at any time without limiting the functionality of the product. For more information on how to disable this feature, please see the Services section.

"},{"location":"operation-and-maintenance/System-Metrics-Privacy-Policy/#what-information-is-being-gathered","title":"What information is being gathered?","text":"

When introducing this feature, it is crucial for us to be fully transparent as to what information is being gathered. In general, we capture information on how MongooseIM is being used, its version and the chosen feature set. We only report the names of known modules and APIs that are part of the opensource product. All additional customisations are simply counted without disclosing any specific details. The user can view all the information that is shared in two different ways. The log file system_metrics_report.json contains the most recent report that was sent. Additionally, the user can configure the Tracking ID to use their own Google Analytics account and have a view of their MongooseIM status in that dashboard. For more information on how to set up the Tracking ID, please see How to configure additional and private Tracking ID in Google Analytics.

The full list of information that is being gathered can be seen below:

  • MongooseIM node uptime.
  • MongooseIM version.
  • Number of nodes that are part of the MongooseIM cluster.
  • Generic modules that are part of the opensource project and are in use. Some modules report what database they use as a backend, e.g. Sample report.
  • Number of custom modules - without disclosing any details, we are just curious to see if there are any.
  • Number of connected external XMPP components.
  • List of configured REST APIs that are part of the opensource project.
  • XMPP transport mechanisms like, TCP/TLS, WebSockets or BOSH.
  • Geographical Data - Google Analytics is providing several geographical dimensions, such as City, Country, Continent. These values are derived from the IP address the data was sent from. See About Geographical Data for more details.
"},{"location":"operation-and-maintenance/System-Metrics-Privacy-Policy/#how-is-the-information-being-used","title":"How is the information being used?","text":"

The information collected is automatically anonymised before it is processed any further. Each MongooseIM node randomly generates a Client ID that is attached to the reports. The collected data has only statistical relevance and aims to help us understand the needs of our users. Knowing how our product is used will allow us to identify the core value it brings to the users. It will point out the direction in which to expand it and show us how to target our further development efforts.

"},{"location":"operation-and-maintenance/System-Metrics-Privacy-Policy/#how-does-a-report-look-like","title":"How does a report look like?","text":"

A sample report showing metrics for the mod_vcard backends from Google Analytics can be found below.

Based on such a report, we can see the frequency of different backends being used with mod_vcard.

"},{"location":"operation-and-maintenance/System-Metrics-Privacy-Policy/#how-often-are-the-metrics-reported","title":"How often are the metrics reported?","text":"

Metrics are reported first shortly after the system startup and later at regular intervals. These timers are configurable using the initial_report and periodic_report parameters. The default values are 5 minutes for the initial report and 3 hours for the periodic one. These reporting intervals can be changed depending on the configuration parameters.

"},{"location":"operation-and-maintenance/System-Metrics-Privacy-Policy/#how-to-configure-this-service","title":"How to configure this service?","text":"

This functionality is provided as a \"service\". For more details regarding service configuration, please see the Services section.

"},{"location":"operation-and-maintenance/System-Metrics-Privacy-Policy/#how-to-configure-additional-and-private-tracking-id-in-google-analytics","title":"How to configure additional and private Tracking ID in Google Analytics?","text":"

The data is gathered and forwarded to Google Analytics. The user can add custom Google Analytics Tracking ID in the MongooseIM configuration and see all incoming events that are related to their own system metrics. For more details on how to create or sign in to the Google Analytics account, please see Get Started with Analytics.

Tracking ID is a property identification code that all collected data is associated with. It determines the destination to which the collected data is sent. To create a new Tracking ID, please follow the steps below:

Warning

MongooseIM no longer supports Universal Analytics. To use metrics it is needed to create an instance of Google Analytics 4.

  • Go to the Admin tab of your user dashboard.
  • Create a new account with + Create Account.
  • Add new property with + Create Property.
    • Within the new property go to Data Streams > Add stream > Web.
    • After successful creation, the ID can be found in the top right corner of the section and has the following format G-XXXX and is named Measurement ID.
  • To create an API secret, in a Data Stream view go to Event > Measurement Protocol API secrets and use the Create button in the top right corner to create a new secret.
"},{"location":"operation-and-maintenance/System-Metrics-Privacy-Policy/#example-configuration","title":"Example configuration","text":"

New Tracking ID can be added to the list of options

[services.service_mongoose_system_metrics]\n  initial_report = 300_000\n  periodic_report = 10_800_000\n  tracking_id.id = \"G-XXXX\"\n  tracking_id.secret = \"Secret\"\n

For more details regarding service configuration, please see Services section.

"},{"location":"operation-and-maintenance/System-Metrics-Privacy-Policy/#data-sharing-policy","title":"Data Sharing Policy","text":"

For more information on how Google Analytics collects and processes data, please see Google Privacy & Terms. Google Analytics is being used due to the ease of host and display reporting information. We will not share any user specific information with further third parties not mentioned in this document. Some insight into the statistical significance regarding our findings from the bulk data collected, has been shared as a blog post on our website.

"},{"location":"operation-and-maintenance/gdpr-considerations/","title":"GDPR considerations","text":"

This page describes what GDPR implies in terms of server management.

"},{"location":"operation-and-maintenance/gdpr-considerations/#data-affected-by-gdpr-commands","title":"Data affected by GDPR commands","text":"
  • inbox - All entries in the subject's inbox. If their messages are stored in other users' inbox, they will not be removed.
  • message archive - Same as above for 1-1 messages. In case of group chat messages, they are retrieved as personal data but not removed.
  • offline storage - All messages stored for delivery.
  • roster - All entries in the subject's roster. Other users' rosters are NOT affected, even if they include the subject's JID or other data.
  • vCard - The entire content of the subject's vCard.
  • private XML storage - All items stored by the subject will be removed.
  • publish-subscribe
    • retrieval: all subject's subscriptions and nodes (with their payloads included).
    • removal: subject's subscriptions, push and PEP nodes (with their data included).
"},{"location":"operation-and-maintenance/gdpr-considerations/#gdpr-cli-commands","title":"GDPR CLI commands","text":"

All CLI commands are accessible via the mongooseimctl command, located in the bin/ directory inside the MIM release.

"},{"location":"operation-and-maintenance/gdpr-considerations/#creating-a-gdpr-safe-user-account","title":"Creating a GDPR-safe user account","text":"

mongooseimctl account registerUser --domain <domain> --password <password>

This command will create an anonymised JID with a random username part. It ensures that no personal information will be leaked via logs or database entries, which include the user's JID.

"},{"location":"operation-and-maintenance/gdpr-considerations/#example","title":"Example","text":"
$ mongooseimctl account registerUser --domain localhost --password secret\n{\n  \"data\" : {\n    \"account\" : {\n      \"registerUser\" : {\n        \"message\" : \"User 1661-175924-881845-449bca06515e060a@localhost successfully registered\",\n        \"jid\" : \"1661-175924-881845-449bca06515e060a@localhost\"\n      }\n    }\n  }\n}\n
"},{"location":"operation-and-maintenance/gdpr-considerations/#retrieval-of-personal-data","title":"Retrieval of Personal Data","text":"

mongooseimctl gdpr retrievePersonalData --username <username> --domain <domain> --resultFilepath <filepath for the output as a zip>

It retrieves personal data accessible to the server (see \"Technical limitations\" section below). The directory where the zip file will be created must already exist.

After the execution is complete, a zip file will appear in the specified folder with personal information in CSV files grouped by type.

"},{"location":"operation-and-maintenance/gdpr-considerations/#example_1","title":"Example","text":"
$ mongooseimctl gdpr retrievePersonalData --username 1661-175924-881845-449bca06515e060a --domain localhost --resultFilepath /home/mongooseim/gdpr/1661-175924-881845-449bca06515e060a.zip\n
"},{"location":"operation-and-maintenance/gdpr-considerations/#removal-of-personal-data","title":"Removal of Personal Data","text":"

mongooseimctl account removeUser --user <jid>

It removes the user's account along with all associated personal data accessible to the server (see \"Technical limitations\" section below).

"},{"location":"operation-and-maintenance/gdpr-considerations/#example_2","title":"Example","text":"
$ mongooseimctl account removeUser --user 1661-175924-881845-449bca06515e060a@localhost\n{\n  \"data\" : {\n    \"account\" : {\n      \"removeUser\" : {\n        \"message\" : \"User 1661-175924-881845-449bca06515e060a@localhost successfully unregistered\",\n        \"jid\" : \"1661-175924-881845-449bca06515e060a@localhost\"\n      }\n    }\n  }\n}\n
"},{"location":"operation-and-maintenance/gdpr-considerations/#technical-limitations-of-gdpr-retrieval-and-removal","title":"Technical limitations of GDPR retrieval and removal","text":"

Both GDPR retrieval and removal will process the data available via configured extensions and database(s). If a part of personal information is managed by an extension that is e.g. temporarily disabled, it won't be retrieved/deleted.

If any MIM extension you had enabled on production is now disabled or you've switched one of them (or e.g. auth module) to another database, it is possible that some personal data will not be retrieved or removed as expected. In such case, please consider starting a separate MIM instance that is configured to access all places, where personal data may be stored. You may also extract the missing pieces of information on your own, however we won't cover the details of this method in this guide.

Please also visit Known issues page to learn about a mod_mam_muc issue that may manifest in some environments.

"},{"location":"operation-and-maintenance/known-issues/","title":"Known issues","text":"

This document provides a list of all known issues with MongooseIM operation and configuration. You may also find proposed workarounds if any are available.

"},{"location":"operation-and-maintenance/known-issues/#missing-muc-light-room-config-fields-with-rdbms-backend","title":"Missing MUC Light room config fields with RDBMS backend","text":"

Before MongooseIM 3.5.x (incl.) new MUC Light rooms could be created with some config fields absent in the RDBMS table. These options couldn't be re-added later by changing the room config via requests from the clients.

It happened when the default config was a subset of the schema, and the client hasn't provided these values when a room was created.

Please note that this issue was resolved from MIM 3.6.0 onwards as the default_config option was deleted.

"},{"location":"operation-and-maintenance/known-issues/#how-to-fix-this","title":"How to fix this?","text":"

You have to iterate over all rooms in the DB (muc_light_rooms table) and add missing entries to the muc_light_config table. Every option is inserted as a separate row and is stored as plain text, so it should be straightforward.

Let's say you were using the following config in mongooseim.cfg:

{config_schema, [\n                 \"roomname\",\n                 \"subject\",\n                 \"background\",\n                 \"notification_sound\"\n                ]},\n{default_config, [\n                  {\"roomname\", \"The room\"},\n                  {\"subject\", \"Chit-chat\"}\n                 ]}\n

Your client application has created some rooms without the background option by mistake.

For every id in the muc_light_rooms table, you need to execute:

INSERT INTO muc_light_config(room_id, opt, val) VALUES ('put id here', 'background', 'new default value');\n
"},{"location":"operation-and-maintenance/known-issues/#mssql-connectivity-via-odbc","title":"MSSQL connectivity via ODBC","text":"

We have observed some issues with the ODBC driver used by MongooseIM in the past. The problems should now be resolved, and MSSQL is verified to work on Ubuntu 20.04.2 LTS.

"},{"location":"operation-and-maintenance/known-issues/#gdpr-retrieval-for-mam-muc-limitation","title":"GDPR retrieval for MAM MUC limitation","text":"

When the personal data retrieval is executed for a user in a specific domain, Message Archive Management for groupchats must be running for this particular domain. This is the case for most configurations, but the problem manifests when a MongooseIM operator configures mod_mam_muc/mod_mam to start only for a subset of domains supported by the cluster (host_config option).

In such case, personal data stored by MAM MUC will not be retrieved for this user.

"},{"location":"operation-and-maintenance/known-issues/#proposed-workaround","title":"Proposed workaround","text":"

Start a dedicated MongooseIM instance with a slightly different config, which enables Message Archive Management for the user's domain. This instance doesn't have to be clustered with other nodes and doesn't have to be accessible for actual users.

After a successful retrieval, this instance may be terminated and deleted if necessary.

"},{"location":"operation-and-maintenance/tls-distribution/","title":"Distribution over TLS","text":"

It's possible to use TLS for communication between MongooseIM cluster nodes. To enable it, find the directory of your release, below it look for etc/vm.dist.args and, inside the file, the section about the distribution protocol:

## Use TLS for connections between Erlang cluster members.\n## Don't forget to override the paths to point to your certificate(s) and key(s)!\n## Once a connection is established, Erlang doesn't differentiate between\n## a client and a server - the same certs/keys can be used on both sides.\n#-proto_dist inet_tls\n#-ssl_dist_opt server_certfile   /Users/erszcz/work/esl/mongooseim/_build/mim1/rel/mongooseim/priv/ssl/fake_cert.pem client_certfile   /Users/erszcz/work/esl/mongooseim/_build/mim1/rel/mongooseim/priv/ssl/fake_cert.pem\n#              server_keyfile    /Users/erszcz/work/esl/mongooseim/_build/mim1/rel/mongooseim/priv/ssl/fake_key.pem  client_keyfile    /Users/erszcz/work/esl/mongooseim/_build/mim1/rel/mongooseim/priv/ssl/fake_key.pem\n#              server_cacertfile /Users/erszcz/work/esl/mongooseim/_build/mim1/rel/mongooseim/priv/ssl/cacert.pem    client_cacertfile /Users/erszcz/work/esl/mongooseim/_build/mim1/rel/mongooseim/priv/ssl/cacert.pem\n#              client_verify     verify_peer\n#              server_verify     verify_peer\n#              server_fail_if_no_peer_cert true\n

By default, the proto_dist as well as the following options for configuring the cluster member are commented out. Enable them and provide the correct paths to your CA certificate, server certificate and server key.

There's a number of caveats to remember about when running Erlang distribution over TLS:

  • TLS-enabled and non-TLS Erlang nodes can't communicate with one another. Remember about it when trying to run erl -[s]name ... and communicating with the server.

  • Establishing a TLS connection will fail if a certificate isn't found in the specified location. You might receive a log message indicating that when nodes try to connect:

    2017-03-10 16:16:03.844 [warning] <0.4218.2> global: mongooseim@localhost failed to connect to fed1@localhost\n

    If the pointed-at certificate/key/CA-certificate file doesn't exist, it won't be reported before trying to connect. Look for (grep) the log message on all cluster nodes, as the message doesn't have to appear on all nodes if a connection fails.

  • You can switch a cluster from running non-TLS distribution, to TLS distribution by shutting down a node, enabling TLS on it, starting it up again, and repeating the steps for each remaining node. Again, nodes with and without TLS enabled won't be able to communicate with one another.

  • It's possible to fortify an Erlang cluster further than the Mongoose's preconfigured vm.dist.args does. This includes: checking certificate revocation status against a CA's Certificate Revocation List, securing/disabling EPMD (Erlang Port Mapper Daemon), using custom certificate verification functions. For details on these steps please refer to Erlang Distribution over TLS and Erlang (and Elixir) distribution without epmd.

"},{"location":"rest-api/Administration-backend/","title":"MongooseIM's REST API for backend administration","text":""},{"location":"rest-api/Administration-backend/#configuration","title":"Configuration","text":"

To enable the commands, you need to hook the mongoose_admin_api module to an HTTP endpoint as described in the admin REST API handlers configuration section of the HTTP listeners documentation.

"},{"location":"rest-api/Administration-backend/#openapi-specifications","title":"OpenAPI specifications","text":"

Read the Swagger documentation for more information.

"},{"location":"rest-api/Client-frontend/","title":"MongooseIM's REST API for frontend or client","text":"

In addition to the regular XMPP connection methods such as TCP (with TLS/STARTTLS), WebSockets and BOSH, MongooseIM provides parts of its functionality over a REST API.

"},{"location":"rest-api/Client-frontend/#assumptions","title":"Assumptions","text":"
  1. Every request has to be authenticated. Please see the Authentication section for more details.
  2. We strongly advise that this API is served over HTTPS.
  3. User registration has to be done via other methods (e.g. using the REST API for backend services).
  4. The relevant endpoint has to be configured on the server side. See the configuration section.
  5. A list of provided actions is documented with Swagger. See the specification.
"},{"location":"rest-api/Client-frontend/#authentication","title":"Authentication","text":"

MongooseIM uses Basic Authentication as an authentication method for the REST API.

Basic authentication is a simple authentication scheme built into the HTTP protocol. Each HTTP request to the client REST API has to contain the Authorization header with the word Basic followed by a space and a base64-encoded string username@host:password, where:

  • username@host is the user's bare JID,
  • password is the password used to register the user's account.

For example, to authorize as alice@localhost with the password secret, the client would send a header:

Authorization: Basic YWxpY2VAbG9jYWxob3N0OnNlY3JldA==\n
"},{"location":"rest-api/Client-frontend/#configuration","title":"Configuration","text":"

Handlers have to be configured as shown in the REST API configuration example to enable REST API.

In order to get the client REST API up and running simply copy the provided example. For more details about possible configuration parameters please see the relevant documentation of the HTTP listeners, in particular the client REST API handlers section.

"},{"location":"rest-api/Client-frontend/#smack-library-support","title":"Smack library support","text":"

REST API can fetch messages for Smack Stanza Properties.

For example if we have properties in the stanza like:

    <message xml:lang='en' to='alice@localhost' id='123' type='chat'>\n      <body xml:lang='en_US'>Hi!</body>\n      <thread parent='7edac73ab41e45c4aafa7b2d7b749080'>\n        e0ffe42b28561960c6b12b944a092794b9683a38\n      </thread>\n      <properties xmlns=\"http://www.jivesoftware.com/xmlns/xmpp/properties\">\n          <property>\n              <name>some_number</name>\n              <value type='integer'>123</value>\n          <property>\n          <property>\n              <name>some_string</name>\n              <value type='string'>abc</value>\n          <property>\n      </properties>\n    </message>\n
then in the final json message these properties will be converted to json map without tag names and all types will be taken as string:
    {   \"to\": \"alice@localhost\",\n        \"timestamp\": 1531329049949,\n        \"id\": \"123\",\n        \"from\": \"bob@localhost\",\n        \"body\": \"Hi!\",\n        \"thread\": \"e0ffe42b28561960c6b12b944a092794b9683a38\",\n        \"parent\": \"7edac73ab41e45c4aafa7b2d7b749080\",\n        \"properties\":{\n            \"some_number\":\"123\",\n            \"some_string\":\"abc\"\n        }\n    }\n

"},{"location":"rest-api/Client-frontend/#openapi-specifications","title":"OpenAPI specifications","text":"

See the Swagger documentation for more information.

"},{"location":"tutorials/CETS-configure/","title":"How to configure MongooseIM to use CETS instead of Mnesia","text":""},{"location":"tutorials/CETS-configure/#cets-config-example","title":"CETS Config Example","text":"

CETS is a library that allows you to replicate in-memory data across the MongooseIM cluster. It can be used to store:

  • information about online XMPP sessions;
  • information about outgoing S2S connections;
  • stream management session IDs;
  • information about online MUC rooms.

If you want to use CETS instead of Mnesia, ensure that these options are set:

[general]\n  sm_backend = \"cets\"\n  component_backend = \"cets\"\n  s2s_backend = \"cets\"\n\n[internal_databases.cets]\n\n# The list of modules that use CETS\n# You should enable only modules that you use\n[modules.mod_stream_management]\n  backend = \"cets\"\n\n[modules.mod_bosh]\n  backend = \"cets\"\n\n[modules.mod_muc]\n  online_backend = \"cets\"\n\n[modules.mod_jingle_sip]\n  backend = \"cets\"\n

Ensure that outgoing_pools are configured with RDBMS, so CETS could get a list of MongooseIM nodes, which use the same relational database and cluster them together.

A preferred way to install MongooseIM is Helm Charts on Kubernetes, which allows you to set volatileDatabase to cets; the values are then applied using Helm's templates.

"},{"location":"tutorials/CETS-configure/#cets-with-the-file-discovery-backend","title":"CETS with the file discovery backend","text":"

It is possible to read the list of nodes to cluster from a file. MongooseIM does not modify this file, so keeping it up to date is the operator's task; however, MongooseIM rereads the file without requiring a restart:

[internal_databases.cets]\n    backend = \"file\"\n    node_list_file = \"/etc/mongooseim/mongooseim_nodes.txt\"\n

And the format of the node_list_file file is a new line separated list of nodes:

mongooseim@host1.example.com\nmongooseim@host2.example.com\nmongooseim@host3.example.com\n

File backend for CETS is only useful if you do not use an RDBMS database. You could use some external script to get the list of nodes from the AWS CLI command or some other way.

"},{"location":"tutorials/Docker-build/","title":"How to build and run MongooseIM docker image","text":"

The instruction below will guide you through the process of building and running the MongooseIM docker image.

"},{"location":"tutorials/Docker-build/#requirements","title":"Requirements","text":"

To follow this guide you need to have docker installed and the MongooseIM GitHub repository cloned locally.

"},{"location":"tutorials/Docker-build/#building-docker-image","title":"Building docker image","text":"

To build a MongooseIM image, navigate to the main repo directory (referenced as $REPO in this guide) and execute:

./tools/build-docker-from-remote.sh\n

which will build a MongooseIM docker image based on the current local commit if it is available on the remote.

Alternatively, it is possible to build a docker image based on any commit available on remote (commit hash referenced as $COMMIT_HASH), by executing:

./tools/build-docker-from-remote.sh $COMMIT_HASH\n
"},{"location":"tutorials/Docker-build/#running-docker-image","title":"Running docker image","text":"

A full tutorial on running a docker image is available on mongooseim-docker GitHub. Here only a simple, one-node configuration will be presented. In order to run it, execute:

docker run -dt -h first-node --name first-node -e JOIN_CLUSTER=false mongooseim\n

which starts a single MongooseIM node named first-node.

"},{"location":"tutorials/How-to-build/","title":"How to build MongooseIM","text":"

Instructions provided in this page are verified for:

  • Rocky Linux 8
  • Ubuntu 16.04 LTS (Xenial)
  • Ubuntu 18.04 LTS (Bionic)
  • macOS 13.3 (Ventura)

For any other OS versions, the instructions should still work, however, some steps or file paths may be different.

"},{"location":"tutorials/How-to-build/#requirements","title":"Requirements","text":"

To compile MongooseIM you need:

Rocky/AlmaUbuntumacOS
  • Make: make,
  • C and C++ compiler: gcc, g++,
  • Erlang/OTP 26.0 or higher:
    • erlang EPEL package, or,
    • esl-erlang from Erlang Solutions website, or,
    • install using kerl,
  • OpenSSL 0.9.8 or higher, for STARTTLS, SASL and SSL encryption: openssl and openssl-devel,
  • ODBC library: unixODBC-devel,
  • Zlib 1.2.3 or higher: zlib-devel.
  • Make: make,
  • C and C++ compiler: gcc, g++,
  • Erlang/OTP 24.0 or higher:
    • erlang package, or,
    • esl-erlang from Erlang Solutions website, or,
    • install using kerl,
  • OpenSSL 0.9.8 or higher, for STARTTLS, SASL and SSL encryption: libssl-dev,
  • ODBC library: unixodbc-dev,
  • Zlib 1.2.3 or higher: zlib1g-dev.
  • Make, C and C++ compiler: Xcode Command Line Tools,
  • Erlang/OTP 24.0 or higher:
    • erlang from Homebrew,
    • install using kerl,
  • OpenSSL 0.9.8 or higher, for STARTTLS, SASL and SSL encryption: openssl from Homebrew
  • ODBC library: unixodbc from Homebrew.
"},{"location":"tutorials/How-to-build/#preparing-the-environment","title":"Preparing the environment","text":"Rocky/AlmaUbuntumacOS

Please install the required dependencies:

sudo yum install git make zlib-devel openssl openssl-devel unixODBC-devel gcc gcc-c++\nwget https://binaries2.erlang-solutions.com/rockylinux/8/esl-erlang_26.2.4_1~rockylinux~8_x86_64.rpm\nsudo dnf -Uvh esl-erlang_26.2.4_1~rockylinux~8_x86_64.rpm\n

Now, please proceed to the \"Building\" section.

Please install the required dependencies:

sudo apt install git make zlib1g-dev libssl-dev unixodbc-dev gcc g++ erlang\n

Now, please proceed to the \"Building\" section.

Step 1

Install Homebrew to manage packages on your Mac. You may use a different package manager but you'll need to figure out the package names and file paths on your own.

Step 2

Install Xcode Command Line Tools.

xcode-select --install # install compilation tools\n

Step 3

Install dependencies with Brew.

brew install erlang openssl unixodbc\n

Step 4

Add OpenSSL paths to the compiler and linker environment variables:

export LDFLAGS=\"-L/usr/local/opt/openssl/lib\"\nexport CFLAGS=\"-I/usr/local/opt/openssl/include\"\n

Now, please proceed to the \"Building\" section.

"},{"location":"tutorials/How-to-build/#building","title":"Building","text":"

To compile MongooseIM, navigate to the main repo directory (referenced as $REPO in this guide) and execute:

make [rel]\n

rel is optional as it is the default target. This will download all dependencies, compile everything and build a prod release.

If a more advanced release is required (with only specific DB support, e.g. mysql or pgsql) or you want to set the prefix or user for the installation script please refer to the release configuration page in our documentation.

The make rel commands will generate a self-contained OTP system structure in the project's _build/prod/rel/mongooseim subdirectory. The contents of that directory are as follows:

  • bin - startup/administration scripts,
  • etc - configuration files,
  • lib - MongooseIM binary, header and runtime files,
  • var - spool directory,
  • log - log file directory,
  • releases - release files directory.
"},{"location":"tutorials/How-to-build/#running-mongooseim","title":"Running MongooseIM","text":"

To run MongooseIM from the project tree after compiling it, change the directory to $REPO/_build/prod/rel/mongooseim.

There you can use the mongooseim command line administration script to start and stop MongooseIM. For example, this command will start the server:

bin/mongooseim start\n

You can also run the server in interactive mode (drop into an Erlang shell):

bin/mongooseim live\n

There's also a tool called mongooseimctl to perform some operations on a running instance, e.g.:

$ bin/mongooseimctl status\nMongooseIM node mongooseim@localhost:\n    operating system pid: 3105\n    Erlang VM status: started (of: starting | started | stopping)\n    boot script status: started\n    version: 3.4.0-7-gaec944c92 (as mongooseim)\n    uptime: 0 days 00:00:12\n    distribution protocol: inet_tcp\n    logs:\n        log/mongooseim.log\n
"},{"location":"tutorials/How-to-build/#building-the-testing-target-and-running-tests","title":"Building the testing target and running tests","text":"

For testing purposes there's a different make target available:

make devrel\n

which will generate releases mim1, mim2, mim3, fed1, reg1 in $REPO/_build/ and prepare them for testing and generating coverage reports.

In order to learn how to execute tests, please consult Testing MongooseIM page.

"},{"location":"tutorials/ICE_tutorial/","title":"How to set up MongooseICE (ICE/TURN/STUN server)","text":""},{"location":"tutorials/ICE_tutorial/#introduction","title":"Introduction","text":""},{"location":"tutorials/ICE_tutorial/#who-is-this-document-for","title":"Who is this document for?","text":"

This tutorial presents our TURN/STUN server in action. You get to see how to set up and configure MongooseICE and examine a system utilising its many talents.

Are you in need of an application requiring NAT traversal? Want to see how a TURN and STUN server would handle it? Or maybe you just like to tinker with interesting technologies and experience setting them up first hand?

If that's the case, this tutorial is for you.

"},{"location":"tutorials/ICE_tutorial/#what-is-the-end-result-of-this-tutorial","title":"What is the end result of this tutorial?","text":"

At the end of the tutorial you will have a working environment with two peers, one sending a live video to another. The peer-to-peer communication will not be obstructed by any NATs that may occur in the background. The live video stream is only an example here - there are many possible use cases for peer-to-peer communication with NAT traversal. We chose to build an example application that shows video streaming, because it's vivid, catchy and fun.

"},{"location":"tutorials/ICE_tutorial/#what-do-i-need-to-begin","title":"What do I need to begin?","text":"

Before you begin you have to prepare an environment for setting up the components used in this tutorial. Here's a list of things you'll need: * One Android phone (or at least an Android emulator). The video player in this tutorial is available only as an Android application. * RaspberryPi or any other device that is able to run Elixir code. Oh, and also has ffmpeg installed. We are going to use RaspberryPi 3, to give this tutorial a hint of IoT. * At least one machine with a public IPv4 address. It is necessary, because both MongooseIM and MongooseICE servers need to be accessible by all devices that are used in this demo system. You could use a private, local IP address, but then you would need to ensure that your phone and the RaspberryPi are behind some kind of a NAT relative to this IP address.

Note

The demo will probably work without the NAT, but then there is no point in setting up a TURN server.

We are going to use 2 VPS (Virtual Private Server) that are located somewhere far far away, both having public IPv4 address. Let's say MongooseICE is bound to 1.1.1.1, and MongooseIM to 2.2.2.2.

"},{"location":"tutorials/ICE_tutorial/#general-architecture-of-the-environment-built-with-this-tutorial","title":"General architecture of the environment built with this tutorial","text":"

This is the architecture of the system we are building:

As we know by now, MongooseIM is bound to 2.2.2.2/myxmpp.com and MongooseICE to 1.1.1.1. We also have a RaspberryPi that is connected to a private network (so is behind some NAT) and an Android phone that is connected to an LTE network and also is behind the carrier's NAT.

"},{"location":"tutorials/ICE_tutorial/#ice-notes","title":"ICE notes","text":"

The end result of this tutorial not only uses MongooseICE and MongooseIM servers but also uses custom version of Mangosta-Android and DemoStreamerICE. Both projects are custom modified and custom made respectively in order to showcase the video streaming using the data relay capabilities provided by MongooseICE. The streaming itself, along with the signalling protocol, were prepared only for the case of this demo and are not a part of the platform. Those components exist only to visualize what can be achieved with MongooseICE and what can be built on top of it.

"},{"location":"tutorials/ICE_tutorial/#setting-up-mongooseim-signalling","title":"Setting up MongooseIM (signalling)","text":"

The ICE is nothing without signalling. The signalling protocol itself can be designed specifically for the application that is being deployed or can be implemented based on some standards, e.g. Jingle. Here, we chose to implement the simplest signalling possible, i.e. sending relay addresses via XMPP messages. No matter if we decide to go with this approach or with Jingle, we can use the MongooseIM XMPP server as a transport layer for the signalling. In order to enable signalling we need an instance of MongooseIM running with the simplest configuration, since the only thing we need from it is to provide us with means to communicate between two peers.

"},{"location":"tutorials/ICE_tutorial/#configuration","title":"Configuration","text":"

You can find MongooseIM installation instructions on this page. Once you have it installed, you need to modify the mongooseim.toml config file:

[general]\n  hosts = [\"localhost\", \"myxmpp.com\"]\n
This sets the virtual hostname of the XMPP server, so that you can register users in this domain. After that, you can start MongooseIM with
mongooseimctl start\n

"},{"location":"tutorials/ICE_tutorial/#users","title":"Users","text":"

After we finish setting up MongooseIM, we need to register some users. For this demo we need two users: movie@myxmpp.com and phone@myxmpp.com, for RaspberryPi and the Android phone respectively. In order to do that, type:

mongooseimctl account registerUser --username movie --domain myxmpp.com --password xmpp_password\nmongooseimctl account registerUser --username phone --domain myxmpp.com --password xmpp_password\n

on the machine that has MongooseIM installed.

As you can see here, we have created those two users, both with the password xmpp_password for simplicity.

"},{"location":"tutorials/ICE_tutorial/#setting-up-mongooseice-turnstun-server","title":"Setting up MongooseICE (TURN/STUN server)","text":"

Now, since MongooseIM handles the signalling, we need the TURN relay and the STUN server to send peer-to-peer data. For that we are going to use the star of this tutorial - MongooseICE.

"},{"location":"tutorials/ICE_tutorial/#how-to-get-and-configure","title":"How to get and configure","text":"

The whole documentation that describes all options and deployment methods, can be found on the project's github page. Let's get to it! (this command assumes that we are on the server for MongooseICE and that it has Docker installed):

docker run -it --net=host -e \"MONGOOSEICE_UDP_RELAY_IP=1.1.1.1\" -e \"MONGOOSEICE_STUN_SECRET=secret\" -e \"MONGOOSEICE_UDP_REALM=myrelay\" mongooseim/mongooseice:0.4.0\n

This command starts the MongooseICE server in the Docker container, attaching its virtual network interface to the network interface of the host machine the Docker daemon is running on. There are three important configuration options we have to set via environment variables:

  • MONGOOSEICE_UDP_RELAY_IP - This is the IP address that MongooseICE provides data relay on. This should be set to public IPv4 address.
  • MONGOOSEICE_STUN_SECRET - This is a secret password that TURN clients need to provide to connect to this server.
  • MONGOOSEICE_UDP_REALM - This is just a name for your TURN relay.

And that's it! MongooseICE is now ready to roll!

"},{"location":"tutorials/ICE_tutorial/#setting-up-mangosta-android","title":"Setting up Mangosta-Android","text":""},{"location":"tutorials/ICE_tutorial/#how-to-get-and-install","title":"How to get and install","text":"

The source code of the video-stream-demo-enabled Mangosta-Android can be found on the ice_demo_kt branch. If you want to tinker with it and compile it yourself, you can do that. All you need is Android Studio 2.3+. The compilation is pretty straightforward, so I'm not going to explain it here. If you are interested in how it works, most of the code is in the inaka.com.mangosta.videostream package. If you don't want to compile this application from source, you can just install this .apk on your phone and that's it.

"},{"location":"tutorials/ICE_tutorial/#how-to-configure","title":"How to configure","text":"

Right after you start Mangosta-Android for the first time, you will need to login to your XMPP server. In order to do that, just enter the JID you have created for the phone (phone@myxmpp.com), the password (xmpp_password) and the server address (2.2.2.2 or myxmpp.com if you've set up the domain to actually point to this IP address), and then confirm by clicking \"Enter\".

After we log in, we can start setting up the connection to the MongooseICE server we set up before. The process is shown on the screenshots below.

On the \"Configure ICE\" screen we have to set 5 fields up:

  • TURN server address - IPv4 address of our MongooseICE
  • TURN Server port - since we did not set the port while configuring MongooseICE it uses a default one - 3478
  • TURN Realm - Realm name we have set via MONGOOSEICE_UDP_REALM variable. In our case it's \"myrelay\".
  • TURN username - Current version of MongooseICE ignores this, so you may leave it as is.
  • TURN password - The password that we have set via MONGOOSEICE_STUN_SECRET variable. In our case it's \"secret\"

And that would be all. Now you can click \"TEST CONNECTION\" to, well..., test the connection. If everything works, you can \"SAVE\" the settings. Now your Mangosta-Android is ready to play streamed video, but we still need the source...

"},{"location":"tutorials/ICE_tutorial/#setting-up-raspberrypi","title":"Setting up RaspberryPi","text":"

Let's configure the video source now. In our case it will be a RaspberryPi with Elixir and ffmpeg installed running our ICE demo application.

"},{"location":"tutorials/ICE_tutorial/#the-software","title":"The software","text":"

For this demo we provide a simple XMPP client that also is able to send live video stream using ffmpeg whenever other peer asks for it via XMPP. This client is written in Elixir, so we can run it from source quite easily.

"},{"location":"tutorials/ICE_tutorial/#how-to-get-and-configure_1","title":"How to get and configure","text":"

You can get the client's sources here. For now we only need to run it, so let's get to it (on our RaspberryPi):

git clone https://github.com/esl/ice_demo.git\ncd ice_demo\nmix deps.get\niex -S mix\n

After a while we should get into Elixir shell. In order to enable the streamer, we need to start it, providing some configuration options (in the Elixir shell):

opts = [\njid: \"movie@myxmpp.com\",\npassword: \"xmpp_password\",\nhost: \"myxmpp.com\",\nturn_addr: \"1.1.1.1:3478\",\nturn_username: \"username\",\nturn_secret: \"secret\",\nvideo_file: \"/home/pi/sintel.h264\"\n]\nICEDemo.start_movie(opts)\n

The first 3 options are all about connecting to the XMPP server - we use \"movie@myxmpp.com\" user that we created earlier. Next 3 options are about connecting to the MongooseICE server. Those are similar to ones we set in Mangosta-Android. The last one points to the video file that will be streamed on request. This file has to be raw, H.264-encoded, video-only file. If you are not sure how to get one, you can just use this one (pre-rendered Sintel, OpenBlender project). With this configuration, our RaspberryPi is ready to stream!

"},{"location":"tutorials/ICE_tutorial/#the-end-result","title":"The end result","text":""},{"location":"tutorials/ICE_tutorial/#playing-the-video","title":"Playing the video","text":"

Now we can finally get our phone and start streaming the video! In order to do that, we have to click the "New video stream" button as shown on the screenshots below, enter the JID of the RaspberryPi and confirm with the "Stream!" button.

Hopefully, now you can see the video on your own mobile device.

"},{"location":"tutorials/Jingle-SIP-setup/","title":"Jingle/SIP setup proof of concept","text":"

This tutorial will show you how to configure MongooseIM, Routr (a SIP server) and client applications to demonstrate how the Jingle/SIP integration works.

"},{"location":"tutorials/Jingle-SIP-setup/#prerequisites","title":"Prerequisites","text":"

We are going to use the following open source software:

  • MongooseIM - https://github.com/esl/MongooseIM

    • see How-to-build for details on building. It's important to remember to run the configuration script with the with-jingle-sip flag set: tools/configure with-jingle-sip. Without this, third party dependencies required by the Jingle/SIP translator will not be included in the release.
  • Routr (SIP server) - https://routr.io

    • I recommend downloading binaries for your system from official source.
  • Jitsi (XMPP and SIP client application) - https://desktop.jitsi.org
  • Otalk - web based XMPP client - https://github.com/otalk/otalk-im-client
    • Follow the instructions on otalk-im-client#installing to run it

We will use 2 users xmpp.user@xmpp.example and sip.user@sip.example.

"},{"location":"tutorials/Jingle-SIP-setup/#configuring-routr","title":"Configuring Routr","text":"

First the domain sip.example needs to be added to domains served by Routr. To do it, paste the following content to config/domains.yml in the directory where Routr was:

- apiVersion: v1beta1\n  kind: Domain\n  metadata:\n    name: SIP domain\n  spec:\n    context:\n      domainUri: sip.example\n

Then the sip.user@sip.example needs to be added to config/agents.yml like below:

- apiVersion: v1beta1\n  kind: Agent\n  metadata:\n    name: SIP User\n  spec:\n    credentials:\n      username: 'sip.user'\n      secret: '1234'\n    domains: [sip.example]\n

Now Routr can be started with

./routr\n

If all goes well we'll see the following output:

[INFO ] Starting Routr\n[INFO ] Listening  on 10.152.1.27:5060 [udp]\n[INFO ] Listening  on 10.152.1.27:5060 [tcp]\n[INFO ] Starting Location service\n[INFO ] Starting Registry service\n[INFO ] Starting Restful service (port: 4567, apiPath: '/api/v1beta1')\n

It is important to remember the IP address as it'll be used in next point.

"},{"location":"tutorials/Jingle-SIP-setup/#a-side-note","title":"A side note","text":"

In Routr's logs you may see messages like

[WARN ] Unable to register with Gateway -> sip.provider.net. (Verify your network status)\n

or

[ERROR] java.lang.RuntimeException: javax.sip.header.TooManyHopsException: has already reached 0!\n

They can be ignored for the purpose of the tutorial.

"},{"location":"tutorials/Jingle-SIP-setup/#configuring-etchosts","title":"Configuring /etc/hosts","text":"

In my case the IP reported by Routr was 10.152.1.27. Now we need to use this to update /etc/hosts file like below:

10.152.1.27     sip.example xmpp.example\n
"},{"location":"tutorials/Jingle-SIP-setup/#configuring-mongooseim","title":"Configuring MongooseIM","text":"

At this point I assume that MongooseIM was built with make rel, that it is running and the current working directory is _build/prod/rel/mongooseim. Similar to Routr, MongooseIM also needs to know which hosts to serve. Please replace the default host defined in etc/mongooseim.toml; the line:

[general]\n  hosts = [\"localhost\"]\n

should be changed to:

[general]\n  hosts = [\"xmpp.example\", \"sip.example\"]\n

Now we need to enable mod_jingle_sip, please add the following line in modules list (somewhere around line 740 in the same file)

[modules.mod_jingle_sip]\n  proxy_host = \"sip.example\"\n

More details on MongooseIM configuration you can find in Configuration and in Modules configuration

Now we are registering both users in MongooseIM by calling the following commands:

mongooseimctl account registerUser --username xmpp.user --domain xmpp.example --password test_pass\nmongooseimctl account registerUser --username sip.user --domain sip.example --password test_pass\n

Yes, we need to have the sip.user@sip.example registered in MongooseIM. This is needed because a Jingle call can be initiated by a regular XMPP client only when the app knows the other user's full JID. The easiest way to achieve that is to exchange presence information between these 2 users. This can happen automatically if 2 xmpp users have each other in the roster.

The roster can be set up with the following command:

mongooseimctl roster setMutualSubscription --userA xmpp.user@xmpp.example --userB sip.user@sip.example --action CONNECT\n
"},{"location":"tutorials/Jingle-SIP-setup/#adding-users-to-jitsi","title":"Adding users to Jitsi","text":"

Now the sip.user@sip.example has to be added to Jitsi app. When the app is opened for the first time it will display a window to configure the user. Later users can be configured from the Preferences page.

"},{"location":"tutorials/Jingle-SIP-setup/#adding-a-sip-user","title":"Adding a SIP user","text":"

In order to add a user who connects to the SIP server we need to choose the SIP protocol from the available networks in Jitsi. In the SIP id field we put sip.user@sip.example and in the Password field we put 1234 as in the agents.yml file. Now we need to switch to Advanced options and go to the Connection tab. Here we need to unselect the Configure proxy automatically and put the IP of our Routr server, port number 5060 and TCP as the preferred transport.

"},{"location":"tutorials/Jingle-SIP-setup/#adding-an-xmpp-user","title":"Adding an XMPP user","text":"

Now we have to add sip.user@sip.example to Jitsi's XMPP network in order to connect this user to MongooseIM over XMPP. It's very similar to adding a user to Jitsi's SIP network, the only difference is the password, for the XMPP connection it's test_pass as set when registering the user in MongooseIM. Here we also need to go to the Advanced window and the Connection tab in order to put the IP address (the same as before) in the Connect Server field. Remember to check the Override server default options box.

Connecting sip.user@sip.example to MongooseIM over XMPP is to cheat Jingle a bit, so that the client app for user xmpp.user@xmpp.example can start the Jingle call. When Jitsi connects this user, it will likely display a warning about the server's certificate. This is because by default MongooseIM is configured with a freshly generated, self-signed certificate. We can click the Continue anyway button in order to proceed.

"},{"location":"tutorials/Jingle-SIP-setup/#adding-user-to-otalk","title":"Adding user to Otalk","text":"

Please follow the instructions on https://github.com/otalk/otalk-im-client#installing in order to compile and run the app. If all goes well, you should see the following message printed in the console:

demo.stanza.io running at: http://localhost:8000\n

This means that the app is hosted on http://localhost:8000.

At this point I also recommend opening wss://localhost:5285/ws-xmpp in the same browser. This endpoint works correctly only for WebSocket connections but most probably you will be prompted about the certificate. This is again due to the self-signed certificate. We need to add an exception for this certificate in order to successfully connect from Otalk.

Now let's open http://localhost:8000 where the Otalk app is hosted. In the Log in section put xmpp.user@xmpp.example in the JID field and test_pass in the Password field. The default WebSocket endpoint in the WebSocket or BOSH URL field needs to be changed to:

wss://localhost:5285/ws-xmpp\n

Mind the wss protocol, Otalk will not connect the user over WebSockets if for example https is put in the field.

Now we can hit the Go! button and the xmpp.user@xmpp.example will connect to MongooseIM.

"},{"location":"tutorials/Jingle-SIP-setup/#making-a-call","title":"Making a call","text":"

On the left side we can see that the user already has sip.user@sip.example in the roster and there should be a green dot indicating that the user is online. When we click on the contact, the Call button should appear allowing us to initiate the call.

In Jitsi, the following window should pop up:

Behind the scenes, the following SIP request was sent from MongooseIM to Routr.

INVITE sip:sip.user@sip.example:5060 SIP/2.0\nVia: SIP/2.0/TCP localhost:5600;rport;branch=z9hG4bK1HMB3o-3mbahM\nFrom: xmpp.user <sip:xmpp.user@xmpp.example>;tag=aVEBue\nTo: sip.user <sip:sip.user@sip.example>\nCall-ID: ae602f16-d57d-4452-b83e-36e54bb6d325\nCSeq: 159913767 INVITE\nMax-Forwards: 70\nContent-Length: 2243\nContact: <sip:xmpp.user@localhost:5600;ob;transport=tcp>;+sip.instance=\"<urn:uuid:f45950f1-70cd-229d-6c2b-8c85903ce14e>\"\nContent-Type: application/sdp\nSupported: outbound,100rel,path\nAllow: PRACK,INVITE,ACK,CANCEL,BYE,OPTIONS,INFO,UPDATE,SUBSCRIBE,NOTIFY,REFER,MESSAGE\n\nv=0\no=- 1531401304 1531401304 IN IP4 127.0.0.1\ns=nksip\nc=IN IP4 127.0.0.1\nt=0 0\na=group:BUNDLE sdparta_0 sdparta_1\nm=audio 1436 UDP/TLS/RTP/SAVPF 109 9 0 8 101\na=sendrecv\na=mid:sdparta_0\na=setup:actpass\na=fingerprint:sha-256 44:84:41:8F:B7:A3:B7:37:BA:00:26:5E:B1:D6:AB:D0:56:56:CF:53:F2:05:DB:99:DE:D4:1C:63:A4:68:58:EA\na=ice-pwd:49ad0f02b4f5181c9af3c4006575e071\na=ice-ufrag:a3cc96e2\na=rtcp-mux\na=extmap:3 urn:ietf:params:rtp-hdrext:sdes:mid\na=extmap:2/recvonly urn:ietf:params:rtp-hdrext:csrc-audio-level\na=extmap:1 urn:ietf:params:rtp-hdrext:ssrc-audio-level\na=rtpmap:109 opus/48000/2\na=fmtp:109 useinbandfec=1;stereo=1;maxplaybackrate=48000\na=rtpmap:9 G722/8000\na=rtpmap:0 PCMU/8000\na=rtpmap:8 PCMA/8000\na=rtpmap:101 telephone-event/8000\na=fmtp:101 0-15\na=ssrc:1698222108 cname:{ce7fa171-069e-db4f-ba41-cfa4455c1033}\na=ssrc:1698222108 msid:{788b64bb-c4fc-b644-89b0-89f69c78f8b0} {2ba61f91-abca-3e48-84b7-85b57e8fdfb5}\nm=video 1031 UDP/TLS/RTP/SAVPF 120 121 126 97\na=sendrecv\na=mid:sdparta_1\na=setup:actpass\na=fingerprint:sha-256 44:84:41:8F:B7:A3:B7:37:BA:00:26:5E:B1:D6:AB:D0:56:56:CF:53:F2:05:DB:99:DE:D4:1C:63:A4:68:58:EA\na=ice-pwd:49ad0f02b4f5181c9af3c4006575e071\na=ice-ufrag:a3cc96e2\na=rtcp-mux\na=extmap:5 urn:ietf:params:rtp-hdrext:toffset\na=extmap:4 http://www.webrtc.org/experiments/rtp-hdrext/abs-send-time\na=extmap:3 
urn:ietf:params:rtp-hdrext:sdes:mid\na=rtpmap:120 VP8/90000\na=fmtp:120 max-fr=60;max-fs=12288\na=rtcp-fb:120 goog-remb\na=rtcp-fb:120 ccm fir\na=rtcp-fb:120 nack pli\na=rtcp-fb:120 nack\na=rtpmap:121 VP9/90000\na=fmtp:121 max-fr=60;max-fs=12288\na=rtcp-fb:121 goog-remb\na=rtcp-fb:121 ccm fir\na=rtcp-fb:121 nack pli\na=rtcp-fb:121 nack\na=rtpmap:126 H264/90000\na=fmtp:126 packetization-mode=1;level-asymmetry-allowed=1;profile-level-id=42e01f\na=rtcp-fb:126 goog-remb\na=rtcp-fb:126 ccm fir\na=rtcp-fb:126 nack pli\na=rtcp-fb:126 nack\na=rtpmap:97 H264/90000\na=fmtp:97 level-asymmetry-allowed=1;profile-level-id=42e01f\na=rtcp-fb:97 goog-remb\na=rtcp-fb:97 ccm fir\na=rtcp-fb:97 nack pli\na=rtcp-fb:97 nack\na=ssrc:823938224 cname:{ce7fa171-069e-db4f-ba41-cfa4455c1033}\na=ssrc:823938224 msid:{788b64bb-c4fc-b644-89b0-89f69c78f8b0} {a7f87c8d-6002-fd4c-badb-13383c759e48}\n

And Routr sent the Ringing response code to MongooseIM as soon as the Jitsi app displayed the incoming call window:

SIP/2.0 180 Ringing\nCSeq: 159913767 INVITE\nCall-ID: ae602f16-d57d-4452-b83e-36e54bb6d325\nFrom: \"xmpp.user\" <sip:xmpp.user@xmpp.example>;tag=aVEBue\nTo: \"sip.user\" <sip:sip.user@sip.example>;tag=9b4c72a3\nVia: SIP/2.0/TCP localhost:5600;rport=54071;branch=z9hG4bK1HMB3o-3mbahM;received=10.152.1.27\nContact: \"sip.user\" <sip:sip.user@10.152.1.27:53697;transport=tcp;registering_acc=sip_example>\nUser-Agent: Jitsi2.10.5550Mac OS X\nContent-Length: 0\n
"},{"location":"tutorials/Jingle-SIP-setup/#summary","title":"Summary","text":"

The example above showcases how you can use the Jingle/SIP switch with the available open source software. Sonetel, who are this feature's sponsor, operate on a slightly different use case and utilize more of the functionality with their proprietary software. The current implementation makes the following assumptions:

  • The peer-to-peer stream is always encrypted. This means that MongooseIM expects element <fingerprint> as described in XEP-0320: Use of DTLS-SRTP in Jingle Sessions to be in the content description. Not every open source XMPP client supporting Jingle supports this encryption.
  • MongooseIM expects that the 200 OK response contains at least one ICE candidate to set the peer-to-peer connection up.

This makes the current implementation a bit limited, but on the other hand the basic integration between XMPP and SIP world is already there. Based on the current state it can be improved and extended if needed.

"},{"location":"tutorials/client-certificate/","title":"How to Set up SASL client certificate authentication","text":""},{"location":"tutorials/client-certificate/#overview","title":"Overview","text":"

Clients connected to MongooseIM may authenticate with their TLS certificates. This method uses the SASL EXTERNAL mechanism.

"},{"location":"tutorials/client-certificate/#server-side-prerequisites","title":"Server-side prerequisites","text":""},{"location":"tutorials/client-certificate/#properly-configure-client-to-server-c2s-listener","title":"Properly configure Client-to-server (C2S) listener","text":"

A server must request the certificate from a client, so you'll need to set verify_mode option to \"peer\" and provide a path to CA chain that may be used for client's certificate check (cacertfile option).

Please check the Listener modules page for more information or simply follow the examples at the end of this section.

"},{"location":"tutorials/client-certificate/#properly-configure-http-listener","title":"Properly configure http listener","text":"

SASL EXTERNAL authentication is also possible for WebSocketSecure and BOSH connections over HTTPS. Similarly as in the client-to-server case, the server must request the certificate from the client. In this case it's enabled by adding the following options to the tls option of listen.http :

  • tls.verify_mode = \"peer\" - this is to tell Erlang's SSL to request the cert from the client
  • tls.cacertfile = \"ca.pem\" - this is to tell Erlang's SSL where the CA cert file is in order to check if the cert is correctly signed

Please check HTTP-based services configuration for more details regarding http listener configuration.

"},{"location":"tutorials/client-certificate/#enable-sasl-external-mechanism","title":"Enable SASL EXTERNAL mechanism","text":"

A SASL EXTERNAL authentication mechanism is disabled by default. In order to enable it, please configure auth.sasl_mechanisms option in the MongooseIM config file.

[auth]\n  sasl_mechanisms = [\"external\"]\n

Obviously the list may be longer, if the system should support both the certificate and password based authentication.

The SASL EXTERNAL authentication mechanism requires a digital client certificate. This digital certificate should contain xmppAddr field(s), which is always checked first. If there is more than one JID specified in the xmppAddr fields, the client must include the authorisation entity which corresponds to one of the specified JIDs.

When no xmppAddr is specified, the cn (common name) field might be used to provide the client's username, but it is optional and can be configured with the sasl_external option in the auth section.

If the client certificate does not contain a JID, the client must provide one in authorisation entity.

For the details please refer to XEP-0178: Best Practices for Use of SASL EXTERNAL with Certificates.

"},{"location":"tutorials/client-certificate/#enable-compatible-authentication-method","title":"Enable compatible authentication method","text":"

You need to enable one of the following authentication methods by using the auth.methods option in the MongooseIM configuration file.

  • \"pki\" - accepts user credentials,
  • \"http\" - accepts user credentials if the provided certificate is known and valid
  • \"ldap\" - accepts user credentials if a corresponding user account exists in LDAP.
"},{"location":"tutorials/client-certificate/#self-signed-certificates","title":"Self-signed certificates","text":"

By default MongooseIM doesn't accept self-signed certs for the SASL-EXTERNAL authentication. For development purposes, it is possible to tell MongooseIM to accept them.

"},{"location":"tutorials/client-certificate/#self-signed-certificates-for-regular-tcptls-connections","title":"Self-signed certificates for regular TCP/TLS connections","text":"

In order to tell MongooseIM to accept self-signed certs, the listen.c2s.tls.verify_mode option needs to be configured like below:

[listen.c2s]\n  tls.verify_mode = \"selfsigned_peer\"\n  tls.disconnect_on_failure = false\n  tls.cacertfile = \"ca.pem\"\n

where the tls.disconnect_on_failure is a boolean with the following meaning only for just_tls:

  • true - the connection is closed if a certificate is invalid,
  • false - the connection isn't closed, but the certificate is not returned if it's invalid. This leads to an authentication failure but allows the client to choose a different auth method if available.

For fast_tls backend, the configuration is the same, only the disconnect_on_failure is ignored.

"},{"location":"tutorials/client-certificate/#self-signed-certificates-for-ws-or-bosh","title":"Self-signed certificates for WS or BOSH","text":"

In order to accept self-signed certs for WS or BOSH connections, the tls options for http listener must have the following configured:

[listen.http]\n  tls.verify_mode = \"selfsigned_peer\"\n  tls.cacertfile = \"ca.pem\"\n
"},{"location":"tutorials/client-certificate/#examples","title":"Examples","text":"

Certificate authentication only.

[listen.c2s]\n  port = 5222\n  (...)\n  tls.cacertfile = \"ca.pem\"\n  tls.verify_peer = true\n\n[listen.http]\n  port = 5285\n  (...)\n  tls.cacertfile = \"ca.pem\"\n  tls.verify_peer = true\n\n  [[listen.http.handlers.mod_bosh]]\n    host = \"_\"\n    path = \"/http-bind\"\n\n  [[listen.http.handlers.mod_websockets]]\n    host = \"_\"\n    path = \"/ws-xmpp\"\n\n[auth]\n  method = [\"pki\"]\n  sasl_mechanisms = [\"external\"]\n

Authentication with a client certificate (validated with provided CA chain) or password (validated with data stored in RDBMS).

[listen.c2s]\n  port = 5222\n  (...)\n  tls.cacertfile = \"ca.pem\"\n  tls.verify_peer = true\n\n[auth]\n  methods = [\"rdbms\", \"pki\"]\n  sasl_mechanisms = [\"scram_sha1\", \"external\"]\n
"},{"location":"tutorials/client-certificate/#client-certificate-prerequisites","title":"Client certificate prerequisites","text":"

SASL EXTERNAL will be offered by the server only when a client provides a valid certificate.

Please check documentation of a specific authentication backend you're going to use.

"},{"location":"tutorials/client-certificate/#usage-example-gajim","title":"Usage example - Gajim","text":"

Verified with Gajim 0.16.8, installed from package gajim-0.16.8-1.fc25.noarch.

"},{"location":"tutorials/client-certificate/#generate-client-certificate","title":"Generate client certificate","text":"
openssl genrsa -des3 -out rootCA.key 4096\nopenssl req -x509 -new -nodes -key rootCA.key -sha256 -days 1024 -out rootCA.crt\nopenssl genrsa -out client.key 2048\nopenssl req -new -key client.key -out client.csr # Remember to provide username as Common Name!\nopenssl x509 -req -in client.csr -CA rootCA.crt -CAkey rootCA.key -CAcreateserial -out client.crt -days 500 -sha256\nopenssl pkcs12 -export -inkey client.key -in client.crt -out client.p12\n
"},{"location":"tutorials/client-certificate/#configure-mongooseim","title":"Configure MongooseIM","text":"

See examples in the section above. We recommend using the first snippet for simplicity.

You don't need to pre-create a user account in order to log in with a certificate.

"},{"location":"tutorials/client-certificate/#add-an-account-in-gajim","title":"Add an account in Gajim","text":"
  1. Edit -> Accounts -> Add.
  2. Pick \"I already have an account I want to use\".
  3. Jabber ID is [Common Name from certificate]@localhost (domain is different if you've changed it in hosts option). Press \"Next\".
  4. Untick \"Connect when I press Finish\" and press \"Advanced\".
  5. Unfold \"Client certificate\" and choose the .p12 you've created earlier. Tick \"Certificate is encrypted\".
  6. Click \"Close\" and set status to \"Available\". Tell Gajim to ignore the unverified server certificate (by default it's self-signed).

If Gajim fails to connect, try to restart it. Version 0.16.8 sometimes \"forgets\" to ask for the client certificate password.

"},{"location":"tutorials/iOS_tutorial/","title":"Build a complete iOS messaging app using XMPPFramework","text":"

Read our blog posts:

  • Build a complete iOS messaging app using XMPPFramework - Tutorial Part 1
  • Build a complete iOS messaging app using XMPPFramework - Part 2
"},{"location":"tutorials/iOS_tutorial/#yaxt-yet-another-xmpp-tutorial","title":"YAXT??! Yet another XMPP tutorial?","text":"

Well, this is going to be another tutorial, but I\u2019m going to try to make it a little bit different. This is an XMPP tutorial from an iOS developer\u2019s perspective. I\u2019ll try to answer all the questions I had when I started working in this area. This journey is going to go from no XMPP knowledge at all to having a fully functional instant messaging iOS app using this cool protocol. We are going to be using the super awesome (yet overwhelming at the beginning\u2026) XMPPFramework library, and the idea is also to mix in some iOS concepts that you are going to need for your app.

"},{"location":"tutorials/iOS_tutorial/#whats-xmpp","title":"What\u2019s XMPP?","text":"

From Wikipedia: Extensible Messaging and Presence Protocol (XMPP) is a communications protocol for message-oriented middleware based on XML.

This basically means XMPP is a protocol for exchanging stuff. What kind of stuff? Messages and presences. We all know what messages are, but what about presences? A presence is just a way of sharing a \u201cstatus\u201d, that\u2019s it. You can be \u2018online\u2019, 'offline\u2019, 'having lunch\u2019, or whatever you want. Also there\u2019s another important word: Extensible meaning it can grow. It started as an instant messaging protocol and it has grown into multiple fields for example IoT (Internet of Things). And last, but not least: every piece of information we are going to exchange under this protocol is going to be XML. I can hear you complaining but\u2026 Come on, it\u2019s not that bad!

"},{"location":"tutorials/iOS_tutorial/#why-do-we-need-xmpp-why-not-just-rest","title":"Why do we need XMPP? Why not just REST?","text":"

Well what other options do we have? On the one hand, a custom solution means building everything from scratch, that takes time. On the other hand, we have XMPP, a super tested technology broadly used by millions of people every day, so we can say that\u2019s an advantage over a custom approach.

Every time I talk about XMPP, someone asks me 'Why not just REST?\u2019. Well, there is a misconception here. REST is not a protocol, it\u2019s just a way of architecting a networked application; it\u2019s just a standardized way of doing something (that I love btw). So let\u2019s change the question to something that makes more sense: \u201cWhy not just build a custom REST chat application?\u201d. The first thing that comes to my mind is what I already explained in the previous paragraph, but there is something else. How do I know when someone has sent me a message? For XMPP this is trivial: we have an open connection all the time so, as soon as a message arrives to the server, it will send us the message. We have a full-duplex. On the other hand, the only solution with REST is polling. We will need to ask the server for new messages from time to time to see if there is something new for us. That sucks. So, we will have to add a mechanism that allows us to receive the messages as soon as they are created, like SSE or WebSockets.

There is one more XMPP advantage over a custom REST chat application. REST uses HTTP, an application level protocol that is built on top of a transport level protocol: TCP. So every time you want to use your REST solution, you will need HTTP, a protocol that is not always available everywhere (maybe you need to embed this in a cheap piece of hardware?). Besides, we have XMPP built on top of TCP that\u2019s going to be always available.

"},{"location":"tutorials/iOS_tutorial/#whats-the-basic-stuff-i-need-to-know-to-get-started","title":"What\u2019s the basic stuff I need to know to get started?","text":"

Well, you know a lot already but let\u2019s make a list. Lists are always good:

  • XMPP is built on top of TCP. It keeps an open connection all the time.
  • Client/Server architecture. Messages always go through a server.
  • Everything we send and receive is going to be XML and it\u2019s called Stanza.
  • We have three different types of stanzas: iq, message and presence.
  • Every individual on the XMPP network is univocally identified by a JID (Jabber ID).
  • All the stanzas are contained in a Stream. Let\u2019s imagine the Stream as a white canvas where you and the server write the stanzas.
  • Stream, iq, message and presence are the core of XMPP. You can find everything perfectly detailed in RFC6120 XMPP can be extended to accomplish different stuff. Each extension is called XEP (XMPP Extension Protocol).
"},{"location":"tutorials/iOS_tutorial/#whats-a-jid","title":"What\u2019s a JID?","text":"

Jabber ID (JID) is how we univocally identify each individual in XMPP. It is the address to where we are going to send our stanzas.

This is how a JID looks like:

  • localpart: This is your username.
  • domainpart: Server name where the localpart resides.
  • resourcepart: This is optional, and it identifies a particular client for the user. For example: I can be logged in with andres@erlang-solutions.com on my iPhone, on my Android and on my mac at the same time\u2026 So all these will be the same localpart + domainpart but different resourcepart

I\u2019m sure you have already noticed how similar the JID looks to a standard email address. This is because you can connect multiple servers together and the messages are routed to the right user on the right server, just as email works. Pretty cool, right?

Sometimes you will see we have a JID with just the domain part. Why?! Because it\u2019s also possible to send stanzas to a service instead of a user. A service? What\u2019s a service?! Services are different pieces of an XMPP server that offer you some special functionality, but don\u2019t worry about this right now, just remember: you can have JIDs without a localpart.

"},{"location":"tutorials/iOS_tutorial/#whats-a-stanza","title":"What\u2019s a Stanza?","text":"

Stanza is the name of the XML pieces that we are going to be sending and receiving. The defined stanzas are: <message/>, <presence/> and <iq/>.

"},{"location":"tutorials/iOS_tutorial/#message","title":"<message/>","text":"

This is a basic <message/> stanza. Every time you want to send a message to someone (a JID), you will have to send this stanza:

<message from='andres@erlang-solutions.com/iphone' to='juana@erlang-solutions.com' type='chat'>\n    <body>Hey there!</body>\n</message>\n
"},{"location":"tutorials/iOS_tutorial/#iq","title":"<iq/>","text":"

It stands for Info/Query. It\u2019s a query-action mechanism, you send an iq and you will get a response to that query. You can pair the iq-query with the iq-response using the stanza id.

For example, we send an iq to the server to do something (don\u2019t pay attention to what we want to do\u2026 you just need to know there is an iq stanza and how the mechanism works):

<iq to='erlang-solutions.com' type='get' id='1'>\n  <query xmlns='http://jabber.org/protocol/disco#items'/>\n</iq>\n

And we get back another iq with the same id with the result of the previous query:

<iq from='erlang-solutions.com' to='ramabit@erlang-solutions.com/Andress-MacBook-Air' id='1' type='result'>\n    <query xmlns='http://jabber.org/protocol/disco#items'>\n        <item jid='muc.erlang-solutions.com'/>\n        <item jid='muclight.erlang-solutions.com'/>\n        <item jid='pubsub.erlang-solutions.com'/>\n    </query>\n</iq>\n
"},{"location":"tutorials/iOS_tutorial/#presence","title":"<presence/>","text":"

Used to exchange presence information, as you could have imagined. Usually presences are sent from the client to the server and broadcasted by it. The most basic, yet valid presence, to indicate to the server that a user is available is:

<presence/>\n

After a successful connection, you are not going to receive any <message/> until you make yourself available sending the previous presence.

If you want to make yourself unavailable, you just have to send:

<presence type=\"unavailable\"></presence>\n

If we want to make the presences more useful, we can send something like this:

<presence>\n      <status>On vacation</status>\n</presence>\n
"},{"location":"tutorials/iOS_tutorial/#whats-a-stream","title":"What\u2019s a Stream?","text":"

Before answering this, let\u2019s refresh our mind. What\u2019s a Unix socket? From Wikipedia: A socket is a special file used for inter-process communication. It allows communication between two processes. So a socket is a file that can be written to by two processes (on the same computer or on different computers in the same network). So the client is going to write to this file, and so will the server.

Ok, but how is a socket related to a Stream? Well, we are going to be connected to a server using a socket, therefore we are going to have a 'shared file\u2019 between the client and the server. This shared file is a white canvas where we are going to start writing our XML stanzas. The first thing we are going to write to this file is an opening <stream> tag! And there you go\u2026 that\u2019s our stream.

Perfect, I understand what a stream is, but I still don\u2019t understand how to send a message to the server. Well, the only thing we need to do to send a message is writing a stanza in our shared file. But what happens when the server wants to send me a message? Simple: it will write the message in the 'shared file\u2019.

"},{"location":"tutorials/iOS_tutorial/#are-we-ok-so-far","title":"Are we ok so far?","text":"

I\u2019m sure at this point you have questions like:

  • \u201cWhat?! An active TCP connection open all the time? I\u2019m used to REST! How am I going to do that?!\u201d
  • Easy, you don\u2019t have to care about that any more! That\u2019s why we are going to use the library, and it will take care of that.
  • \u201cYou said nothing about how to connect to the server!\u201d
  • Believe me, you don\u2019t have to care about this either. If we start adding all this info, we are going to get crazy. Trust me, I\u2019ve been there.
  • \u201cWhat about encrypted messages? We need security! How are we going to handle this?\u201d
  • Again, you don\u2019t have to care about this at this point. Baby steps!

You just need to be able to answer: \u201cWhat\u2019s XMPP?\u201d, \u201cHow do you send a message?\u201d, \u201cHow do you change your status in XMPP?\u201d, \u201cHow do you ask something to the server?\u201d, \u201cWhat\u2019s a Stream?\u201d. If you can answer all that, you are WAY better than me when I started.

"},{"location":"tutorials/iOS_tutorial/#first-steps-installing-the-xmppframework-library","title":"First steps: installing the XMPPFramework library","text":"

Let\u2019s create a brand new Xcode project and install the library. In this tutorial we are going to be using Swift 3. The easiest way to integrate XMPPFramework to the project is using CocoaPods.

Let\u2019s create our Podfile using the pod init command in the folder where our .xcodeproj lives. There are thousands of forks but the maintained one is the original: robbiehanson/XMPPFramework.

So let\u2019s add the pod to our Podfile and remember to uncomment the use_frameworks!.

use_frameworks!\n\ntarget 'CrazyMessages' do\n    pod 'XMPPFramework', :git=> 'git@github.com:robbiehanson/XMPPFramework.git', :branch => 'master'\nend\n

Then pod install and CocoaPods is going to do its magic and create a .xcworkspace with the library integrated. Now we just need to import XMPPFramework in the files we want to use the library and that\u2019s it.

"},{"location":"tutorials/iOS_tutorial/#starting-to-build-our-instant-messaging-app","title":"Starting to build our Instant Messaging app","text":"

The most important thing in an XMPP application is the stream, that\u2019s where we are going to \u201cwrite\u201d our stanzas, so we need an object that is going to hold it. We are going to create an XMPPController class with an XMPPStream:

import Foundation\nimport XMPPFramework\n\nclass XMPPController: NSObject {\n    var xmppStream: XMPPStream\n\n    init() {\n        self.xmppStream = XMPPStream()  \n    }\n\n}\n

We are dealing with a highly asynchronous library here. For every action we are going to have a response some time in the future. To handle this XMPPFramework defines the XMPPStreamDelegate. So implementing that delegate is going to help us answer lots of different questions like: \u201cHow do I know when XMPP has successfully connected?\u201d, \u201cHow do I know if I\u2019m correctly authenticated?\u201d, \u201cHow do I know if I received a message?\u201d. XMPPStreamDelegate is your friend!

So we have our XMPPController and our XMPPStream, what do we need to do now? Configure our stream with the hostName, port and ourJID. To provide all this info to the controller we are going to make some changes to the init to be able to receive all these parameters:

enum XMPPControllerError: Error {\n    case wrongUserJID\n}\n\nclass XMPPController: NSObject {\n    var xmppStream: XMPPStream\n\n    let hostName: String\n    let userJID: XMPPJID\n    let hostPort: UInt16\n    let password: String\n\n    init(hostName: String, userJIDString: String, hostPort: UInt16 = 5222, password: String) throws {\n        guard let userJID = XMPPJID(string: userJIDString) else {\n            throw XMPPControllerError.wrongUserJID\n        }\n\n        self.hostName = hostName\n        self.userJID = userJID\n        self.hostPort = hostPort\n        self.password = password\n\n        // Stream Configuration\n        self.xmppStream = XMPPStream()\n        self.xmppStream.hostName = hostName\n        self.xmppStream.hostPort = hostPort\n        self.xmppStream.startTLSPolicy = XMPPStreamStartTLSPolicy.allowed\n        self.xmppStream.myJID = userJID\n\n        super.init()\n\n        self.xmppStream.addDelegate(self, delegateQueue: DispatchQueue.main)\n    }\n}\n

Our next step is going to actually connect to a server and authenticate using our userJID and password, so we are adding a connect method to our XMPPController.

func connect() {\n    if !self.xmppStream.isDisconnected() {\n        return\n    }\n\n   try! self.xmppStream.connect(withTimeout: XMPPStreamTimeoutNone)\n}\n

But how do we know we have successfully connected to the server? As I said earlier, we need to check for a suitable delegate method from XMPPStreamDelegate. After we connect to the server we need to authenticate so we are going to do the following:

extension XMPPController: XMPPStreamDelegate {\n\n    func xmppStreamDidConnect(_ stream: XMPPStream!) {\n        print(\"Stream: Connected\")\n        try! stream.authenticate(withPassword: self.password)\n    }\n\n    func xmppStreamDidAuthenticate(_ sender: XMPPStream!) {\n        self.xmppStream.send(XMPPPresence())\n        print(\"Stream: Authenticated\")\n    }\n}\n

We need to test this. Let\u2019s just create an instance of XMPPController in the AppDelegate to test how it works:

try! self.xmppController = XMPPController(hostName: \"host.com\",\n                                     userJIDString: \"user@host.com\",\n                                          password: \"password\")\nself.xmppController.connect()\n

If everything goes fine we should see two messages in the logs but of course that\u2019s not happening, we missed something. We never told our xmppStream who its delegate object was! We need to add the following line after the super.init()

self.xmppStream.addDelegate(self, delegateQueue: DispatchQueue.main)\n

If we run the app again:

Stream: Connected\nStream: Authenticated\n

Success! We have our own XMPPController with a fully functional and authenticated stream!

Something that may catch your attention is how we are setting our delegate, we are not doing:

self.xmppStream.delegate = self\n

Why not? Because we can \u201cbroadcast\u201d the events to multiple delegates, we can have 10 different objects implementing those methods. Also we can tell what\u2019s the thread where we want to receive that call, in the previous example we want it in the main thread.

"},{"location":"tutorials/iOS_tutorial/#getting-a-log-in","title":"Getting a Log In","text":"

Our app is super ugly, let\u2019s put on some makeup! We have nothing but an XMPPController and a hardcoded call in the AppDelegate. I\u2019m going to create a ViewController that is going to be presented modally as soon as the app starts, that ViewController will have the necessary fields/info to log in to the server.

I\u2019m going to create a LogInViewControllerDelegate that is going to tell our ViewController that the Log in button was pressed and that\u2019s it. In that delegate implementation we are going to create our XMPPController, add the ViewController as a delegate of the XMPPStream and connect!

extension ViewController: LogInViewControllerDelegate {\n\n    func didTouchLogIn(sender: LogInViewController, userJID: String, userPassword: String, server: String) {\n        self.logInViewController = sender\n\n        do {\n            try self.xmppController = XMPPController(hostName: server,\n                                                     userJIDString: userJID,\n                                                     password: userPassword)\n            self.xmppController.xmppStream.addDelegate(self, delegateQueue: DispatchQueue.main)\n            self.xmppController.connect()\n        } catch {\n            sender.showErrorMessage(message: \"Something went wrong\")\n        }\n    }\n}\n

Why are we adding ViewController as a delegate of XMPPStream if our XMPPController already has that delegate implemented? Because we need to know if this connection and authentication was successful or not in our ViewController so we are able to dismiss the LogInViewController or show an error message if something failed. This is why being able to add multiple delegates is so useful.

So as I said, I\u2019m going to make ViewController conform to the XMPPStreamDelegate:

extension ViewController: XMPPStreamDelegate {\n\n    func xmppStreamDidAuthenticate(_ sender: XMPPStream!) {\n        self.logInViewController?.dismiss(animated: true, completion: nil)\n    }\n\n    func xmppStream(_ sender: XMPPStream!, didNotAuthenticate error: DDXMLElement!) {\n        self.logInViewController?.showErrorMessage(message: \"Wrong password or username\")\n    }\n\n}\n

And that\u2019s it! Our app can log in to our server as I\u2019m showing here:

"},{"location":"tutorials/iOS_tutorial/#logging","title":"Logging!","text":"

We\u2019ve been talking a lot about XMPP, stanzas and streams\u2026 but is there a way I can see the stream? Yes sir! XMPPFramework has got us covered!

XMPPFramework ships with CocoaLumberJack, a pretty well known logging framework. We just need to configure it, set the logging level we want and that\u2019s it. Logs are going to start showing up!

"},{"location":"tutorials/iOS_tutorial/#configuring-cocoalumberjack","title":"Configuring CocoaLumberjack","text":"

This is a really simple task, you just need to add to your func application(application: UIApplication, didFinishLaunchingWithOptions ... method the following line (remember to import CocoaLumberjack):

DDLog.add(DDTTYLogger.sharedInstance(), with: DDLogLevel.all)\n

I\u2019m not going to paste here all the connection process log because it makes no sense to try to understand what\u2019s going on at this stage of our learning. But I think showing what some stanzas look like is a good idea. To do this I\u2019m going to be sending messages from Adium.

I\u2019m going to send this <message/>:

<message to=\"test.user@erlang-solutions.com\">\n    <body>This is a message sent from Adium!</body>\n</message>\n

Let\u2019s see what it looks like when it reaches our app:

<message xmlns=\"jabber:client\" from=\"iamadium@erlang-solutions.com/MacBook-Air\" to=\"test.user@erlang-solutions.com\">\n   <body>This is a message sent from Adium!</body>\n</message>\n

Let\u2019s send a <presence/> from Adium:

<presence>\n    <status>On vacation</status>\n</presence>\n

We are receiving:

<presence xmlns=\"jabber:client\" from=\"iamadium@erlang-solutions.com/MacBook-Air\" to=\"test.user@erlang-solutions.com\">\n   <status>On vacation</status>\n</presence>\n

No doubts at all right? We send something and we receive it on the other end! That\u2019s it!

"},{"location":"tutorials/iOS_tutorial/#test-time","title":"Test Time!","text":"

I want to be sure that you are understanding and following everything and not just copy and pasting from a tutorial (as I usually do \ud83d\ude4a). So if you are able to answer these questions you are on a good track!

  • Why am I sending a presence after successfully authenticating? What happens if I don\u2019t send it?
  • What happens if I write a wrong server URL in the Log In form? How do I fix this problem if there is a problem\u2026
  • How do I detect if suddenly the stream is disconnected from the server? (maybe a network outage?)
  • How do I detect if the user/password was wrong?

If you need help leave a message!

"},{"location":"tutorials/push-notifications/MongoosePush-setup/","title":"Push notifications with MongoosePush","text":"

MongoosePush is a simple RESTful service written in Elixir. It provides the ability to send push notifications to FCM (Firebase Cloud Messaging) and/or APNS (Apple Push Notification Service) via their HTTP/2 API.

To take advantage of MongoosePush's functionality, you will need to enable the mod_push_service_mongoosepush module: this module acts as a bridge between the push_notifications hook and MongoosePush itself.

"},{"location":"tutorials/push-notifications/MongoosePush-setup/#getting-started","title":"Getting started","text":"

To enable integration with MongoosePush, it is as simple as the next two steps. First, you need to define a pool of HTTPS connections to MongoosePush in the outgoing_pools section:

[outgoing_pools.http.mongoose_push_http]\n  scope = \"global\"\n  strategy = \"available_worker\"\n\n  [outgoing_pools.http.mongoose_push_http.connection]\n    host = \"https://localhost:8443\"\n

And second, you need to add mod_push_service_mongoosepush to the modules section in the config file:

[modules.mod_push_service_mongoosepush]\n  pool_name = \"mongoose_push_http\"\n  api_version = \"v3\"\n

Here, we assume that MongoosePush will be available on the localhost on port 8443, which is the default one \u2014\u00a0note the host option in the outgoing pool definition. Next we enable mod_push_service_mongoosepush. The first option is the name of the HTTP pool to use and the second one is the version of MongoosePush's API (\"v2\" or \"v3\" are supported).

And that's it, we've just completed the entire MongooseIM configuration. All we need to do now is to set up MongoosePush.

"},{"location":"tutorials/push-notifications/MongoosePush-setup/#starting-mongoosepush","title":"Starting MongoosePush","text":"

The easiest way to start MongoosePush is using its docker image. But before you can set MongoosePush up, you need a FCM application token and/or an APNS application certificate. You can get the FCM token here and the easiest way of getting an APNS application certificate is by running this script (please note that you need the certificate in pem format).

After you get the FCM application token and/or the APNS application certificate, you can prepare to start MongoosePush. Firstly, prepare the following files structure:

  • priv/
    • ssl/
      • rest_cert.pem - The REST endpoint certificate
      • rest_key.pem - private key for the REST endpoint certificate
    • apns/
      • prod_cert.pem - Production APNS app certificate
      • prod_key.pem - Production APNS app certificate's private key
      • dev_cert.pem - Development APNS app certificate
      • dev_key.pem - Development APNS app certificate's private key
    • fcm/
      • token.json - FCM service account JSON file

If your FCM app token is MY_FCM_SECRET_TOKEN and you have the priv directory with all certificates in the current directory, start MongoosePush with the following command:

docker run -v `pwd`/priv:/opt/app/priv \\\n  -e PUSH_FCM_APP_FILE=\"MY_FCM_SECRET_TOKEN\" \\\n  -e PUSH_HTTPS_CERTFILE=\"/opt/app/priv/ssl/rest_cert.pem\" \\\n  -e PUSH_HTTPS_KEYFILE=\"/opt/app/priv/ssl/rest_key.pem\" \\\n  -it --rm mongooseim/mongoose-push:2.0.0\n

If you don't want to use either APNS or FCM, you simply need to pass PUSH_APNS_ENABLED=0 or PUSH_FCM_ENABLED=0 respectively as additional env variables in your docker run command. For more advanced options and configuration please refer to \"Quick start / Configuring\" in MongoosePush's README.md.

When your MongoosePush docker is up and running, Push Notifications can be used in your MongooseIM instance.

"},{"location":"tutorials/push-notifications/Push-notifications-client-side/","title":"Using push notifications on the client side","text":"

There are just a few things the XMPP client application needs to receive the push notifications. Depending on whether you plan to use PubSub-full or PubSub-less configuration, some of the steps may be unnecessary.

"},{"location":"tutorials/push-notifications/Push-notifications-client-side/#registering-with-a-push-service-provider","title":"Registering with a Push Service provider","text":"

First, the client application has to get a device-specific token from the Push Service Provider (FCM or APNS). This process is different, depending on the platform, so please consult your Push Service Provider's manual to see how to get this token. For example, here you can learn about setting up FCM on Android platform and here you can learn about setting up APNS on iOS platform.

After this step, your application shall be able to receive FCM or APNS token - it will be required in the next step of this tutorial.

"},{"location":"tutorials/push-notifications/Push-notifications-client-side/#setting-up-an-xmpp-pubsub-node","title":"Setting up an XMPP pubsub node","text":"

This step is specific to the PubSub-full push configuration that you chose for your MongooseIM server. If you're running a PubSub-less configuration, skip to this point.

"},{"location":"tutorials/push-notifications/Push-notifications-client-side/#creating-a-new-push-node","title":"Creating a new push node","text":"

In this example mypubsub.com is a domain of the MongooseIM server that has mod_pubsub enabled with the push node support. The client sends the following stanza to the server:

<iq type='set'\n    to='pubsub.mypubsub.com'\n    id='create1'>\n  <pubsub xmlns='http://jabber.org/protocol/pubsub'>\n    <create node='punsub_node_for_my_private_iphone' type='push'/>\n    <configure>\n      <x xmlns='jabber:x:data' type='submit'>\n        <field var='FORM_TYPE' type='hidden'>\n          <value>http://jabber.org/protocol/pubsub#node_config</value>\n        </field>\n        <field var='pubsub#access_model'>\n          <value>whitelist</value>\n        </field>\n        <field var='pubsub#publish_model'>\n          <value>publishers</value>\n        </field>\n      </x>\n    </configure>\n  </pubsub>\n</iq>\n

The pubsub.mypubsub.com will be used as a gateway for all notifications and will pass them through to the APNS and/or FCM.

The most important and only difference from the standard node creation is the type='push' part of the create element. According to XEP-0357: Push Notifications, a PubSub node is required to route the push notification mechanism. This implies you need a node that will handle your push notifications, hence we create a node called punsub_node_for_my_private_iphone. This node should be unique to the device and you may reuse nodes already created this way. The token obtained from APNS or FCM is a good option to ensure this uniqueness, by either using it directly or within some custom node name generation. It is also important from the security perspective to configure the node with:

  • access_model set to whitelist so only affiliated users can access the node.
  • publish_model set to publishers so only users with publisher or publisher_only role can publish notifications.
"},{"location":"tutorials/push-notifications/Push-notifications-client-side/#adding-the-servers-jid-to-allowed-publishers","title":"Adding the server's JID to allowed publishers","text":"

Push notifications to the push node are addressed from your server's JID. If the push node was configured with the above recommended options, you need to allow your server's JID to publish notifications to that node. Considering your JID is alice@mychat.com, your server's JID is just mychat.com. The following stanza sent to the just created push node will allow your server JID to publish notifications:

<iq to='pubsub.mypubsub.com'\n    type='set'\n    id='wy6Hibg='\n    from='alice@mychat.com/resource'>\n    <pubsub xmlns='http://jabber.org/protocol/pubsub#owner'>\n        <affiliations node='punsub_node_for_my_private_iphone'>\n            <affiliation jid='mychat.com' affiliation='publish-only'/>\n        </affiliations>\n    </pubsub>\n</iq>\n
"},{"location":"tutorials/push-notifications/Push-notifications-client-side/#enabling-push-notifications","title":"Enabling push notifications","text":"

The next and the last step is to enable push notifications on the server that handles your messages (and has mod_event_pusher_push enabled). Let's assume this server is available under the domain mychat.com.

To enable push notifications in the simplest configuration, just send the following stanza:

<iq type='set' id='x43'>\n  <enable xmlns='urn:xmpp:push:0' jid='pubsub.mypubsub.com' node='punsub_node_for_my_private_iphone'>\n    <x xmlns='jabber:x:data' type='submit'>\n      <field var='FORM_TYPE'><value>http://jabber.org/protocol/pubsub#publish-options</value></field>\n      <field var='service'><value>apns</value></field>\n      <field var='device_id'><value>your_pns_device_token</value></field>\n      <field var='silent'><value>false</value></field>\n      <field var='topic'><value>some_apns_topic</value></field>\n      <field var='priority'><value>some_priority</value></field>\n    </x>\n  </enable>\n</iq>\n

We have now enabled push notifications to be sent to the pubsub.mypubsub.com domain on the node punsub_node_for_my_private_iphone created previously, or in the case of PubSub-less, for whatever unique node name we give here, for example any variation of the token obtained from APNS or FCM. Please note that publish-options are specific to various XMPP Push Services.

"},{"location":"tutorials/push-notifications/Push-notifications-client-side/#publish-options","title":"Publish options","text":"

For mod_push_service_mongoosepush the next publish-options are mandatory:

  • device_id - device token (here: your_pns_device_token) that you received from your push notification service provider (as described in Registering with Push Service provider)
  • service - push notification service provider name (apns or fcm)

There are also some other publish-options supported:

  • mode - which may be either prod or dev (default to prod). Decides which connection pool type on MongoosePush shall be used. This may be used when APNS on MongoosePush is configured to work with both production and development certificate.
  • click_action - action to perform when notification is clicked on the device. activity on Android and category on iOS. Please refer to your platform / push notification service provider for more info.
  • topic - currently only used with APNS. The value is passed to APNS as topic header. For more information please refer to APNS documentation.
  • silent - if set to true, all notifications will be \"silent\". This means that only the data payload will be sent to the push notifications provider with no notification. The data payload will contain all notification fields as defined in XEP-0357: Push Notifications.
  • priority \u2014 which may be either normal or high, and if not given, defaults to normal. This value will set the push notification priority. Please refer to FCM / APNS documentation for more details on those values.
  • sound - sound that should be played when a notification arrives. Please refer to FCM/APNS documentation for more details.
  • mutable_content - only applicable to APNS. If set to true, sets \"mutable-content=1\" in the APNS payload.
  • time_to_live - only applicable to FCM. Maximum lifespan of an FCM notification. Please refer to the FCM documentation for more details.

Any other publish-options are ignored by mod_push_service_mongoosepush.

"},{"location":"tutorials/push-notifications/Push-notifications-client-side/#disabling-push-notifications","title":"Disabling push notifications","text":"

Disabling push notifications is very simple. Just send the following stanza to your XMPP chat server:

<iq type='set' id='x44'>\n  <disable xmlns='urn:xmpp:push:0' jid='pubsub.mypubsub.com' node='punsub_node_for_my_private_iphone'/>\n</iq>\n

You may skip the node='punsub_node_for_my_private_iphone' to globally disable push notifications on all nodes that are registered with your JID. This may be used to disable push notifications on all your devices.

"},{"location":"tutorials/push-notifications/Push-notifications-client-side/#communication-overview","title":"Communication overview","text":"

One picture is worth a thousand words, so here are two diagrams showing the typical communication when using push notifications:

"},{"location":"tutorials/push-notifications/Push-notifications-client-side/#pubsub-full","title":"PubSub-full","text":""},{"location":"tutorials/push-notifications/Push-notifications-client-side/#pubsub-less","title":"PubSub-less","text":""},{"location":"tutorials/push-notifications/Push-notifications/","title":"How to set up Push Notifications","text":"

Push notifications are the bread and butter of the modern mobile experience, and MongooseIM has support for them. When used together with MongoosePush, you get out-of-the-box push notifications for FCM (Firebase Cloud Messaging) and APNS (Apple Push Notification Service) providers. And it's easy to extend it to any other protocols of your choice.

You might also want to read about the push notification's client side configuration.

All push notification mechanisms use mod_event_pusher_push as a backend implementation, read the relevant documentation to know more about it.

"},{"location":"tutorials/push-notifications/Push-notifications/#xep-0357-push-notifications","title":"XEP-0357: Push Notifications","text":"

Server side push notification support is fully compliant with XEP-0357: Push Notifications, which defines several components that need to work together in order to provide clients with working push notifications. However, there's just one non-optimal detail required by the aforementioned XEP: that push notifications being a PubSub service \u2014 we can do better than that.

If you're already familiar with the workings of XEP-0357: Push Notifications, make sure to have a look at our PubSub-less enhancement.

As it is always said, one picture is worth a thousand words:

Who does what is highly configurable. You may use MongooseIM as the XMPP server clients connect to, and send the push XMPP stanzas to a different server that will take care of the push business; or you might use MongooseIM as the remote XMPP-PubSub server that does such business. Note that the XEP doesn't enforce the push IQ stanza format, so whichever setup is used, you need to take care of the producing and processing of these stanzas.

You might also use MongooseIM as both, or you might even do both things within a single MongooseIM node (the most common setup!). Or, for the best performance, you might just skip that PubSub node altogether. While the whole setup can be incredibly extensible, we see the following straightforward uses of it.

"},{"location":"tutorials/push-notifications/Push-notifications/#xep-0357-compliant-with-local-pubsub","title":"XEP-0357 compliant with local PubSub","text":"

This is, historically, the most common setup. It allows your clients to enable push notifications via a local PubSub, and the IQ stanza is routed internally.

A direct connection to a push service (e.g. MongoosePush) must be configured on the same MongooseIM node. Check out this tutorial on how to setup MongoosePush.

[modules.mod_pubsub]\n  plugins = [\"push\"] # mandatory minimal config\n\n[modules.mod_event_pusher.push]\n  backend = \"mnesia\" # optional, default\n  wpool.workers = 200 # optional\n  plugin_module = \"mod_event_pusher_push_plugin_defaults\" # optional, default\n
"},{"location":"tutorials/push-notifications/Push-notifications/#advantages","title":"Advantages","text":"
  • Completely XEP-0357 compliant, and therefore compatible with any compliant 3rd party client library
  • No need to have two different servers
"},{"location":"tutorials/push-notifications/Push-notifications/#drawbacks","title":"Drawbacks","text":"
  • Less efficient (PubSub has a considerable impact on heavily loaded systems)
  • More load within a single node
  • Harder to customise
"},{"location":"tutorials/push-notifications/Push-notifications/#mongooseim-as-a-pubsub-less-xmpp-server","title":"MongooseIM as a PubSub-less XMPP server","text":"

PubSub is completely bypassed and clients don't need to create a push node \u2014 if they attempt to do so, and PubSub is not configured, the server would respond with an error stanza. They only have to provide the virtual PubSub address in the enable stanza, and node name can be anything unique. In order to ensure uniqueness the APNS/FCM token can be used. Note that the token must be provided as a publish option anyway.

A direct connection to a push service (e.g. MongoosePush) must be configured on the same MongooseIM node. Check out this tutorial on how to setup MongoosePush.

[modules.mod_event_pusher.push]\n  backend = \"mnesia\" # optional, default\n  wpool.workers = 200 # optional\n  plugin_module = \"mod_event_pusher_push_plugin_defaults\" # optional, default\n  virtual_pubsub_hosts = [\"pubsub.@HOST@\"]\n
"},{"location":"tutorials/push-notifications/Push-notifications/#advantages_1","title":"Advantages","text":"
  • No need to use PubSub at all
  • More efficient (PubSub has a considerable impact on heavily loaded systems)
  • Simpler client-side usage \u2014 Read about the client side configuration here
"},{"location":"tutorials/push-notifications/Push-notifications/#drawbacks_1","title":"Drawbacks","text":"
  • If the client application is built to create the push PubSub node, this might require a migration for such a client \u2014 as it attempts to create the node, the server will answer with an IQ error stanza. If migrating the client side is a problem, there's a solution for that in the module section
"},{"location":"tutorials/push-notifications/Push-notifications/#virtual-pubsub-hosts","title":"Virtual PubSub hosts","text":"

These domains will shadow any identical domain configured for PubSub, stealing any notification published to it. It enables easy migration from PubSub-full deployments to PubSub-less variants. Read more in the relevant section.

"},{"location":"tutorials/push-notifications/Push-notifications/#overview-of-all-the-involved-mongooseim-components","title":"Overview of all the involved MongooseIM components","text":"

The components that make push notifications possible in MongooseIM comprise the following architecture:

PubSub-full setup PubSub-less setup"},{"location":"tutorials/push-notifications/Push-notifications/#mod_event_pusher_push","title":"mod_event_pusher_push","text":"

The first component that we need to configure in MongooseIM is the mod_event_pusher_push module.

"},{"location":"tutorials/push-notifications/Push-notifications/#mod_push_service_mongoosepush","title":"mod_push_service_mongoosepush","text":"

A connector to MongoosePush application. You can read more about it here.

"},{"location":"tutorials/push-notifications/Push-notifications/#mod_pubsubs-push-node","title":"mod_pubsub's push node","text":"

According to the XEP-0357: Push Notifications, all notifications generated via the module we have just enabled (i.e. mod_event_pusher_push) have to be send to a push enabled publish-subscribe node. In order to allow clients to allocate such a node, we need to enable it in our mod_pubsub on the MongooseIM server that will communicate with the XMPP Push Service.

"},{"location":"user-guide/Features/","title":"MongooseIM Features","text":"

MongooseIM is Erlang Solutions' robust, scalable and efficient XMPP server, aimed at large installations. Specifically designed for enterprise purposes, it is fault-tolerant and can utilise the resources of multiple clustered machines.

Some traits that make it unique include:

  • Massive scalability: simple growth through adding nodes provides cost-effectiveness as well as great resource utilisation.
  • Platform approach: designed with consistency, end-to-end battle testing across the whole ecosystem (all server and client components, and tools) can be performed.
  • Dynamic domains: thanks to the support for multi-tenancy, it is possible to set up thousands of domains dynamically without a noticeable performance overhead.
  • Code quality: extensive refactoring, substantial optimisations, continuous integration and deployment.
  • Extensive testing: automated continuous functional code coverage, integration testing, end-to-end testing with real clients.
  • Unique openness: no proprietary extensions, fully open source, fully open standards.
  • Contributions to (XMPP Standards Foundation): implementations of XEPs, innovations contributed.
  • Professional support and flexible customer service.
  • Contributions to third party open source projects: strengthening the Erlang and XMPP ecosystems.
"},{"location":"user-guide/Features/#architecture","title":"Architecture","text":"

MongooseIM brings configurability, scalability and fault-tolerance to the core feature of XMPP \u2013 routing messages. Its architecture is based on a set of pluggable extension modules that enable different features, including:

  • Websockets: long-lived connections in the browser
  • BOSH: HTTP long-polling
  • MUC (Multi-User Chat): group chat
  • Rosters: contact list, and subscriptions to users' presences
  • MAM: Message Archive Management
  • Message Carbons: for multi-device, real-time copies of all messages
  • Last activity
  • Metrics
  • Offline messages
  • Privacy settings
  • vCards: user profiles

This modular architecture allows high customisability and easy access to the required features.

MongooseIM enables authenticating users using external or internal databases (Mnesia, RDBMS, NoSQL), LDAP, HTTP or external scripts. It also allows connecting anonymous users, when required.

For storing persistent data, MongooseIM uses Mnesia (the distributed internal Erlang database), relational databases: MySQL, PostgreSQL or NoSQL alternative: Cassandra. Please take a look at database backends configurations to learn more. If necessary, MongooseIM can be customised to work with a different database. You can contact us to learn more.

Basic MongooseIM session storage is handled in Mnesia, but using Redis is also possible. It is also possible to store user Message Archives using ElasticSearch or Cassandra.

"},{"location":"user-guide/Features/#deployment-and-management","title":"Deployment and management","text":"

MongooseIM can be deployed for a number of scenarios fitting your needs. The simplest installation setup consists of a single MongooseIM node using Mnesia, so it does not require any additional services. Such a system is sufficient for fast deployment and connecting XMPP clients.

A more scalable solution would be deploying MongooseIM with an external database for persistent data. Bigger setups may require a cluster of MongooseIM nodes, and a load balancer to manage the traffic from the client applications.

A single MongooseIM node can handle as many as 2.5 million online users. Based on our load tests, for deployments with multiple nodes, we are confident that 10 million online users is well within reach. Please note that such scalability numbers depend on the selected feature set that your MongooseIM installation is running.

For more details please see our blogpost: Scaling a Mongoose: How scalable is the MongooseIM XMPP server?

If the service requires a cluster of more than 10 nodes, we recommend using Redis instead of Mnesia for session storage. To avoid a single point of failure, a master-slave Redis setup is advisable.

MongooseIM allows connecting different clusters as parts of larger systems. This feature is used in geo-localised services handling massive traffic from all over the world.

MongooseIM gathers over 50 different XMPP-related metrics, allowing close monitoring of what happens inside the nodes. To manage the users, rosters, messages and general settings, we provide a GraphQL API which can be utilized via HTTP or the command-line tool mongooseimctl (see GraphQL Admin API).

Erlang Solutions also provides WombatOAM, an Erlang VM monitoring solution that enables ops and devs to better understand what is going on in a MongooseIM cluster.

For load testing we use our own tools, that enable us to validate MongooseIM's scalability, given different scenarios.

"},{"location":"user-guide/Features/#multi-tenancy-dynamic-domains","title":"Multi-tenancy (dynamic domains)","text":"

MongooseIM supports multi-tenancy. This makes it possible to set up thousands of domains dynamically without a noticeable performance overhead. On more information on how to set up this feature, see dynamic domains configuration.

"},{"location":"user-guide/Features/#integration-with-other-platform-components","title":"Integration with other platform components","text":""},{"location":"user-guide/Features/#client-applications","title":"Client applications","text":"

In order to build client applications, the MongooseIM team recommends the following libraries:

XMPP REST API iOS XMPPframework, Objective-C Jayme, Swift Android Smack, Java Retrofit, Java Web Stanza.io/Strophe.js, JavaScript"},{"location":"user-guide/Features/#mongoosepush","title":"MongoosePUSH","text":"

MongooseIM can be integrated with MongoosePush. For more details visit the push notification user guide.

"},{"location":"user-guide/Features/#mongooseice","title":"MongooseICE","text":"

You can also connect Mongoose with MongooseICE. To get started, we recommend going through this tutorial.

"},{"location":"user-guide/High-level-Architecture/","title":"High-level Architecture","text":""},{"location":"user-guide/High-level-Architecture/#inside-mongooseim","title":"Inside MongooseIM","text":""},{"location":"user-guide/High-level-Architecture/#modules","title":"Modules","text":"

At its core MongooseIM is a huge message router you can customise to fit your system's needs. You can choose and enable behaviours and functionalities by configuring any of the available modules. A wide range of options includes authentication, privacy, storage, backend integration and mobile optimisations. See Extension Modules for a full list.

Modules can be configured and started either for all virtual hosts served by the instance or with individual configuration for only some of them. Modules may depend on services and on other modules. If a module depends on other modules, required modules are started automatically with configuration provided by the dependent module. If a module requires certain services which are not running, the module will not start.

"},{"location":"user-guide/High-level-Architecture/#services","title":"Services","text":"

Services provide certain functionalities not specific to virtual hosts but rather applied to the whole instance or to modules started for various hosts. They are configured globally and launched on startup, before modules, so that needed dependencies are satisfied. A service can require other services to be operational; required services are started automatically. The required service must also be present in the server's configuration file. Modules which are not host-specific are gradually being refactored to services.

"},{"location":"user-guide/High-level-Architecture/#databases","title":"Databases","text":"

MongooseIM manages two sets of data: transient for session data management, and persistent for archive and configurations.

Please refer to Database Backends doc for more configuration information.

"},{"location":"user-guide/High-level-Architecture/#transient-databases","title":"Transient databases","text":"

In the MongooseIM architecture each MongooseIM node host has an accompanying Mnesia node.

Redis on the other hand forms a separate cluster and does not utilise MongooseIM nodes.

There is no need to set up any backups for transient data since it naturally rebuilds as clients reconnect massively.

"},{"location":"user-guide/High-level-Architecture/#persistent-databases","title":"Persistent databases","text":"

Both RDBMS/SQL (MySQL/PostgreSQL) and NoSQL (Cassandra) databases are supported.

Backups should be regular, and tested.

"},{"location":"user-guide/High-level-Architecture/#ldap-directory","title":"LDAP directory","text":"

LDAP will also run on a separate cluster.

Backups should be regular, and tested.

"},{"location":"user-guide/High-level-Architecture/#outside-mongooseim-ecosystem-in-a-datacenter","title":"Outside MongooseIM: ecosystem in a datacenter","text":""},{"location":"user-guide/High-level-Architecture/#frontend","title":"Frontend","text":"

Native clients on platforms such as Android, iOS, Windows, Linux, macOS, will preferably use a plain XMPP over TCP connections.

Since web clients cannot use TCP connections, they will preferably use XMPP over websockets, or the now less relevant XMPP over BOSH (using long-lived HTTP connections, more and more used as fallback).

Any client could use the client GraphQL API, which is using HTTP request/responses.

All these client connections will hit a frontend load balancer before reaching the MongooseIM cluster.

"},{"location":"user-guide/High-level-Architecture/#backend","title":"Backend","text":"

MongooseIM supports bilateral communication with other backend services in the datacenter infrastructure.

MongooseIM GraphQL API is available for control/management of MongooseIM's operations as well as the functional aspects.

An HTTP notification enables forwarding of the events to any other external HTTP service.

"},{"location":"user-guide/High-level-Architecture/#management-and-monitoring","title":"Management and monitoring","text":"

WombatOAM enables the monitoring and management of MongooseIM clusters, as well as RabbitMQ and any other Erlang and Elixir based system.

"},{"location":"user-guide/High-level-Architecture/#mongooseice-stunturn","title":"MongooseICE (STUN/TURN)","text":"

Available on: MongooseICE

"},{"location":"user-guide/High-level-Architecture/#mongoosepush-apns-gcm","title":"MongoosePush (APNS, GCM)","text":"

Available on: MongoosePush

"},{"location":"user-guide/High-level-Architecture/#mongooseim-in-a-worldwide-multi-datacenter-configuration","title":"MongooseIM in a worldwide, multi-datacenter configuration","text":"

The MongooseIM platform enables a service to scale worldwide, with proximity servers across continents and datacenters. It leverages the use of the open standard S2S (server-to-server) protocol.

We advise contacting us in case of such a big deployment.

"},{"location":"user-guide/Supported-XEPs/","title":"Supported XEPs","text":"XEP Name Version Status Modules 0004 Data Forms 2.13.1 complete mongoose_data_forms 0012 Last Activity 2.0 complete mod_last 0016 Privacy Lists 1.7 complete mod_privacy 0022 Message Events 1.4 complete mod_offline 0023 Message Expiration 1.3 complete mod_offline 0030 Service Discovery 2.5rc3 complete mod_disco 0045 Multi-User Chat 1.34.5 complete mod_muc 0049 Private XML Storage 1.2 complete mod_private 0050 Ad-Hoc Commands 1.3.0 complete adhoc 0054 vcard-temp 1.2 complete mod_vcard 0055 Jabber Search 1.3 complete mod_vcard 0059 Result Set Management 1.0 complete jlib 0060 Publish-Subscribe 1.25.0 complete mod_pubsub 0068 Field Standardization for Data Forms 1.3.0 complete mongoose_data_forms 0077 In-Band Registration 2.4 complete mod_register 0079 Advanced Message Processing 1.2 partial mod_amp 0082 XMPP Date and Time Profiles 1.1.1 complete mod_time 0083 Nested Roster Groups 1.0 complete mod_roster 0085 Chat State Notifications 2.1 complete mod_offline 0086 Error Condition Mappings 1.0 complete jlib 0092 Software Version 1.1 complete mod_version 0093 Roster Item Exchange 1.2 complete mod_roster 0114 Jabber Component Protocol 1.6 complete ejabberd_service 0115 Entity Capabilities 1.6.0 complete mod_caps 0124 Bidirectional-streams Over Synchronous HTTP (BOSH) 1.11.2 complete mod_bosh 0126 Invisibility 1.1 complete mod_privacy 0157 Contact Addresses for XMPP Services 1.1.1 complete mod_disco 0160 Best Practices for Handling Offline Messages 1.0.1 complete mod_offline, mod_offline_chatmarkers 0163 Personal Eventing Protocol 1.2.2 complete mod_pubsub 0170 Recommended Order of Stream Feature Negotiation 1.0 complete mongoose_c2s 0175 Best Practices for Use of SASL ANONYMOUS 1.2 complete cyrsasl_anonymous 0178 Best Practices for Use of SASL EXTERNAL with Certificates 1.2 partial cyrsasl_external 0185 Dialback Key Generation and Validation 1.0 complete mongoose_s2s_dialback 0191 Blocking Command 
1.3 complete mod_blocking 0198 Stream Management 1.6.1 complete mod_stream_management 0199 XMPP Ping 2.0.1 complete mod_ping 0202 Entity Time 2.0 complete mod_time 0206 XMPP Over BOSH 1.4 complete mod_bosh 0215 External Service Discovery 1.0.0 complete mod_extdisco 0220 Server Dialback 1.1.1 complete ejabberd_s2s_out, mongoose_s2s_dialback 0237 Roster Versioning 1.3 complete mod_roster 0248 PubSub Collection Nodes 0.3.0 complete mod_pubsub 0249 Direct MUC Invitations 1.2 complete mod_muc 0277 Microblogging over XMPP 0.6.5 complete mod_pubsub 0279 Server IP Check 0.2 complete mod_sic 0280 Message Carbons 1.0.1 complete mod_carboncopy 0313 Message Archive Management 1.1.0 complete mod_mam 0333 Displayed Markers 0.4 complete mod_smart_markers 0352 Client State Indication 1.0.0 complete mod_csi 0357 Push Notifications 0.4.1 complete mod_event_pusher_push 0363 HTTP File Upload 1.1.0 complete mod_http_upload 0384 OMEMO Encryption 0.8.3 complete mod_pubsub 0386 Bind 2 0.4.0 partial mod_bind2 0388 Extensible SASL Profile 0.4.0 partial mod_sasl2 0424 Message Retraction 0.3.0 complete mod_mam"},{"location":"user-guide/Supported-standards/","title":"Supported standards","text":"
  • XMPP Core: RFC 3920, RFC 6120

    Note

    In RFC 6120 there are 3 different strategies defined in case of a session conflict (same full JID). They are described in 7.7.2.2. Conflict. MongooseIM always uses the 3rd option. It terminates the older session with a <conflict/> stream error.

  • XMPP Instant Messaging and Presence: RFC 3921, RFC 6121

  • Client connections:
    • over TCP (with TLS/STARTTLS available) as defined in RFC 6120
    • over WebSockets as defined in RFC 7395
    • over HTTP(S) long-polling (BOSH) as defined in XEP-0124: Bidirectional-streams Over Synchronous HTTP (BOSH) and XEP-0206: XMPP Over BOSH
    • GraphQL API
    • REST API (deprecated)
  • Server/backend connections:
    • GraphQL API
    • REST API (deprecated)
  • Configurable database backends:
    • Transient:
      • Mnesia
      • Redis
    • Persistent:
      • RDBMS: MySQL, PostgreSQL, generic ODBC
      • NoSQL: Cassandra
  • Integration with third-party services
    • Amazon Simple Notification Service
"},{"location":"user-guide/Supported-standards/#supported-xeps","title":"Supported XEPs","text":"XEP Name Version Status Modules 0004 Data Forms 2.13.1 complete mongoose_data_forms 0012 Last Activity 2.0 complete mod_last 0016 Privacy Lists 1.7 complete mod_privacy 0022 Message Events 1.4 complete mod_offline 0023 Message Expiration 1.3 complete mod_offline 0030 Service Discovery 2.5rc3 complete mod_disco 0045 Multi-User Chat 1.34.5 complete mod_muc 0049 Private XML Storage 1.2 complete mod_private 0050 Ad-Hoc Commands 1.3.0 complete adhoc 0054 vcard-temp 1.2 complete mod_vcard 0055 Jabber Search 1.3 complete mod_vcard 0059 Result Set Management 1.0 complete jlib 0060 Publish-Subscribe 1.25.0 complete mod_pubsub 0068 Field Standardization for Data Forms 1.3.0 complete mongoose_data_forms 0077 In-Band Registration 2.4 complete mod_register 0079 Advanced Message Processing 1.2 partial mod_amp 0082 XMPP Date and Time Profiles 1.1.1 complete mod_time 0083 Nested Roster Groups 1.0 complete mod_roster 0085 Chat State Notifications 2.1 complete mod_offline 0086 Error Condition Mappings 1.0 complete jlib 0092 Software Version 1.1 complete mod_version 0093 Roster Item Exchange 1.2 complete mod_roster 0114 Jabber Component Protocol 1.6 complete ejabberd_service 0115 Entity Capabilities 1.6.0 complete mod_caps 0124 Bidirectional-streams Over Synchronous HTTP (BOSH) 1.11.2 complete mod_bosh 0126 Invisibility 1.1 complete mod_privacy 0157 Contact Addresses for XMPP Services 1.1.1 complete mod_disco 0160 Best Practices for Handling Offline Messages 1.0.1 complete mod_offline, mod_offline_chatmarkers 0163 Personal Eventing Protocol 1.2.2 complete mod_pubsub 0170 Recommended Order of Stream Feature Negotiation 1.0 complete mongoose_c2s 0175 Best Practices for Use of SASL ANONYMOUS 1.2 complete cyrsasl_anonymous 0178 Best Practices for Use of SASL EXTERNAL with Certificates 1.2 partial cyrsasl_external 0185 Dialback Key Generation and Validation 1.0 complete mongoose_s2s_dialback 
0191 Blocking Command 1.3 complete mod_blocking 0198 Stream Management 1.6.1 complete mod_stream_management 0199 XMPP Ping 2.0.1 complete mod_ping 0202 Entity Time 2.0 complete mod_time 0206 XMPP Over BOSH 1.4 complete mod_bosh 0215 External Service Discovery 1.0.0 complete mod_extdisco 0220 Server Dialback 1.1.1 complete ejabberd_s2s_out, mongoose_s2s_dialback 0237 Roster Versioning 1.3 complete mod_roster 0248 PubSub Collection Nodes 0.3.0 complete mod_pubsub 0249 Direct MUC Invitations 1.2 complete mod_muc 0277 Microblogging over XMPP 0.6.5 complete mod_pubsub 0279 Server IP Check 0.2 complete mod_sic 0280 Message Carbons 1.0.1 complete mod_carboncopy 0313 Message Archive Management 1.1.0 complete mod_mam 0333 Displayed Markers 0.4 complete mod_smart_markers 0352 Client State Indication 1.0.0 complete mod_csi 0357 Push Notifications 0.4.1 complete mod_event_pusher_push 0363 HTTP File Upload 1.1.0 complete mod_http_upload 0384 OMEMO Encryption 0.8.3 complete mod_pubsub 0386 Bind 2 0.4.0 partial mod_bind2 0388 Extensible SASL Profile 0.4.0 partial mod_sasl2 0424 Message Retraction 0.3.0 complete mod_mam"},{"location":"user-guide/Supported-standards/#supported-open-extensions","title":"Supported Open Extensions","text":"Name Module MUC Light mod_muc_light Inbox mod_inbox Token-based reconnection mod_auth_token, mod_keystore MAM extensions mam"}]} \ No newline at end of file +{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"","title":"MongooseIM Documentation","text":"
  • Home: https://github.com/esl/MongooseIM
  • Product page: https://www.erlang-solutions.com/products/mongooseim.html
  • Documentation: https://esl.github.io/MongooseDocs/
  • Try it now: https://trymongoose.im
"},{"location":"#get-to-know-mongooseim","title":"Get to know MongooseIM","text":"

MongooseIM is a robust, scalable and efficient XMPP server at the core of an Instant Messaging platform aimed at large installations.

Designed for enterprise, it is fault-tolerant, can utilise the resources of multiple clustered machines, and easily scales for more capacity by simply adding a box or a VM.

MongooseIM can accept client sessions over vanilla XMPP, GraphQL API, REST API and SSE, as well as Websockets and BOSH (HTTP long-polling).

As a platform, MongooseIM includes several server-side (backend) and client-side (frontend) components. We provide a test suite, metrics, a load testing platform, and a monitoring server. We recommend third-party, open source client libraries for XMPP, GraphQL API and REST API.

MongooseIM is brought to you by Erlang Solutions.

"},{"location":"#mongooseim-platform-components","title":"MongooseIM platform components","text":""},{"location":"#server-side-components","title":"Server-side components","text":"

We offer a set of additional server-side components:

  • WombatOAM is a powerful monitoring platform that comes with a dedicated MongooseIM plugin
  • mongoose_metrics is an internal module that provides various metrics about the server; if you use WombatOAM with the MongooseIM plugin, you will see them there — otherwise you can use e.g. InfluxDB and Grafana to store and visualize them
  • Test suite is written with the help of these useful tools:
    • escalus: an XMPP client for Erlang
    • amoc: a load testing tool
  • MongooseICE: is a STUN and TURN server written for traversing NATs and relaying streams
  • MongoosePush: is a flexible push notification server with APNS and FCM support
"},{"location":"#client-side-components","title":"Client-side components","text":"
  • XMPP client libraries - we recommend the following client libraries:
    • iOS, Objective-C: XMPPFramework
    • Android, Java: Smack
    • Web, JavaScript: Stanza.io, Strophe.js
  • REST API client libraries - we recommend the following client libraries:
    • iOS, Swift: Jayme
    • Android, Java: Retrofit
"},{"location":"#download-packages","title":"Download packages","text":"

For a quick start just download:

  • The pre-built packages that suit your platform (Ubuntu, Debian, CentOS compatible: AlmaLinux and Rocky Linux)
  • The Docker image (source code repository)
  • The Helm chart (source code repository)

See the installation guide for more details.

"},{"location":"#public-testing","title":"Public testing","text":"

Check out our test results:

  • CI testing:
    • GH Actions
    • CircleCI
  • Code coverage:
    • Codecov - reported by CircleCI.
    • Coveralls - reported by GH Actions.
"},{"location":"#versions","title":"Versions","text":"

See the documentation for the latest releases:

  • Master
  • 6.2.1
  • 6.2.0
  • 6.1.0
  • 6.0.0
  • 5.1.0
  • 5.0.0
  • 4.2.0
  • 4.1.0
  • 4.0.1
  • 3.7.1
  • 3.6.2
  • 3.5.0
  • 3.4.1
  • 3.3.0
  • 3.2.0
  • 3.1.1
  • 3.0.1
"},{"location":"#participate","title":"Participate!","text":"

Suggestions, questions, thoughts? Contact us directly:

  • Raise a GitHub issue
  • Email us at mongoose-im@erlang-solutions.com
  • Follow our Twitter account
"},{"location":"Contributions/","title":"Contributions to the Ecosystem","text":"

Our contributions to the ecosystem.

"},{"location":"Contributions/#third-party-opensource-projects","title":"Third-party opensource projects","text":""},{"location":"Contributions/#xmppframework-for-ios","title":"XMPPFramework for iOS","text":"

Available on: robbiehanson/XMPPFramework

  • XEP-0363: HTTP File Upload
  • XEP-0313: Message Archive Management
  • XEP-0030: Service Discovery
  • MUC light
  • Token-based reconnection
  • Revamped README: making people feel like this is a well maintained and up to date framework
  • Created a way to Mock a piece of the framework to improve the way we write tests
"},{"location":"Contributions/#smack-for-android","title":"Smack for Android","text":"

Available on: igniterealtime/Smack

  • XEP-0357: Push Notifications
  • XEP-0191: Blocking Command
  • XEP-0313: Message Archive Management
  • XEP-0308: Last Message Correction
  • MUC light
  • Token-based reconnection
  • Instant Stream Resumption
  • XEP-0231: Bits of Binary
  • XEP-0333: Chat Markers
  • MAM documentation
"},{"location":"Contributions/#movim","title":"Movim","text":"

See movim/movim on GitHub for more details.

  • Docker image for Movim
"},{"location":"Contributions/#software-by-erlang-solutions","title":"Software by Erlang Solutions","text":""},{"location":"Contributions/#escalus","title":"escalus","text":"

See esl/escalus on GitHub for more details.

An XMPP client library in Erlang for conveniently testing XMPP servers

Apache license 2.0

"},{"location":"Contributions/#amoc","title":"amoc","text":"

See esl/amoc on GitHub for more details.

amoc is a simple tool for running massively parallel XMPP tests

Apache license 2.0

Info

amoc stands for \"A Murder of Crows\"

"},{"location":"Contributions/#amoc-arsenal-xmpp","title":"amoc-arsenal-xmpp","text":"

See esl/amoc-arsenal-xmpp on GitHub for more details.

A collection of scenarios for amoc, which we use to test MongooseIM. They can however be used to load test any XMPP server.

Apache license 2.0

"},{"location":"Contributions/#exml","title":"exml","text":"

See esl/exml on GitHub for more details.

XML parsing library in Erlang

Apache license 2.0

"},{"location":"Contributions/#mongooseice-ice-stunturn-server","title":"MongooseICE: ICE (STUN/TURN) server","text":"

See MongooseICE on GitHub for more details.

"},{"location":"Contributions/#mongoosepush-push-notifications-server-apnsfcm","title":"MongoosePush: Push notifications server (APNS/FCM)","text":"

See MongoosePush on GitHub for more details.

"},{"location":"Contributions/#open-standards","title":"Open standards","text":""},{"location":"Contributions/#muc-light","title":"MUC light","text":"

MUC stands for Multi-User Chat. MUC light is a presenceless and subscription-based group chat, relying on a simplified version of MUC.

"},{"location":"Contributions/#token-based-reconnection","title":"Token-based reconnection","text":"

Token-based reconnection (TBR) is a reconnection mechanism for temporary disconnections, using tokens instead of passwords

"},{"location":"History/","title":"MongooseIM history","text":""},{"location":"History/#2023-2024-c2s-and-cets","title":"2023-2024: C2S and CETS","text":"

Created an alternative to Mnesia RAM-only tables - CETS. It allows running MongooseIM without Mnesia completely in an RDBMS+CETS setup.

Moved the C2S implementation to state machine. Added Docker image for arm64.

Enhanced CETS, configurable pools, and traffic shaping updates.

Releases:

  • MongooseIM 6.2.1 in April 2024.
  • MongooseIM 6.2.0 in December 2023.
  • MongooseIM 6.1.0 in May 2023.
"},{"location":"History/#2022-graphql","title":"2022: GraphQL","text":"

The new GraphQL API allows accessing MongooseIM using the HTTP protocol to extract data and make changes in a flexible way. The command-line interface (CLI) has been reworked to match the GraphQL functionality. The configuration for the admin and the client API has been simplified.

Release:

  • MongooseIM 6.0.0 in December 2022.
"},{"location":"History/#2020-2021-friendly-cloud-native-and-dynamic","title":"2020-2021: Friendly, cloud-native and dynamic","text":"

With the new configuration format, improved logging, and many more changes, MongooseIM has become more friendly for DevOps than ever before. This goes hand in hand with the prioritisation of solutions that enable MongooseIM to be easily deployed to the cloud.

Whether in the cloud or on-premise, it is now possible to have a multi-tenant setup, powered by the new dynamic XMPP domains feature. It means thousands of domains can be simply set up, managed, and removed dynamically, without a noticeable performance overhead.

Releases:

  • MongooseIM 5.1.0 in June 2022.
  • MongooseIM 5.0.0 in October 2021.
  • MongooseIM 4.2.0 in April 2021.
  • MongooseIM 4.1.0 in February 2021.
  • MongooseIM 4.0.0 in September 2020.
  • MongooseIM 3.7.0 in May 2020.
  • MongooseIM 3.6.0 in January 2020.
"},{"location":"History/#2018-2019-global-distribution-ready","title":"2018-2019: Global distribution ready","text":"
  • Focus on global scale architecture.
  • Chat bot integrations.
  • Optimizations for IoT clients.
  • GDPR compliance.
  • New XML parser exml.

Releases:

  • MongooseIM 3.5.0 in October 2019.
  • MongooseIM 3.4.0 in June 2019.
  • MongooseIM 3.3.0 in March 2019.
  • MongooseIM 3.2.0 in November 2018.
  • MongooseIM 3.1.1 in July 2018.
  • MongooseIM 3.0.1 in May 2018.
  • MongooseIM 2.2.2 in April 2018.
  • MongooseIM 2.1.1 in January 2018.
"},{"location":"History/#2017-platform-expansion-and-strengthening","title":"2017: Platform expansion and strengthening","text":"

MongooseIM 2.1.0 in October 2017.

New components were added to the MongooseIM platform:

  • MongoosePush, push notifications server
  • MongooseICE, ICE server to help with voice calls functionality
  • Mangosta iOS, demo XMPP client application for iOS
  • Mangosta Android, demo XMPP client application for Android
"},{"location":"History/#2016-pivot-to-fullstack-messaging-platform","title":"2016: Pivot to fullstack messaging platform","text":"

MongooseIM Platform was created, that included a list of components:

  • MongooseIM XMPP server 2.0.0, featuring a unique REST API for client developers and MUC light
  • WombatOAM, for monitoring and operations
  • escalus, an Erlang XMPP client for test automation
  • amoc, for load generation
  • Smack for Android in Java (third party)
  • XMPPFramework for iOS in Objective-C (third party)
  • Retrofit by Square for Android in Java (third party)
  • Jayme by Inaka for iOS in Swift
"},{"location":"History/#2012-2015-fully-independent-project-growing-fast","title":"2012-2015: Fully independent project growing fast","text":"
  • Full OTP and rebar compliance.
  • Removal of obsolete and/or rarely used modules.
  • Reduction of the runtime memory consumption and functional test coverage.
  • Added Message Archive Management support (XEP-0313).

Releases:

  • MongooseIM 1.6.x in October 2015.
  • MongooseIM 1.5.x in December 2014.
  • MongooseIM 1.4.x in May 2014.
  • MongooseIM 1.3.x in January 2014.
  • MongooseIM 1.2.x in May 2013.
  • MongooseIM 1.1.x in December 2012.
  • MongooseIM 1.0.0 in July 2012.
"},{"location":"History/#2011-fork-of-ejabberd","title":"2011: Fork of ejabberd","text":"

This project began its life as a fork of ejabberd v.2.1.8.

Version 0.1.0 included:

  • Replaced strings with binaries to significantly reduce memory consumption.
  • Refactored directory structure of the project to be OTP compliant.
  • Replaced autotools with the rebar build tool.
  • Removed obsolete and/or rarely used modules to reduce maintenance burden.
  • Added functional tests based on RFCs and XEPs.
"},{"location":"authentication-methods/anonymous/","title":"Anonymous","text":""},{"location":"authentication-methods/anonymous/#overview","title":"Overview","text":"

This authentication method allows the users to connect anonymously.

"},{"location":"authentication-methods/anonymous/#configuration-options","title":"Configuration options","text":""},{"location":"authentication-methods/anonymous/#authanonymousallow_multiple_connections","title":"auth.anonymous.allow_multiple_connections","text":"
  • Syntax: boolean
  • Default: false
  • Example: allow_multiple_connections = true

When set to true, allows multiple connections from the same JID using the anonymous authentication method.

"},{"location":"authentication-methods/anonymous/#authanonymousprotocol","title":"auth.anonymous.protocol","text":"
  • Syntax: string, one of \"sasl_anon\", \"login_anon\", \"both\"
  • Default: sasl_anon
  • Example: protocol = \"both\"

Specifies the SASL mechanisms supported by the anonymous authentication method:

  • sasl_anon - support only the ANONYMOUS mechanism,
  • login_anon - support the non-anonymous mechanisms (PLAIN, DIGEST-MD5, SCRAM-*),
  • both - support both types of mechanisms.
"},{"location":"authentication-methods/anonymous/#authanonymousbackend","title":"auth.anonymous.backend","text":"
  • Syntax: string, one of mnesia, cets
  • Default: mnesia
  • Example: backend = cets

Sets the backend where anonymous sessions will be stored in-memory. See internal databases

"},{"location":"authentication-methods/anonymous/#example","title":"Example","text":"
[auth.anonymous]\n  allow_multiple_connections = true\n  protocol = \"both\"\n
"},{"location":"authentication-methods/dummy/","title":"Dummy","text":""},{"location":"authentication-methods/dummy/#overview","title":"Overview","text":"

The purpose of this method is to make it possible to authenticate a user without the need for real authentication. In other words, using this module allows to connect any user to the server without providing any password, certificate, etc.

This kind of authorization sometimes really comes in handy, especially during development and testing.

The backend just accepts every authentication attempt and introduces a random delay (50-500ms) to an authorization response. The delay works like

    timer:sleep(Base + rand:uniform(Variance)),\n
where Base is base_time and Variance is variance, as configured below.

"},{"location":"authentication-methods/dummy/#configuration","title":"Configuration","text":""},{"location":"authentication-methods/dummy/#authdummybase_time","title":"auth.dummy.base_time","text":"
  • Syntax: non-negative integer
  • Default: 50
  • Example: base_time = 5
"},{"location":"authentication-methods/dummy/#authdummyvariance","title":"auth.dummy.variance","text":"
  • Syntax: positive integer
  • Default: 450
  • Example: variance = 10
"},{"location":"authentication-methods/dummy/#example","title":"Example","text":"
[auth.dummy]\n  base_time = 5\n  variance = 10\n
"},{"location":"authentication-methods/external/","title":"External","text":""},{"location":"authentication-methods/external/#overview","title":"Overview","text":"

This authentication method delegates the authentication to an external script.

It uses the SASL PLAIN mechanism.

"},{"location":"authentication-methods/external/#script-api-specification","title":"Script API specification","text":"

All \"commands\" sent from Erlang VM to the script are prefixed with a 2-byte unsigned integer (command length), MSB first. The script is expected to return responses in the same format.

Currently only 2 response packets are supported:

  • 0x0000 = false (for failure).
  • 0x0001 = true (for success).

The following list describes packets that the script should support.

  • auth:<username>:<domain>:<password> - Check password.
  • setpass:<username>:<domain>:<password> - Set password.
  • tryregister:<username>:<domain>:<password> - Register a user.
  • removeuser:<username>:<domain> - Remove a user.
  • isuser:<username>:<domain> - Check if a user exists.
"},{"location":"authentication-methods/external/#configuration-options","title":"Configuration options","text":""},{"location":"authentication-methods/external/#authexternalprogram","title":"auth.external.program","text":"
  • Syntax: string
  • Default: no default, this option is mandatory for the external authentication method
  • Example: program = \"/usr/bin/auth-script.sh\"

Path to the external authentication program.

"},{"location":"authentication-methods/external/#authexternalinstances","title":"auth.external.instances","text":"
  • Syntax: positive integer
  • Default: 1
  • Example: instances = 2

Specifies the number of workers serving external authentication requests.

"},{"location":"authentication-methods/external/#example","title":"Example","text":"
[auth.external]\n  program = \"/home/user/authenticator\"\n  instances = 5\n
"},{"location":"authentication-methods/http/","title":"HTTP","text":""},{"location":"authentication-methods/http/#overview","title":"Overview","text":"

The purpose of this method is to connect to an external REST API and delegate the authentication operations to it. The component must implement the API described below.

This method can be especially useful when the user database is shared with other services. It fits perfectly when the client application uses a custom authentication token and MongooseIM has to validate it externally.

"},{"location":"authentication-methods/http/#configuration-options","title":"Configuration options","text":"

The auth method uses an outgoing HTTP connection pool called auth, which has to be defined in the outgoing_pools section.

For additional configuration, the following options can be provided in the auth section:

"},{"location":"authentication-methods/http/#authhttpbasic_auth","title":"auth.http.basic_auth","text":"
  • Syntax: string
  • Default: not set
  • Example: basic_auth = \"admin:secret\"

Optional HTTP Basic Authentication in format \"username:password\" - used to authenticate MongooseIM in the HTTP service.

"},{"location":"authentication-methods/http/#example","title":"Example","text":"

Authentication:

[auth.http]\n  basic_auth = \"mongooseim:DzviNQw3qyGJDrJDu+ClyA\"\n

Outgoing pools:

[outgoing_pools.http.auth]\n  connection.host = \"https://auth-service:8000\"\n
"},{"location":"authentication-methods/http/#scram-support","title":"SCRAM support","text":"

The http method can use the SASL SCRAM-* mechanisms. When SCRAM is enabled, the passwords sent to the auth service are serialised and the same serialised format is expected when fetching a password from the component.

It is transparent when MongooseIM is responsible for all DB operations such as password setting, account creation etc.

The service CAN perform the (de)serialization of SCRAM-encoded passwords. You can find more details on the SCRAM serialization page.

"},{"location":"authentication-methods/http/#authentication-service-api","title":"Authentication service API","text":""},{"location":"authentication-methods/http/#url-format","title":"URL format","text":"

All GET requests include the following URL-encoded query string: ?user=<username>&server=<domain>&pass=<password>.

All POST requests have the following URL-encoded string in the request body: user=<username>&server=<domain>&pass=<password>.

If a certain method does not need a password, the value of pass is undefined, so it shouldn't be used.

"},{"location":"authentication-methods/http/#return-codes","title":"Return codes","text":"

For the best integration, the return code range should not exceed the list below:

  • 500 - internal server error
  • 409 - conflict
  • 404 - not found
  • 403 - not allowed
  • 401 - not authorised
  • 400 - other error, should be sent in response body
  • 204 - success, no return data
  • 201 - created
  • 200 - success, return value in response body

Whenever the specification says \"anything else\", service should use one of the codes from the list above.

Some requests consider multiple return codes a \"success\". It is up to the server-side developer to pick one of the codes.

"},{"location":"authentication-methods/http/#http-header-content-length","title":"HTTP header Content-Length","text":"

IMPORTANT: The authentication server MUST include a Content-Length HTTP header in the response. A body can be missing in the first data chunk read from a socket, leading to strange authentication errors.

"},{"location":"authentication-methods/http/#method-register","title":"Method register","text":"
  • Description: Creates a user account.
  • HTTP method: POST
  • Type: mandatory when mod_register is enabled
  • Return values:
    • 201 - success
    • 409 - user already exists
    • anything else - will be treated as failure
"},{"location":"authentication-methods/http/#method-check_password","title":"Method check_password","text":"
  • Description: Must respond if the password is valid for the user.
  • HTTP method: GET
  • Type: mandatory when SCRAM is not used
  • Return values:
    • 200, true or false in the body
    • anything else - will be treated as false
"},{"location":"authentication-methods/http/#method-get_password","title":"Method get_password","text":"
  • Description: Must return the user's password in plaintext or in the SCRAM serialised form.
  • HTTP method: GET
  • Type: mandatory when SCRAM or DIGEST SASL mechanism is used
  • Return values:
    • 200, password in the body
    • anything else - get_password will fail
"},{"location":"authentication-methods/http/#method-get_certs","title":"Method get_certs","text":"
  • Description: Must return all the valid certificates of a user in the PEM format.
  • HTTP method: GET
  • Type: mandatory when EXTERNAL SASL mechanism is used
  • Return values:
    • 200, all the user's certificates listed one after another (as in a PEM file)
    • anything else - get_certs will fail
"},{"location":"authentication-methods/http/#method-user_exists","title":"Method user_exists","text":"
  • Description: Must return the information whether the user exists in DB.
  • HTTP method: GET
  • Type: mandatory
  • Return values:
    • 200, true or false in body
    • anything else - will be treated as false
"},{"location":"authentication-methods/http/#method-set_password","title":"Method set_password","text":"
  • Description: Must set user's password in the internal database to a provided value. The value should not be transformed (except for URL-decoding) before writing into the DB.
  • HTTP method: POST
  • Type: mandatory when mod_register is enabled
  • Return values:
    • 200 or 201 or 204 - success
    • anything else - will be treated as false
"},{"location":"authentication-methods/http/#method-remove_user","title":"Method remove_user","text":"
  • Description: Removes a user account.
  • HTTP method: POST
  • Type: mandatory when mod_register is enabled
  • Return values:
    • 200 or 201 or 204 - success
    • 404 - user does not exist
    • 403 - not allowed for some reason
    • 40X - will be treated as bad request
"},{"location":"authentication-methods/http/#authentication-service-api-recipes","title":"Authentication service API recipes","text":"

Below you can find some examples of the auth service APIs and MongooseIM-side configuration along with use cases.

"},{"location":"authentication-methods/http/#system-using-a-common-custom-auth-token","title":"System using a common, custom auth token","text":"

An Auth token is provided as a password.

  • Service implements: check_password, user_exists
  • MongooseIM config: password.format: plain, mod_register disabled
  • Client side: Must NOT use the DIGEST-MD5 mechanism; use PLAIN instead
"},{"location":"authentication-methods/http/#central-database-of-plaintext-passwords","title":"Central database of plaintext passwords","text":"
  • Service implements: check_password, get_password, user_exists
  • MongooseIM config: password.format: plain, mod_register disabled
  • Client side: May use any available SASL mechanism
"},{"location":"authentication-methods/http/#central-database-able-to-process-scram","title":"Central database able to process SCRAM","text":"
  • Service implements: get_password, user_exists
  • MongooseIM config: password.format: scram, mod_register disabled
  • Client side: May use any available SASL mechanism
"},{"location":"authentication-methods/http/#godlike-mongooseim","title":"Godlike MongooseIM","text":"
  • Service implements: all methods
  • MongooseIM config: password.format: scram (recommended) or plain, mod_register enabled
  • Client side: May use any available SASL mechanism
"},{"location":"authentication-methods/jwt/","title":"JWT","text":""},{"location":"authentication-methods/jwt/#overview","title":"Overview","text":"

This authentication method can verify JSON Web Tokens provided by the clients. A wide range of signature algorithms is supported, including those using public key cryptography.

The module checks the signature and validity of the following parameters:

  • exp - an expired token is rejected,
  • iat - a token must be issued in the past,
  • nbf - a token might not be valid yet.

It requires the SASL PLAIN mechanism listed in sasl_mechanisms.

"},{"location":"authentication-methods/jwt/#configuration-options","title":"Configuration options","text":""},{"location":"authentication-methods/jwt/#authjwtsecret","title":"auth.jwt.secret","text":"
  • Syntax: TOML table with exactly one of the possible items listed below:
    • file - string, path to the file with the JWT secret,
    • env - string, environment variable name with the JWT secret,
    • value - string, the JWT secret value.
  • Default: no default, this option is mandatory
  • Example: secret.env = \"JWT_SECRET\"

This is the JWT secret used for the authentication. You can store it in a file, as an environment variable or specify it directly.

"},{"location":"authentication-methods/jwt/#authjwtalgorithm","title":"auth.jwt.algorithm","text":"
  • Syntax: string, one of: \"HS256\", \"RS256\", \"ES256\", \"HS386\", \"RS386\", \"ES386\", \"HS512\", \"RS512\", \"ES512\"
  • Default: no default, this option is mandatory
  • Example: algorithm = \"HS512\"

Name of the algorithm used to sign the JWT.

"},{"location":"authentication-methods/jwt/#authjwtusername_key","title":"auth.jwt.username_key","text":"
  • Syntax: string
  • Default: no default, this option is mandatory
  • Example: username_key = \"user_name\"

Name of the JWT key that contains the user name to verify.

"},{"location":"authentication-methods/jwt/#example","title":"Example","text":"
[auth.jwt]\n  secret.value = \"top-secret123\"\n  algorithm = \"HS256\"\n  username_key = \"user\"\n
"},{"location":"authentication-methods/ldap/","title":"LDAP","text":""},{"location":"authentication-methods/ldap/#overview","title":"Overview","text":"

This authentication method provides a read-only abstraction over an LDAP directory.

The following SASL mechanisms are supported:

"},{"location":"authentication-methods/ldap/#sasl-external","title":"SASL EXTERNAL","text":"

User credentials are verified by performing an LDAP search with the user name provided by the client. This can be used to verify that the user is allowed to log in after the provided certificate has been verified.

This method requires one connection pool with the default tag (unless you change it with the pool_tag option). You need to provide the root DN and password unless your LDAP server allows anonymous searches.

Example:

[outgoing_pools.ldap.default]\n  workers = 5\n  connection.servers = [\"ldap-server.example.com\"]\n  connection.rootdn = \"cn=admin,dc=example,dc=com\"\n  connection.password = \"ldap-admin-password\"\n

For more details see outgoing connections.

"},{"location":"authentication-methods/ldap/#sasl-plain","title":"SASL PLAIN","text":"

User credentials are verified by performing an LDAP search followed by a bind with the user name and password provided by the client.

To use SASL PLAIN, you need to configure two connection pools:

  • one with the default tag (unless you change it with the pool_tag option) for the search operations (like for SASL EXTERNAL),
  • one with the bind tag (unless you change it with the bind_pool_tag option) for the bind operations - for this one it is not necessary to provide the root DN and password as the bind operations will be performed with users' credentials. This pool has to be used exclusively for the bind operations as the authentication state of the connection changes with each request.

Example:

[outgoing_pools.ldap.default]\n  workers = 5\n  connection.servers = [\"ldap-server.example.com\"]\n  connection.rootdn = \"cn=admin,dc=example,dc=com\"\n  connection.password = \"ldap-admin-password\"\n\n[outgoing_pools.ldap.bind]\n  connection.servers = [\"ldap-server.example.com\"]\n

For more details see outgoing connections.

"},{"location":"authentication-methods/ldap/#configuration-options","title":"Configuration options","text":""},{"location":"authentication-methods/ldap/#authldappool_tag","title":"auth.ldap.pool_tag","text":"
  • Syntax: string
  • Default: \"default\"
  • Example: pool_tag = \"my_pool\"

Specifies the tag for the primary outgoing connection pool for LDAP authentication.

"},{"location":"authentication-methods/ldap/#authldapbind_pool_tag","title":"auth.ldap.bind_pool_tag","text":"
  • Syntax: string
  • Default: \"bind\"
  • Example: bind_pool_tag = \"my_bind_pool\"

Specifies the tag for the secondary outgoing connection pool for LDAP authentication, used for operations requiring the bind operations, such as checking passwords.

"},{"location":"authentication-methods/ldap/#authldapbase","title":"auth.ldap.base","text":"
  • Syntax: string
  • Default: no default, this option is mandatory
  • Example: base = \"ou=Users,dc=example,dc=com\"

LDAP base directory which stores user accounts.

"},{"location":"authentication-methods/ldap/#authldapuids","title":"auth.ldap.uids","text":"
  • Syntax: array of TOML tables with the following content:
    • attr - string, mandatory, name of the attribute
    • format - pattern, default: \"%u\", requires attr
  • Default: [{attr = \"uid\"}]
  • Example: uids = [{attr = \"uid\", format = \"%u@example.org\"}, {attr = \"another_uid\"}]

List of LDAP attributes that contain the user name (user's part of the JID), used to search for user accounts. They are used as alternatives - it is enough if one of them contains the name. By default the whole value of the attribute is expected to be the user name. If this is not the case, use the format option. It must contain one and only one pattern variable %u which will be replaced by the user name.

"},{"location":"authentication-methods/ldap/#authldapfilter","title":"auth.ldap.filter","text":"
  • Syntax: string
  • Default: not set
  • Example: filter = \"(&(objectClass=shadowAccount)(memberOf=Jabber Users))\"

An additional LDAP filter used to narrow down the search for user accounts. Do not forget to close the brackets and do not use superfluous whitespaces as this expression is processed before sending to LDAP - the match for user name (see ldap.uids) is added automatically.

"},{"location":"authentication-methods/ldap/#authldapdn_filter","title":"auth.ldap.dn_filter","text":"
  • Syntax: TOML table with the following content:
    • filter - string (LDAP filter), mandatory
    • attributes - array of strings (attribute names)
  • Default: not set
  • Example: dn_filter = {filter = \"(&(name=%s)(owner=%D)(user=%u@%d))\", attributes = [\"sn\"]}

This filter is applied to the results returned by the main filter. It performs an additional LDAP lookup to provide the complete result. This is useful when you are unable to define all filter rules in ldap.filter. You can define %u, %d, %s and %D pattern variables in the filter:

  • %u is replaced by the user\u2019s part of a JID,
  • %d is replaced by the corresponding domain (virtual host),
  • %s variables are consecutively replaced by values of the attributes listed as attributes
  • %D is replaced by the Distinguished Name.

Since this filter makes additional LDAP lookups, use it only as the last resort; try to define all filter rules in ldap.filter if possible.

"},{"location":"authentication-methods/ldap/#authldaplocal_filter","title":"auth.ldap.local_filter","text":"
  • Syntax: TOML table with the following content:
    • operation - string, mandatory, \"equal\" or \"notequal\"
    • attribute - string, mandatory, LDAP attribute
    • values - array of strings (attribute values)
  • Default: not set
  • Example: local_filter = {operation = \"equal\", attribute = \"accountStatus\", values = [\"enabled\"]}

If you can\u2019t use the ldap.filter due to performance reasons (the LDAP server has many users registered), you can use this local filter. The local filter checks an attribute in MongooseIM, not in LDAP, so this limits the load on the LDAP directory.

The example above shows a filter which matches accounts with the \"enabled\" status. Another example is shown below - it matches any account that is neither \"disabled\" nor \"blacklisted\". It also shows the usage of TOML dotted keys, which is recommended when the inline table grows too big.

   local_filter.operation = \"notequal\"\n   local_filter.attribute = \"accountStatus\"\n   local_filter.values = [\"disabled\", \"blacklisted\"]\n
"},{"location":"authentication-methods/ldap/#authldapderef","title":"auth.ldap.deref","text":"
  • Syntax: string, one of: \"never\", \"always\", \"finding\", \"searching\"
  • Default: \"never\"
  • Example: deref = \"always\"

Specifies whether or not to dereference aliases: finding means to dereference only when finding the base and searching - only when performing the LDAP search. See the documentation on LDAP search operation for more information.

"},{"location":"authentication-methods/ldap/#example","title":"Example","text":"
[auth.ldap]\n  base = \"ou=Users,dc=example,dc=com\"\n  filter = \"(objectClass=inetOrgPerson)\"\n
"},{"location":"authentication-methods/pki/","title":"PKI","text":""},{"location":"authentication-methods/pki/#overview","title":"Overview","text":"

This is a simple authentication method, meant to be used with the SASL EXTERNAL mechanism. It simply accepts all usernames as long as they are validated by the SASL logic.

Warning

Some of its callbacks return hardcoded values, as it's impossible for this backend to properly acquire certain pieces of information. These include:

Function Hardcoded value Explanation does_user_exist true PKI responds with true to modules checking if user's interlocutor actually exists so e.g. messages to nonexistent users will always be stored by mod_mam. This is not necessarily a security threat but something to be aware of. dirty_get_registered_users, get_vh_registered_users, get_vh_registered_users_number [] Any metrics or statistics (e.g. available via mongooseimctl) related to accounts list or numbers, won't display proper values, as this backend cannot possibly \"know\" how many users there are."},{"location":"authentication-methods/pki/#configuration-options","title":"Configuration options","text":"

None.

"},{"location":"authentication-methods/pki/#example","title":"Example","text":"
[auth.pki]\n
"},{"location":"authentication-methods/rdbms/","title":"RDBMS","text":""},{"location":"authentication-methods/rdbms/#overview","title":"Overview","text":"

This authentication method stores user accounts in a relational database, e.g. MySQL or PostgreSQL.

"},{"location":"authentication-methods/rdbms/#configuration-options","title":"Configuration options","text":"

The rdbms method uses an outgoing connection pool of type rdbms with the default tag - it has to be defined in the outgoing_pools section.

"},{"location":"authentication-methods/rdbms/#authrdbmsusers_number_estimate","title":"auth.rdbms.users_number_estimate","text":"
  • Syntax: boolean
  • Default: false
  • Example: users_number_estimate = true

By default querying MongooseIM for the number of registered users uses the SELECT COUNT query, which might be slow. Enabling this option makes MongooseIM use an alternative query that might be not as accurate, but is always fast.

Note

This option is effective only for MySQL and PostgreSQL.

"},{"location":"authentication-methods/rdbms/#example","title":"Example","text":"

Authentication:

[auth.rdbms]\n  users_number_estimate = true\n

Outgoing pools:

[outgoing_pools.rdbms.default.connection]\n  driver = \"pgsql\"\n  host = \"localhost\"\n  database = \"mongooseim\"\n  username = \"mongooseim\"\n  password = \"mongooseim_secret\"\n
"},{"location":"configuration/Erlang-cookie-security/","title":"Erlang Cookie Security","text":"

In order for MongooseIM nodes to communicate with each other, they have to share a common secret - i.e. a cookie - which is a feature of the underlying Erlang VM. The cookie itself is a UTF-8 string that is up to 255 characters in size. Thanks to the cookie, MongooseIM nodes can determine if they are allowed to communicate with each other and with no cookie no communication would flow between the nodes - a feature especially useful when you are running more than one application on a single machine.

For ease of deployment and staging, each MongooseIM node is configured with a predefined erlang cookie. However, one should remember that for production environments this cookie should be reconfigured to a new secret cookie, as this will secure your system from intrusion. You can change the cookie by changing the value of the -setcookie parameter in the vm.args file.

Nonetheless, one should remember that communication between Erlang nodes is unencrypted by default, hence, the cookie is vulnerable to sniffing. If one has access to a MongooseIM cookie and figures out the hostname of a node, one can execute shell commands remotely on that node. Therefore, one should either provide privacy at the network layer (strongly recommended) or disable port 4369 for ultimate security.

"},{"location":"configuration/Modules/","title":"Options: Extension Modules","text":"

MongooseIM provides a wide range of pluggable and configurable modules, that implement various features including XEPs. For instance mod_muc enables Multi-User Chat (group chat), mod_mam gives us Message Archive Management, and mod_stream_management is for stanza acknowledgement and stream resumption. This modular architecture provides great flexibility for everyday operations and feature development.

A module configuration generally looks like this:

[modules.mod_muc]\n  host = \"muc.@HOST@\"\n  access = \"muc\"\n  access_create = \"muc_create\"\n

"},{"location":"configuration/Modules/#iq-processing-policies","title":"IQ processing policies","text":"

Some of the modules feature an iqdisc parameter. It defines the method for handling incoming IQ stanzas.

The server may use one of the following strategies to handle incoming IQ stanzas:

"},{"location":"configuration/Modules/#modulesiqdisctype","title":"modules.*.iqdisc.type","text":"
  • Syntax: string, one of \"one_queue\", \"no_queue\", \"queues\", or \"parallel\"
  • Example: iqdisc.type = \"one_queue\"

Note

In the \"queues\" case alone, the following key becomes mandatory:

"},{"location":"configuration/Modules/#modulesiqdiscworkers","title":"modules.*.iqdisc.workers","text":"
  • Syntax: positive integer
  • Example: iqdisc.workers = 50

Their semantics works as follows:

  • no_queue registers a new IQ handler, which will be called in the context of the process serving the connection on which the IQ arrives.
  • one_queue spawns a new process by which the incoming IQ stanzas will be handled.
  • queues spawns N worker processes, as provided by the iqdisc.workers key. Every incoming stanza will be then handled by one of those processes.
  • parallel registers the handler without spawning any process: a new process will be spawned in place, for each incoming stanza.
"},{"location":"configuration/Modules/#modules-list","title":"Modules list","text":""},{"location":"configuration/Modules/#mod_adhoc","title":"mod_adhoc","text":"

Implements XEP-0050: Ad-Hoc Commands for advertising and executing application-specific commands, such as those related to a configuration workflow, using XEP-0004: Data Forms in order to structure the information exchange. This is extremely useful for use cases such as remote administration, user engagement via polls, and ChatBots.

"},{"location":"configuration/Modules/#mod_amp","title":"mod_amp","text":"

Implements a subset of XEP-0079: Advanced Message Processing functionality, that enables entities to request, and servers to perform advanced processing of XMPP message stanzas, including reliable data transport, time-sensitive delivery, and expiration of transient messages.

"},{"location":"configuration/Modules/#mod_auth_token","title":"mod_auth_token","text":"

A module used by SASL X-OAUTH mechanism. It provides an API to manage custom OAuth tokens. It requires mod_keystore as an actual key database.

"},{"location":"configuration/Modules/#mod_blocking","title":"mod_blocking","text":"

Implements XEP-0191: Blocking Command, a simplified interface to privacy lists.

"},{"location":"configuration/Modules/#mod_bind2","title":"mod_bind2","text":"

Implements XEP-0386: Bind 2.

"},{"location":"configuration/Modules/#mod_bosh","title":"mod_bosh","text":"

Allows users to connect to MongooseIM using BOSH (Bidirectional-streams Over Synchronous HTTP), the HTTP long-polling technique described in XEP-0124: Bidirectional-streams Over Synchronous HTTP (BOSH) and XEP-0206: XMPP Over BOSH.

"},{"location":"configuration/Modules/#mod_caps","title":"mod_caps","text":"

Implements XEP-0115: Entity Capabilities. It queries clients for their supported functionalities and caches them in Mnesia. This module tightly cooperates with mod_pubsub in order to deliver PEP events to user's subscribers.

"},{"location":"configuration/Modules/#mod_carboncopy","title":"mod_carboncopy","text":"

Implements XEP-0280: Message Carbons in order to keep all IM clients for a user engaged in a real-time conversation by carbon-copying all inbound and outbound messages to all interested resources (Full JIDs).

"},{"location":"configuration/Modules/#mod_csi","title":"mod_csi","text":"

Enables the XEP-0352: Client State Indication functionality.

"},{"location":"configuration/Modules/#mod_disco","title":"mod_disco","text":"

Implements XEP-0030: Service Discovery for discovering information (capabilities, protocols, features) about other XMPP entities.

"},{"location":"configuration/Modules/#mod_event_pusher","title":"mod_event_pusher","text":"

A framework module to build other notification-based modules on.

"},{"location":"configuration/Modules/#mod_event_pusher_sns","title":"mod_event_pusher_sns","text":"

Allows sending online/offline notifications, chat and groupchat messages as events to Amazon Simple Notification Service.

"},{"location":"configuration/Modules/#mod_event_pusher_rabbit","title":"mod_event_pusher_rabbit","text":"

Allows sending presence changes (to available/unavailable), chat and groupchat messages as events to a RabbitMQ server.

"},{"location":"configuration/Modules/#mod_event_pusher_push","title":"mod_event_pusher_push","text":"

Implements XEP-0357: Push Notifications to provide push notifications to clients that are temporarily unavailable.

"},{"location":"configuration/Modules/#mod_event_pusher_http","title":"mod_event_pusher_http","text":"

Forward events to an external HTTP service. This applies to situations such as sending messages or presences to mobile/SMS/email push service, big data, or an analytics service.

"},{"location":"configuration/Modules/#mod_extdisco","title":"mod_extdisco","text":"

Implements XEP-0215: External Service Discovery for discovering information about services external to the XMPP network. The main use-case is to help discover STUN/TURN servers to allow for negotiating media exchanges.

"},{"location":"configuration/Modules/#mod_http_upload","title":"mod_http_upload","text":"

Implements XEP-0363: HTTP File Upload for coordinating with an XMPP server to upload files via HTTP and receive URLs that can be shared in messages.

"},{"location":"configuration/Modules/#mod_inbox","title":"mod_inbox","text":"

Implements custom inbox XEP

"},{"location":"configuration/Modules/#mod_global_distrib","title":"mod_global_distrib","text":"

Enables sharing a single XMPP domain between distinct datacenters (experimental).

"},{"location":"configuration/Modules/#mod_jingle_sip","title":"mod_jingle_sip","text":"

Enables Jingle to SIP and SIP to Jingle translator.

"},{"location":"configuration/Modules/#mod_keystore","title":"mod_keystore","text":"

Serves as a storage for crypto keys for mod_auth_token.

"},{"location":"configuration/Modules/#mod_last","title":"mod_last","text":"

Implements XEP-0012: Last Activity for communicating information about the last activity associated with an XMPP entity (most recent presence information from an offline contact).

"},{"location":"configuration/Modules/#mod_mam","title":"mod_mam","text":"

Implements XEP-0313: Message Archive Management, that defines a protocol to query and control an archive of messages stored on a server.

"},{"location":"configuration/Modules/#mod_muc","title":"mod_muc","text":"

Implements XEP-0045: Multi-User Chat, for a featureful multi-user text chat (group chat), whereby multiple XMPP users can exchange messages in the context of a chat room. It is tightly coupled with user presence in chat rooms.

"},{"location":"configuration/Modules/#mod_muc_log","title":"mod_muc_log","text":"

Implements a logging subsystem for mod_muc.

"},{"location":"configuration/Modules/#mod_muc_light","title":"mod_muc_light","text":"

Implements XEP Multi-User Chat Light.

"},{"location":"configuration/Modules/#mod_offline","title":"mod_offline","text":"

Provides an offline messages storage that is compliant with XEP-0160: Best Practices for Handling Offline Messages.

"},{"location":"configuration/Modules/#mod_offline_stub","title":"mod_offline_stub","text":"

Prevents <service-unavailable/> error when the message recipient is offline.

"},{"location":"configuration/Modules/#mod_ping","title":"mod_ping","text":"

Implements XEP-0199: XMPP Ping, enabling periodic XMPP pings sent to clients and responds to those sent from clients.

"},{"location":"configuration/Modules/#mod_privacy","title":"mod_privacy","text":"

This module implements XEP-0016: Privacy Lists, for enabling or disabling communication with other entities on a network.

"},{"location":"configuration/Modules/#mod_private","title":"mod_private","text":"

Implements XEP-0049: Private XML Storage to store and query private user data in XML format.

"},{"location":"configuration/Modules/#mod_pubsub","title":"mod_pubsub","text":"

This extension implements XEP-0060: Publish-Subscribe. It is a pluggable implementation using behaviours provided by node_*.erl and nodetree_*.erl modules.

"},{"location":"configuration/Modules/#mod_push_service_mongoosepush","title":"mod_push_service_mongoosepush","text":"

Handles push notifications generated by mod_pubsub's node_push and passes them to MongoosePush service.

"},{"location":"configuration/Modules/#mod_register","title":"mod_register","text":"

Implements XEP-0077: In-Band Registration, that enables creating an account and changing the password once connected. This does not provide a solution to the forgotten password use case via SMS or email.

"},{"location":"configuration/Modules/#mod_roster","title":"mod_roster","text":"

Roster support, specified in RFC 6121. Includes support for XEP-0237: Roster Versioning.

"},{"location":"configuration/Modules/#mod_sasl2","title":"mod_sasl2","text":"

Implements XEP-0388: Extensible SASL Profile.

"},{"location":"configuration/Modules/#mod_shared_roster_ldap","title":"mod_shared_roster_ldap","text":"

This module, when enabled, will inject roster entries fetched from LDAP.

"},{"location":"configuration/Modules/#mod_sic","title":"mod_sic","text":"

Implements XEP-0279: Server IP Check that enables a client to discover its external IP address.

"},{"location":"configuration/Modules/#mod_stream_management","title":"mod_stream_management","text":"

Enables XEP-0198: Stream Management functionality that defines the active management of an XML stream between two XMPP entities, including features for stanza acknowledgements and stream resumption.

"},{"location":"configuration/Modules/#mod_time","title":"mod_time","text":"

XEP-0202: Entity Time implementation. With this extension, clients can get the current server time.

"},{"location":"configuration/Modules/#mod_vcard","title":"mod_vcard","text":"

Provides support for vCards, as specified in XEP-0054: vcard-temp and XEP-0055: Jabber Search.

"},{"location":"configuration/Modules/#mod_version","title":"mod_version","text":"

This module provides the functionality specified in XEP-0092: Software Version.

"},{"location":"configuration/Modules/#modules-incompatible-with-dynamic-domains","title":"Modules incompatible with dynamic domains","text":"

There are some modules that don't support dynamic domains for now. These must not be enabled when using host types in modules or host_config.modules sections:

  • mod_event_pusher
  • mod_global_distrib
  • mod_jingle_sip
  • mod_pubsub
  • mod_push_service_mongoosepush
  • mod_shared_roster_ldap

Please note, that s2s and the XMPP components (XEP-0114) mechanism, as configured in the listen.service section, do not support dynamic domains as well.

"},{"location":"configuration/Services/","title":"Options: Services","text":"

Some functionalities in MongooseIM are provided by \"services\". A service is similar to a module, but while a module is started for every host type and may have global or specific configuration, a service is started only once with global configuration. Currently, three modules are categorised as \"service providers\". Eventually the modules which are not specific for a host type will be refactored to be services.

  • Syntax: Each service is specified in its own services.* section.
  • Default: None - each service needs to be enabled explicitly. Typical services are already specified in the example configuration file.
  • Example: A configuration of the service_domain_db service.
[services.service_domain_db]\n  event_cleaning_interval = 1000\n  event_max_age = 5000\n
"},{"location":"configuration/Services/#service_mongoose_system_metrics","title":"service_mongoose_system_metrics","text":"

MongooseIM system metrics are being gathered to analyse the trends and needs of our users, improve MongooseIM, and get to know where to focus our efforts. See System Metrics Privacy Policy for more details.

"},{"location":"configuration/Services/#servicesservice_mongoose_system_metricsreport","title":"services.service_mongoose_system_metrics.report","text":"
  • Syntax: boolean
  • Default: not specified
  • Example: report = true

An explicit acknowledgement that the metrics are gathered and reported. When this option is not specified, the reports are gathered, and a notification appears in logs on startup. Enabling this option silences the notification reminder that metrics are gathered. When this option is set to false, System Metrics Service is not started and metrics are not collected.

"},{"location":"configuration/Services/#servicesservice_mongoose_system_metricsinitial_report","title":"services.service_mongoose_system_metrics.initial_report","text":"
  • Syntax: non-negative integer
  • Default: 300_000 (milliseconds - 5 minutes).
  • Example: initial_report = 300_000

Time delay counted when the service is started after which the first metrics report is created and sent.

"},{"location":"configuration/Services/#servicesservice_mongoose_system_metricsperiodic_report","title":"services.service_mongoose_system_metrics.periodic_report","text":"
  • Syntax: non-negative integer
  • Default: 10_800_000 (milliseconds - 3 hours)
  • Example: periodic_report = 10_800_000

Time delay for a periodic update report to be created and sent.

"},{"location":"configuration/Services/#servicesservice_mongoose_system_metricstracking_idid","title":"services.service_mongoose_system_metrics.tracking_id.id:","text":"
  • Syntax: string
  • Default: no default.
  • Example: tracking_id.id = \"G-123456789\"

Tracking ID to forward the reported metrics so that they can be viewed in the Google Analytics dashboard.

"},{"location":"configuration/Services/#servicesservice_mongoose_system_metricstracking_idsecret","title":"services.service_mongoose_system_metrics.tracking_id.secret:","text":"
  • Syntax: string
  • Default: no default.
  • Example: tracking_id.secret = \"Secret\"

Removing the services.service_mongoose_system_metrics entry will result in the service not being started. Metrics will not be collected and shared. It will generate a notification that the feature is not being used. The notification can be silenced by setting the no_report option explicitly.

"},{"location":"configuration/Services/#service_domain_db","title":"service_domain_db","text":"

This service is needed to use the dynamic domains API. It is used to synchronise dynamic domains between nodes after starting.

"},{"location":"configuration/Services/#servicesservice_domain_dbdb_pool","title":"services.service_domain_db.db_pool","text":"
  • Syntax: string
  • Default: global
  • Example: db_pool = \"my_host_type\"

By default, this service uses the RDBMS connection pool configured with the scope \"global\". You can put a specific host type there to use the default pool with the host_type scope for that particular host type. See the outgoing connections docs for more information about pool scopes.

"},{"location":"configuration/Services/#servicesservice_domain_dbevent_cleaning_interval","title":"services.service_domain_db.event_cleaning_interval","text":"
  • Syntax: positive integer
  • Default: 1800 (seconds - 30 minutes)
  • Example: event_cleaning_interval = 1800

The number of seconds between cleaning attempts of the domain_events table.

"},{"location":"configuration/Services/#servicesservice_domain_dbevent_max_age","title":"services.service_domain_db.event_max_age","text":"
  • Syntax: positive integer
  • Default: 7200 (seconds - 2 hours)
  • Example: event_max_age = 7200

The number of seconds after which an event must be deleted from the domain_events table.

"},{"location":"configuration/Services/#example-configuration","title":"Example configuration","text":"
[services.service_mongoose_system_metrics]\n  report = true\n  initial_report = 300_000\n  periodic_report = 10_800_000\n  tracking_id.id = \"G-123456789\"\n  tracking_id.secret = \"Secret\"\n\n[services.service_domain_db]\n  db_pool = \"global\"\n  event_cleaning_interval = 1800\n  event_max_age = 7200\n
"},{"location":"configuration/TLS-hardening/","title":"TLS Hardening","text":""},{"location":"configuration/TLS-hardening/#otp-tls-vs-fast-tls","title":"OTP TLS vs. Fast TLS","text":"

Before we explain the TLS hardening in MongooseIM, we need to describe the TLS libraries used in the project. These are \"OTP TLS\" and \"Fast TLS\".

The former is provided by (as the name suggests) OTP as the ssl application. A large part of the logic is implemented in Erlang, but it calls the OpenSSL API for some operations anyway.

The latter is a community-maintained driver, which is implemented as NIFs (native C code). It uses OpenSSL API for all operations.

Most MongooseIM components use the TLS library provided by OTP. However, some of them choose to integrate with fast_tls library instead. The former one is used primarily by MIM dependencies, while the latter is used only by MIM modules.

None of them is strictly better than the other. Below you may find a summary of the differences between them.

  • fast_tls is faster
  • There are options that OTP TLS (a.k.a just_tls in the C2S listener configuration) supports exclusively:
    • Immediate connection drop when the client certificate is invalid
    • Certificate Revocation Lists
    • More flexible certificate verification options
  • Allowed protocol versions may be configured:
    • Globally for OTP TLS via an environment variable
    • Per socket in Fast TLS via OpenSSL cipher string
"},{"location":"configuration/TLS-hardening/#deprecations","title":"Deprecations","text":"

MongooseIM is configured to allow only TLS 1.2 or higher, due to known vulnerabilities in TLS 1.0 and 1.1. It is still possible to enable earlier versions, however it is strongly discouraged.

"},{"location":"configuration/TLS-hardening/#otp-tls-hardening","title":"OTP TLS hardening","text":"

The protocol list for OTP TLS is set via the protocol_version environment variable. It's an Erlang runtime variable, so it is not configured in the OS but rather in the app.config file. It may be found in the etc/ folder inside the MongooseIM release and in [repository root]/rel/files/.

In order to change the list, please find the following lines:

{protocol_version, ['tlsv1.2',\n                    'tlsv1.3'\n          ]}\n

The remaining valid values are: 'tlsv1.1', tlsv1, sslv3.

This setting affects the following MongooseIM components:

  • Raw XMPP over TCP connections, if a C2S listener is configured to use just_tls
  • All outgoing connections (databases, AMQP, SIP etc.)
  • HTTP endpoints
"},{"location":"configuration/TLS-hardening/#fast-tls-hardening","title":"Fast TLS hardening","text":"

Fast TLS expects an OpenSSL cipher string as one of optional connection parameters. This string is configured individually for every module that uses it. By default, MongooseIM sets this option to TLSv1.2:TLSv1.3 for each component.

The list below enumerates all components that use Fast TLS and describes how to change this string.

  • listen.c2s - main user session abstraction + XMPP over TCP listener
    • Please consult the respective section in Listener modules.
  • listen.s2s - incoming S2S connections (XMPP Federation)
    • Please consult the respective section in Listener modules.
  • s2s - outgoing S2S connections (XMPP Federation)
    • Please check the documentation for s2s_ciphers option.
  • mod_global_distrib - Global Distribution module
    • Please add connections.tls.ciphers = \"string\" to modules.mod_global_distrib module, as described in the documentation.
"},{"location":"configuration/access/","title":"Options: Access","text":"

The access section is used to define access rules which return specific values for specific access classes.

  • Syntax: each access rule is a key-value pair, where:
    • Key is the name of the rule,
    • Value is a TOML array of rule clauses - TOML tables, whose format is described below.
  • Default: no default - each access rule needs to be specified explicitly.
  • Example: see the examples below.
"},{"location":"configuration/access/#access-rule-clauses","title":"Access rule clauses","text":"

Whenever a rule is checked to obtain the resulting value for a user, the clauses are traversed one by one until a matching one is found or the list is exhausted (in which case the special value deny is returned).

Each clause has to contain the following keys:

"},{"location":"configuration/access/#accessacl","title":"access.*.acl","text":"
  • Syntax: string
  • Example: acl = \"local\"

The access class defined in the acl section. The user is matched against it. The special name all is a catch-all value that matches any user. If the class does not exist, the clause does not match (there is no error).

"},{"location":"configuration/access/#accessvalue","title":"access.*.value","text":"
  • Syntax: string or integer
  • Example: value = \"allow\"

For rules determining access, the value will be \"allow\" or \"deny\". For other rules it can be an integer or a string.

"},{"location":"configuration/access/#rule-examples","title":"Rule examples","text":"

The following access rules are already defined in the example configuration file.

"},{"location":"configuration/access/#c2s-access","title":"C2S Access","text":"

The c2s rule is used to allow/deny the users to establish C2S connections:

  c2s = [\n    {acl = \"blocked\", value = \"deny\"},\n    {acl = \"all\", value = \"allow\"}\n  ]\n

It has the following logic:

  • if the access class is blocked, the returned value is \"deny\",
  • otherwise, the returned value is \"allow\".

The blocked access class can be defined in the acl section and match blacklisted users.

For this rule to take effect, it needs to be referenced in the options of a C2S listener.

"},{"location":"configuration/access/#c2s-shaper","title":"C2S Shaper","text":"

The c2s_shaper rule is used to determine the shaper used to limit the incoming traffic on C2S connections:

  c2s_shaper = [\n    {acl = \"admin\", value = \"none\"},\n    {acl = \"all\", value = \"normal\"}\n  ]\n

It has the following logic:

  • if the access class is admin, the returned value is \"none\",
  • otherwise, the returned value is \"normal\".

The admin access class can be defined in the acl to specify admin users who will bypass the normal shaper.

For this rule to take effect, it needs to be referenced in the options of a C2S listener.

"},{"location":"configuration/access/#s2s-shaper","title":"S2S Shaper","text":"

The s2s_shaper rule is used to determine the shaper used to limit the incoming traffic on S2S connections:

  s2s_shaper = [\n    {acl = \"all\", value = \"fast\"}\n  ]\n

It assigns the fast shaper to all S2S connections.

For this rule to take effect, it needs to be referenced in the options of an S2S listener.

"},{"location":"configuration/access/#muc","title":"MUC","text":"

The following rules manage the permissions of MUC operations:

  muc_admin = [\n    {acl = \"admin\", value = \"allow\"}\n  ]\n\n  muc_create = [\n    {acl = \"local\", value = \"allow\"}\n  ]\n\n  muc = [\n    {acl = \"all\", value = \"allow\"}\n  ]\n

They are referenced in the options of the mod_muc module.

"},{"location":"configuration/access/#registration","title":"Registration","text":"

This rule manages the permissions to create new users with mod_register.

  register = [\n    {acl = \"all\", value = \"allow\"}\n  ]\n

It needs to be referenced in the options of the mod_register module.

"},{"location":"configuration/access/#mam-permissions","title":"MAM permissions","text":"

These rules set the permissions for MAM operations triggered by IQ stanzas and handled by the mod_mam module.

  mam_set_prefs = [\n    {acl = \"all\", value = \"default\"}\n  ]\n\n  mam_get_prefs = [\n    {acl = \"all\", value = \"default\"}\n  ]\n\n  mam_lookup_messages = [\n    {acl = \"all\", value = \"default\"}\n  ]\n

They can return \"allow\", \"deny\" or \"default\". The last value uses the default setting for the operation, which is to allow the operation when the sender and recipient JID's are the same.

MAM for MUC permissions has muc_ prefix:

  muc_mam_set_prefs = [\n    {acl = \"all\", value = \"default\"}\n  ]\n\n  muc_mam_get_prefs = [\n    {acl = \"all\", value = \"default\"}\n  ]\n\n  muc_mam_lookup_messages = [\n    {acl = \"all\", value = \"default\"}\n  ]\n
"},{"location":"configuration/access/#mam-shapers","title":"MAM shapers","text":"

These rules limit the rate of MAM operations triggered by IQ stanzas.

  mam_set_prefs_shaper = [\n    {acl = \"all\", value = \"mam_shaper\"}\n  ]\n\n  mam_get_prefs_shaper = [\n    {acl = \"all\", value = \"mam_shaper\"}\n  ]\n\n  mam_lookup_messages_shaper = [\n    {acl = \"all\", value = \"mam_shaper\"}\n  ]\n\n  mam_set_prefs_global_shaper = [\n    {acl = \"all\", value = \"mam_global_shaper\"}\n  ]\n\n  mam_get_prefs_global_shaper = [\n    {acl = \"all\", value = \"mam_global_shaper\"}\n  ]\n\n  mam_lookup_messages_global_shaper = [\n    {acl = \"all\", value = \"mam_global_shaper\"}\n  ]\n

For each operation there are two rules:

  • *_shaper - limits the number of operations per user connection per second,
  • *_global_shaper - limits the number of operations per server node per second.

The values returned by the rules (mam_shaper, mam_global_shaper) are shaper names, which need to be defined in the shaper section.

MAM for MUC shapers has muc_ prefix.

"},{"location":"configuration/access/#maximum-number-of-sessions","title":"Maximum number of sessions","text":"

The max_user_sessions rule is used to determine the maximum number of sessions a user can open.

  max_user_sessions = [\n    {acl = \"all\", value = 10}\n  ]\n

By default, all users can open at most 10 concurrent sessions.

"},{"location":"configuration/access/#maximum-number-of-offline-messages","title":"Maximum number of offline messages","text":"

The max_user_offline_messages rule is used to determine the maximum number of messages that is stored for a user by the mod_offline module.

  max_user_offline_messages = [\n    {acl = \"admin\", value = 5000},\n    {acl = \"all\", value = 100}\n  ]\n

It has the following logic:

  • if the access class is admin, the returned value is 5000,
  • otherwise, the returned value is 100.

This means that the admin users can have 5000 messages stored offline, while the others can have at most 100. The admin access class can be defined in the acl section.

"},{"location":"configuration/access/#for-developers","title":"For developers","text":"

To access the rule functionality, one has to use the acl:match_rule/3 function.

Given the following rule:

  register = [\n    {acl = \"all\", value = \"deny\"}\n  ]\n

One can call:

acl:match_rule(<<\"localhost\">>, register, jid:make(<<\"p\">>, <<\"localhost\">>, <<>>)).

Which in our case will return deny. If the rule is not host specific, one can use global instead of <<\"localhost\">>.

"},{"location":"configuration/acl/","title":"Options: Acl","text":"

The acl section is used to define access classes to which the connecting users are assigned. These classes are used in access rules.

  • Syntax: each access class is a key-value pair, where:
    • Key is the name of the access class,
    • Value is a TOML array of patterns - TOML tables, whose format is described below.
  • Default: no default - each access class needs to be specified explicitly.
  • Example: the local access class is used for the regular users connecting to the C2S listener. The pattern {} matches all users from the current server, because it is equivalent to {match = \"current_domain\"} (see below).
  local = [{}]\n

When there are multiple patterns listed, the resulting pattern will be the union of all of them.

"},{"location":"configuration/acl/#patterns","title":"Patterns","text":"

Each pattern consists of one or more conditions, specified with the options listed below. All defined conditions need to be satisfied for the pattern to be matched successfully.

"},{"location":"configuration/acl/#aclmatch","title":"acl.*.match","text":"
  • Syntax: string, one of: \"all\", \"current_domain\", \"any_hosted_domain\", \"none\"
  • Default: \"current_domain\"
  • Example: match = \"all\"

By default only users from the current domain (the one of the server) are matched. Setting it to \"any_hosted_domain\" results in matching users from all domains hosted by this server. You can also set this option to \"all\", extending the pattern to users from external domains. This option makes a difference for some access rules, e.g. MAM, MUC and registration ones. Setting the option to \"none\" makes the pattern never match.

  everyone = [\n    {match = \"all\"}\n  ]\n
"},{"location":"configuration/acl/#acluser","title":"acl.*.user","text":"
  • Syntax: string
  • Example: user = \"admin\"

Matches all JIDs with the specified user name. The following class includes alice@localhost, but not bob@localhost:

  admin = [\n    {user = \"alice\"},\n    {user = \"charlie\"}\n  ]\n
"},{"location":"configuration/acl/#aclserver","title":"acl.*.server","text":"
  • Syntax: string
  • Example: server = \"localhost\"

Matches all JIDs with the specified domain name. The following class includes alice@localhost, but not alice@xmpp.org:

  localhost_users = [\n    {server = \"localhost\"}\n  ]\n

This option can be combined with user - only alice@localhost belongs to the following class:

  admin = [\n    {user = \"alice\", server = \"localhost\"}\n  ]\n
"},{"location":"configuration/acl/#aclresource","title":"acl.*.resource","text":"
  • Syntax: string
  • Example: resource = \"mobile\"

Matches all JIDs with the specified resource name. The following class includes alice@localhost/mobile, but not alice@localhost/home:

  mobile_users = [\n    {resource = \"mobile\"}\n  ]\n
This option can be combined with user and server - only alice@localhost/mobile belongs to the following class:

  admin = [\n    {user = \"alice\", server = \"localhost\", resource = \"mobile\"}\n  ]\n
"},{"location":"configuration/acl/#acluser_regexp","title":"acl.*.user_regexp","text":"
  • Syntax: string, regular expression
  • Example: user_regexp = \"^user.*\"

Matches all JIDs with the user name matching the regular expression. The following class includes alice@localhost and albert@jabber.org, but not bob@localhost:

  ae = [\n    {user_regexp = \"^a.*e\"}\n  ]\n

This option can be combined with server - here albert@jabber.org is excluded:

  localhost_ae = [\n    {user_regexp = \"^a.*e\", server = \"localhost\"}\n  ]\n
"},{"location":"configuration/acl/#aclserver_regexp","title":"acl.*.server_regexp","text":"
  • Syntax: string, regular expression
  • Example: server_regexp = \"host\"

Matches all JIDs with the domain name matching the regular expression. The following class includes alice@host1, but not alice@xmpp.org:

  host_users = [\n    {server_regexp = \"host\"}\n  ]\n

This option can be combined with user_regexp, e.g. we can require the user name to contain 'a' and the domain name to start with 'a':

  a = [\n    {user_regexp = \"a\", server_regexp = \"^a\"}\n  ]\n
"},{"location":"configuration/acl/#aclresource_regexp","title":"acl.*.resource_regexp","text":"
  • Syntax: string, regular expression
  • Example: resource_regexp = \"^res\"

Matches all JIDs with the resource name matching the regular expression. This class includes bob@xmpp.org/res123, but not bob@xmpp.org/home:

  digital_resources = [\n    {resource_regexp = '^res\\d+$'}\n  ]\n

Note the use of a literal string (single quotes) to prevent \\d from being escaped.

"},{"location":"configuration/acl/#acluser_glob","title":"acl.*.user_glob","text":"
  • Syntax: string, glob pattern
  • Example: user_glob = \"user*\"

Matches all JIDs with the user name matching the pattern. The following class includes alice@localhost and albert@jabber.org, but not bob@localhost:

  ae_users = [\n    {user_glob = \"a*e*\"}\n  ]\n

This option can be combined with server - here albert@jabber.org is excluded:

  localhost_ae_users = [\n    {user_glob = \"a*e*\", server = \"localhost\"}\n  ]\n
"},{"location":"configuration/acl/#aclserver_glob","title":"acl.*.server_glob","text":"
  • Syntax: string, glob pattern
  • Example: server_glob = \"host*\"

Matches all JIDs with the domain name matching the pattern. The following class includes alice@host1, but not alice@xmpp.org:

  localhost_users = [\n    {server_glob = \"host*\"}\n  ]\n

This option can be combined with user_glob, e.g. we can require the user name to contain 'a' and the domain name to start with 'a':

  a = [\n    {user_glob = \"*a*\", server_glob = \"a*\"}\n  ]\n
"},{"location":"configuration/acl/#aclresource_glob","title":"acl.*.resource_glob","text":"
  • Syntax: string, glob pattern
  • Example: resource_glob = \"res*\"

Matches all JIDs with the resource name matching the pattern. This class includes bob@xmpp.org/res123, but not bob@xmpp.org/home:

  limited_resources = [\n    {resource_glob = \"res???\"}\n  ]\n
"},{"location":"configuration/auth/","title":"Options: Auth","text":"

The auth section is used to choose and configure the method which is used by MongooseIM to authenticate connecting users. The following methods are supported:

  • internal - stores the user accounts in an internal Mnesia database,
  • rdbms - stores the user accounts in a SQL database,
  • external - uses an external program to authenticate the user,
  • anonymous - allows anonymous connections,
  • ldap - checks the user credentials in LDAP,
  • jwt - authenticates the users with JSON Web Tokens,
  • http - uses an external HTTP service to authenticate the user,
  • pki - uses the certificate provided by the user to authenticate them,
  • dummy - no authentication, only for development and testing.

To allow the users to connect, you need to choose the authentication method from the list above and enable it by adding a corresponding section. For example, the default configuration file has the [auth.internal] section, which enables the internal method, using the internal Mnesia database to store users and their passwords. However, for production systems other methods like rdbms are recommended, as using an external database offers easier maintenance, flexibility, scalability and configurability in a typical setup. Some methods have more complex setup procedures and have their own specific options - the method names above are links to their descriptions. There are some general authentication options as well, which are described below.

Warning

Make sure that the compatible SASL mechanisms are enabled, see capabilities.

"},{"location":"configuration/auth/#general-options","title":"General Options","text":"

The options listed here affect more than one configuration method.

"},{"location":"configuration/auth/#authmethods","title":"auth.methods","text":"
  • Syntax: array of strings. Allowed values: \"internal\", \"rdbms\", \"external\", \"anonymous\", \"ldap\", \"jwt\", \"http\", \"pki\", \"dummy\"
  • Default: not set
  • Example: methods = [\"internal\", \"anonymous\"]

It is possible to enable more than one method - they are queried one by one in the alphabetical order until one of them succeeds or there are no more methods. You can change the default order by using this option. Make sure that all methods from the list have their corresponding sections included in the auth section, e.g.

[auth]\n  methods = [\"internal\", \"dummy\"]\n\n  [auth.internal]\n\n  [auth.dummy]\n    variance = 1000\n
"},{"location":"configuration/auth/#authsasl_mechanisms","title":"auth.sasl_mechanisms","text":"
  • Syntax: array of strings. Allowed values: \"scram_sha512_plus\", \"scram_sha512\", \"scram_sha384_plus\", \"scram_sha384\", \"scram_sha256_plus\", \"scram_sha256\", \"scram_sha224_plus\", \"scram_sha224\", \"scram_sha1_plus\", \"scram_sha1\", \"plain\", \"anonymous\", \"oauth\", \"external\", \"digest\"
  • Default: [\"scram_sha512_plus\", \"scram_sha512\", \"scram_sha384_plus\", \"scram_sha384\", \"scram_sha256_plus\", \"scram_sha256\", \"scram_sha224_plus\", \"scram_sha224\", \"scram_sha1_plus\", \"scram_sha1\", \"plain\", \"anonymous\", \"oauth\"]
  • Example: sasl_mechanisms = [\"external\", \"plain\"]

Specifies the list of allowed SASL mechanisms, which are announced during stream negotiation and eventually enforced (users can't pick a mechanism not listed here).

Notes

  • This list is still filtered by capabilities. For example, if you use the internal method, only the PLAIN, DIGEST-MD5 and SCRAM-SHA-* mechanisms from the list will be supported. If there are no compatible mechanisms on the list, the users will not be able to authenticate.
  • Configuring the sasl_mechanisms replaces the default list entirely.
  • The order in which the mechanisms are listed in the config will be taken as the order in which they are advertised.
  • All SCRAM-SHA-* mechanisms (specified as scram_sha*) have their counterparts which support channel binding and are advertised as separate authentication mechanisms suffixed by -PLUS (specified as scram_sha*_plus).
  • The DIGEST-MD5 mechanism (specified as digest) is deprecated and will be removed in the next release.
"},{"location":"configuration/auth/#authentication-method-capabilities","title":"Authentication method capabilities","text":"

The table below shows the supported SASL mechanisms (columns) for each authentication method (row).

plain digest scram_sha* anonymous external internal x x x rdbms x x x external x anonymous x x x x ldap x x jwt x http x x x pki x dummy x"},{"location":"configuration/auth/#authsasl_external","title":"auth.sasl_external","text":"
  • Syntax: list of strings, allowed values: \"standard\", \"common_name\", \"auth_id\"
  • Default: [\"standard\"]
  • Example: sasl_external = [\"standard\", \"common_name\"]

There are three possible ways of using the SASL EXTERNAL mechanism:

  • standard - do not accept a certificate with no xmpp_addrs field (default),
  • common_name - use the common_name field if it is provided in the certificate,
  • auth_id - accept a certificate without xmpp_addrs and use the user identity from the authentication request.

This option allows you to list the enabled ones in the order of preference (they are tried until one succeeds or the list is exhausted).

"},{"location":"configuration/auth/#authmax_users_per_domain","title":"auth.max_users_per_domain","text":"
  • Syntax: positive integer or string \"infinity\", representing maximum amount of users that can be registered in a domain
  • Default: \"infinity\"
  • Example: max_users_per_domain = 10000

Limits the number of users that can be registered for each domain. If the option is configured to the value \"infinity\", no limit is present.

Warning

The limit only works for the following authentication methods: internal, rdbms and ldap.

"},{"location":"configuration/auth/#password-related-options","title":"Password-related options","text":"

These options are common to the http, rdbms and internal methods.

"},{"location":"configuration/auth/#authpasswordformat","title":"auth.password.format","text":"
  • Syntax: string, one of: \"plain\", \"scram\"
  • Default: \"scram\"
  • Example: password.format = \"plain\"

Decide whether user passwords will be kept plain or hashed in the database. Currently, popular XMPP clients support the SCRAM method and it is strongly recommended to use the hashed version. The older XMPP clients can still use the PLAIN mechanism even if the format is set to scram.

Note

The DIGEST-MD5 mechanism is not available with the scram password format.

"},{"location":"configuration/auth/#scram-options","title":"SCRAM options","text":"

For these options to take effect, password.format should be set to scram.

"},{"location":"configuration/auth/#authpasswordhash","title":"auth.password.hash","text":"
  • Syntax: list of strings, allowed values: \"sha\", \"sha224\", \"sha256\", \"sha384\", \"sha512\"
  • Default: not set - all hash functions supported
  • Example: password.hash = [\"sha384\", \"sha512\"]

MongooseIM supports SHA-1, SHA-224, SHA-256, SHA-384 and SHA-512 for SCRAM hashing. You can use this option to limit the supported hash functions by listing them explicitly. The value \"sha\" stands for the SHA-1 algorithm.

Warning

This option limits the supported SCRAM-SHA-* SASL mechanisms to the ones compatible with the specified hash functions.

"},{"location":"configuration/auth/#authpasswordscram_iterations","title":"auth.password.scram_iterations","text":"
  • Syntax: positive integer
  • Default: 10000, as recommended in this XEP and this NIST Guidelines
  • Example: password.scram_iterations = 20_000

Hash function round count. This is a tradeoff between latency and security. The higher the value, the more difficult breaking the hashes is: increasing the count increases the work it requires to compute a full derivation, which effectively slows down brute-force attacks. But it adds load on both client and server, so this parameter should be tuned as high as the business-rules allow. Note that increasing the security of a password has a higher impact over the security of the algorithm, without impacting its load. See more information in this NIST guide, Appendix A.2.2

"},{"location":"configuration/auth/#examples","title":"Examples","text":"

Internal authentication method without any general options - you can skip the auth section in this case:

[auth.internal]\n

Internal authentication method with some general options:

[auth]\n  password.hash = [\"sha512\"]\n  password.scram_iterations = 20000\n\n  [auth.internal]\n

For more specific examples, see the links below.

"},{"location":"configuration/auth/#method-specific-options","title":"Method-specific options","text":"

See the links below for options related to the particular methods:

  • RDBMS method options
  • Anonymous method options
  • External method options
  • LDAP method options
  • JWT method options
  • HTTP method options
"},{"location":"configuration/configuration-files/","title":"Configuration Files","text":"

The following files are used to configure MongooseIM:

  • mongooseim.toml for MongooseIM settings,

  • vm.args to affect the Erlang VM behaviour (performance tuning, node name),

  • app.config to change low-level logging parameters and settings of other Erlang applications.

"},{"location":"configuration/configuration-files/#mongooseimtoml","title":"mongooseim.toml","text":"

This TOML file contains the configuration options for the MongooseIM server. It is located at [MongooseIM repo root]/rel/files/ if you are building from source or [MongooseIM install root]/etc/ if you are using a pre-built version.

The file is divided into the following sections:

  • general - Served XMPP domains, log level, server language and some other miscellaneous settings.
  • listen - Configured listeners, receiving incoming XMPP and HTTP connections.
  • auth - Supported client authentication methods and their options.
  • internal_databases - Options for Mnesia and CETS. They are primarily used for clustering.
  • outgoing_pools - Outgoing connections to external services, including databases, message queues and HTTP services.
  • services - Internal services like an administration API and system metrics.
  • modules - XMPP extension modules, which extend the basic functionality provided by XMPP.
  • shaper - Traffic shapers that limit the incoming XMPP traffic, providing a safety valve to protect the server.
  • acl - Access classes to which connecting users are assigned.
  • access - Access rules, specifying the privileges of the defined access classes.
  • s2s - Server-to-server connection options, used for XMPP federation.
  • host_config - Configuration options for different XMPP domains or host types (groups of domains).

The section names above are links to the detailed documentation of each section.

Warning

It is recommended to use the same configuration file for all nodes in the cluster, but there is no protection against using different option values for each node, because it can happen in two cases:

  • During a rolling upgrade procedure, when nodes are restarted one by one with new configuration.
  • When you need different network-specific parameters (e.g. listening IP addresses) for each node.
"},{"location":"configuration/configuration-files/#vmargs","title":"vm.args","text":"

This file contains parameters passed directly to the Erlang VM. To configure it, go to [MongooseIM root]/rel/files/.

Let's explore the default options.

"},{"location":"configuration/configuration-files/#options","title":"Options","text":"
  • -sname - Erlang node name. Can be changed to name, if necessary
  • -setcookie - Erlang cookie. All nodes in a cluster must use the same cookie value.
  • +K - Enables kernel polling. It improves the stability when a large number of sockets is opened, but some systems might benefit from disabling it. Might be a subject of individual load testing.
  • +A 5 - Sets the asynchronous threads number. Async threads improve I/O operations efficiency by relieving scheduler threads of IO waits.
  • +P 10000000 - Process count limit. This is a maximum allowed number of processes running per node. In general, it should exceed the tripled estimated online user count.
  • -env ERL_MAX_PORTS 250000 - Open port count. This is a maximum allowed number of ports opened per node. In general, it should exceed the tripled estimated online user count. Keep in mind that increasing this number also increases the memory usage by a constant amount, so finding the right balance for it is important for every project.
  • -env ERL_FULLSWEEP_AFTER 2 - affects garbage collection. Reduces memory consumption (forces often full g.c.) at the expense of CPU usage.
  • -sasl sasl_error_logger false - MongooseIM's solution for logging is Lager, so SASL error logger is disabled.
"},{"location":"configuration/configuration-files/#appconfig","title":"app.config","text":"

A file with Erlang application configuration. To configure it, go to [MongooseIM root]/rel/files/. By default only the following applications can be found there:

  • logger - check Logger's documentation for more information.
  • ssl
    • session_lifetime (default specified in the file: 600 seconds) - This parameter says for how long should the ssl session remain in the cache for further re-use, should ssl session resumption happen.
"},{"location":"configuration/configuration-files/#configuring-tls-certificates-keys","title":"Configuring TLS: Certificates & Keys","text":"

TLS is configured in one of two ways: some modules need a private key and certificate (chain) in separate files, while others need both in a single file. This is because recent additions use OTP's ssl library, while older modules use p1_tls.

  • Client-to-server connections need both in the same .pem file
  • Server-to-server connections need both in the same .pem file
  • BOSH, WebSockets and REST APIs need them in separate files

In order to create private key & certificate bundle, you may simply concatenate them.

More information about configuring TLS for these endpoints is available in the listen section configuration page.

"},{"location":"configuration/database-backends-configuration/","title":"Database Backends","text":"

MongooseIM can work with several databases, both RDBMS (SQL) and NoSQL ones. Some of them require extra work before they can be used. For example the SQL databases require defining a schema. MongooseIM is tested with CI, so the CI scripts can be used as a reference.

"},{"location":"configuration/database-backends-configuration/#a-brief-overview","title":"A Brief Overview","text":"

Data in MongooseIM is either transient or persistent:

  • transient: volatile data changing often, such as session data, stream management data, and other in-memory data. These don't need any backup, since after a potential failure, they will naturally rebuild as clients reconnect.
  • persistent: long-lived data, such as roster items, credentials, and chat archives. These absolutely need regular and tested backups.
"},{"location":"configuration/database-backends-configuration/#choosing-a-database-for-mongooseim","title":"Choosing a database for MongooseIM","text":"

Here is some general advice on the use of databases. Subsequent sections go into more depth on each database: what they are suitable for and how to set them up.

Transient data:

  • CETS - a library to synchronise ETS tables between nodes. A new choice to share live data across the MongooseIM cluster. We recommend using this backend for transient data. This backend requires an RDBMS database to be configured, because we use an external database to discover nodes in the cluster. For a CETS config example, see tutorials.

  • Mnesia - a built-in Erlang Database. Mnesia is fine for a cluster of fixed size with reliable networking between nodes and with nodes rarely restarted. There are some issues when nodes are restarting or new ones joining the cluster. For this case, we recommend using CETS instead. Mnesia is still the default backend for some modules for compatibility reasons with older config files.

  • Redis - A fantastic choice for storing live data. It's highly scalable and it can be easily shared by multiple MongooseIM nodes. Additionally, Redis' great performance makes it an excellent choice for storing user session data. We recommend caution, since it has not yet been widely tested in production.

Persistent Data:

  • RDBMS - MongooseIM has a strong backend support for relational databases. Reliable and battle proven, they are a great choice for regular MongooseIM use cases and features like privacy lists, vcards, roster, private storage, last activity and message archive. Never lose your data. Use MySQL, MariaDB, PostgreSQL, or MS SQL Server.

  • Cassandra - Only for MAM (Message Archive Management).

  • ElasticSearch - Only for MAM (Message Archive Management).

  • Mnesia - some backends support Mnesia to store data, but it is not recommended. It is still the default option, when not specifying a backend for many modules, so be careful.

    Warning

    We strongly recommend keeping persistent data in an external DB (RDBMS) for production. Mnesia is not suitable for the volumes of persistent data which some modules may require. Sooner or later a migration will be needed which may be painful. It is possible to store all data in Mnesia, but only for testing purposes, not for any serious deployments.

User Data:

  • LDAP - Used for: users, shared rosters, vCards
"},{"location":"configuration/database-backends-configuration/#rdbms","title":"RDBMS","text":""},{"location":"configuration/database-backends-configuration/#mysql","title":"MySQL","text":"

Can be used for:

  • users (credentials)
  • vcards
  • roster
  • private storage
  • privacy/block lists
  • last activity
  • mam (message archive management)
  • muc_light rooms

Setup

The schema files can be found in the priv directory. The default schema is defined in the mysql.sql file.

You can use the following command to apply it on localhost:

mysql -h localhost -u user -p -e 'create database mongooseim'\nmysql -h localhost -u user -p mongooseim < mysql.sql\n

You should also configure the MySQL database in the mongooseim.toml file. Please refer to the RDBMS options for more information.

Version notice

The required minimum version of MySQL is 8.0 because MongooseIM uses the JSON data type and the INSERT INTO ... AS ... query syntax.

"},{"location":"configuration/database-backends-configuration/#postgresql","title":"PostgreSQL","text":"

Can be used for:

  • users (credentials)
  • vcards
  • roster
  • private storage
  • privacy/block lists
  • last activity
  • mam (message archive management)
  • muc_light rooms

Setup

The schema files can be found in the priv directory. The default schema is defined in the pg.sql file.

You can use the following command to apply it on localhost:

psql -h localhost -U user -c \"CREATE DATABASE mongooseim;\"\npsql -h localhost -U user -q -d mongooseim -f pg.sql\n
You should also configure the Postgres database in the mongooseim.toml file. Please refer to the RDBMS options and general database options for more information.

"},{"location":"configuration/database-backends-configuration/#microsoft-sql-server","title":"Microsoft SQL Server","text":"

Microsoft SQL Server, sometimes called MSSQL, or Azure SQL Database.

Warning

MongooseIM can only connect to MSSQL on Ubuntu Xenial x64.

This can be used for:

  • users (credentials)
  • vcards
  • roster
  • private storage
  • privacy/block lists
  • last activity
  • mam (message archive management)
  • muc_light rooms

Setup

MSSQL can be used from MongooseIM through the ODBC layer with FreeTDS driver, so you need them installed on your system.

# Ubuntu\n$ sudo apt install freetds-dev tdsodbc\n\n# CentOS compatible systems (Rocky, Alma)\n$ sudo yum install freetds\n\n# macOS\n$ brew install freetds\n

Then you need to configure the connection. Add your database (mongooseim here) to the /etc/odbc.ini or $HOME/.odbc.ini file:

[mongoose-mssql]\n; Ubuntu\nDriver      = /usr/lib/x86_64-linux-gnu/odbc/libtdsodbc.so\nSetup       = /usr/lib/x86_64-linux-gnu/odbc/libtdsS.so\n; CentOS compatible\n; Driver      = /usr/lib64/libtdsodbc.so.0\n; Setup       = /usr/lib64/libtdsS.so\n; macOS\n; Driver      = /usr/local/Cellar/freetds/[current version]/lib/libtdsodbc.so\nServer      = 127.0.0.1\nPort        = 1433\nDatabase    = mongooseim\nCharset     = UTF-8\nTDS_Version = 7.2\nclient_charset = UTF-8\n

Please amend the paths above to match your current OS if necessary.

For more details, please refer to the freetds.conf documentation and unixodbc documentation.

MongooseIM is built with ODBC support by default.

Deadlocks notice

If muc_light's backend is set to ODBC and there are many rooms created in parallel in your system, there may be some deadlocks due to the READ_COMMITTED_SNAPSHOT set to OFF by default. In this case we recommend setting this database property to ON, this will enable row level locking which significantly reduces deadlock chances around muc_light operations.

This property can be set by the following ALTER DATABASE query:

ALTER DATABASE $name_of_your_db SET READ_COMMITTED_SNAPSHOT ON\n

The command above may take some time.

Then you need to import the SQL schema from mssql2012.sql. You can use a Microsoft's GUI tool (the provided .sql files should work with it) or isql, but after a slight modification of the dump file:

cat mssql2012.sql | tr -d '\\r' | tr '\\n' ' ' | sed 's/GO/\\n/g' |\nisql mongoose-mssql username password -b\n

The final step is to configure mongooseim.toml appropriately. Set the following option in the general section:

[general]\n  rdbms_server_type = \"mssql\"\n

Configure the outgoing_pools.rdbms section as follows:

[outgoing_pools.rdbms.default]\n  workers = 5\n\n  [outgoing_pools.rdbms.default.connection]\n    driver = \"odbc\"\n    settings = \"DSN=mongoose-mssql;UID=username;PWD=password\"\n
"},{"location":"configuration/database-backends-configuration/#nosql","title":"NoSQL","text":""},{"location":"configuration/database-backends-configuration/#cassandra","title":"Cassandra","text":"

Setup

This will prepare Cassandra for connection from MongooseIM. Make sure Cassandra is running, open a new terminal window and enter the following commands:

$ cqlsh\n$ cqlsh> source '$REPO/priv/casssandra.cql';\n

"},{"location":"configuration/database-backends-configuration/#elasticsearch","title":"ElasticSearch","text":"

Can be used for:

  • MAM (Message Archive Management)

Setup

Please note that MongooseIM has been tested to work properly with ElasticSearch version 5.6.9.

In order to use ElasticSearch as a MAM backend, you'll need to create required indexes and mappings. From the root of MongooseIM's repository run:

curl -X PUT $ELASTICSEARCH_URL/messages -d '@priv/elasticsearch/pm.json'\ncurl -X PUT $ELASTICSEARCH_URL/muc_messages -d '@priv/elasticsearch/muc.json'\n

where $ELASTICSEARCH_URL is a URL pointing to your ElasticSearch node's HTTP API endpoint.

Please refer to the advanced configuration page to check how to configure MongooseIM to connect to ElasticSearch node.

"},{"location":"configuration/database-backends-configuration/#redis","title":"Redis","text":"

Can be used for:

  • users sessions

Setup

Please refer to the Redis options for more information.

"},{"location":"configuration/database-backends-configuration/#ldap","title":"LDAP","text":"

Can be used for:

  • users (credentials)
  • shared roster
  • vcard

Setup

Please refer to the LDAP options for more information.

"},{"location":"configuration/general/","title":"Options: General","text":"

The general section contains basic settings as well as some miscellaneous options. You can start with providing only the basic options, for example configuring the loglevel, a single host (XMPP domain) as the default, and setting the server language:

[general]\n  loglevel = \"warning\"\n  hosts = [\"my-xmpp-domain.com\"]\n  default_server_domain = \"my-xmpp-domain.com\"\n  language = \"en\"\n

All options are described below.

"},{"location":"configuration/general/#general-options","title":"General options","text":"

These are the basic settings that you should configure before running your MongooseIM server.

"},{"location":"configuration/general/#generalloglevel","title":"general.loglevel","text":"
  • Syntax: string, one of \"none\", \"emergency\", \"alert\", \"critical\", \"error\", \"warning\", \"notice\", \"info\", \"debug\", \"all\".
  • Default: \"warning\"
  • Example: loglevel = \"error\"

Verbosity level of the logger. Values recommended for production systems are \"error\" and \"warning\". The \"debug\" level is good for development.

"},{"location":"configuration/general/#generalhosts","title":"general.hosts","text":"
  • Syntax: array of strings representing the domain names.
  • Default: none. If omitted, at least one host type has to be defined in general.host_types.
  • Example: hosts = [\"localhost\", \"domain2\"]

This option specifies the statically defined XMPP domains served by this cluster. In order to configure these hosts independently, use the host_config section.

Note

At least one of general.hosts or general.host_types have to be provided.

Warning

Extension modules and database backends will be started separately for every domain from this list. When increasing the number of domains, please make sure you have enough resources available (e.g. connection limit set in the DBMS).

"},{"location":"configuration/general/#generalhost_types","title":"general.host_types","text":"
  • Syntax: array of strings representing the names of host types.
  • Default: none. If omitted, at least one host has to be defined in general.hosts.
  • Example: host_types = [\"first type\", \"second type\"]

This is the list of names for the types of hosts that will serve dynamic XMPP domains. Each host type can be seen as a label for a group of independent domains that use the same server configuration. In order to configure these host types independently, use the host_config section. The domains can be added or removed dynamically with the command line interface or using the API.

If you use the host type mechanism, make sure you only configure modules which support dynamic domains in the modules or host_config.modules sections. MongooseIM will not start otherwise. Most of the modules are compatible with host types, but please read the particular extension module's page, or the incompatible modules list to see which do not. Moreover, s2s as well as XMPP components (XEP-0114), as configured in the listen.service section, do not support dynamic domains.

Note

At least one of general.hosts or general.host_types have to be provided.

Warning

Extension modules and database backends will be started separately for every host type from this list. When increasing the number of host types, please make sure you have enough resources available (e.g. connection limit set in the DBMS).

"},{"location":"configuration/general/#generaldefault_server_domain","title":"general.default_server_domain","text":"
  • Syntax: a string
  • Default: none, this option is mandatory.
  • Example: default_server_domain = \"my-xmpp-domain.com\"

This domain is used as a default when one cannot be determined, for example when sending XMPP stream errors to unauthenticated clients.

"},{"location":"configuration/general/#generallanguage","title":"general.language","text":"
  • Syntax: string representing the two-letter language code.
  • Default: \"en\"
  • Example: language = \"pl\"

Default language for messages sent by the server to users. You can get a full list of supported codes by executing cd [MongooseIM root] ; ls priv/*.msg | awk '{split($0,a,\"/\"); split(a[4],b,\".\"); print b[1]}' (en is not listed there)

"},{"location":"configuration/general/#database-settings","title":"Database settings","text":"

RDBMS connection pools are set using outgoing connections configuration. There are some additional options that influence all database connections in the server:

"},{"location":"configuration/general/#generalrdbms_server_type","title":"general.rdbms_server_type","text":"
  • Syntax: string, \"mssql\" or \"pgsql\"
  • Default: not set
  • Example: rdbms_server_type = \"mssql\"

When using MSSQL or PostgreSQL databases, this option allows MongooseIM to optimize some queries for these DBs (e.g. mod_mam_rdbms_user uses different queries for mssql).

"},{"location":"configuration/general/#access-management","title":"Access management","text":"

User access rules are configured mainly in the acl and access sections.

"},{"location":"configuration/general/#security","title":"Security","text":"

Here you can find some additional options related to system security.

"},{"location":"configuration/general/#generalregistration_timeout","title":"general.registration_timeout","text":"
  • Syntax: the string \"infinity\" or a number of seconds (positive integer)
  • Default: 600
  • Example: registration_timeout = \"infinity\"

Limits the registration frequency from a single IP address. The special value infinity means no limit.

"},{"location":"configuration/general/#generalhide_service_name","title":"general.hide_service_name","text":"
  • Syntax: boolean
  • Default: false
  • Example: hide_service_name = true

According to RFC 6120, even when a client sends invalid data after opening a connection, the server must open an XML stream and return a stream error anyway. For extra security, this option may be enabled. It changes MIM behaviour to simply close the connection without any errors returned (effectively hiding the server's identity).

"},{"location":"configuration/general/#user-session-management","title":"User session management","text":"

These options can be used to configure the way MongooseIM manages user sessions.

"},{"location":"configuration/general/#generalsm_backend","title":"general.sm_backend","text":"
  • Syntax: string: \"mnesia\", \"cets\" or \"redis\"
  • Default: \"mnesia\"
  • Example: sm_backend = \"redis\"

Backend for storing user session data. All nodes in a cluster must have access to a complete session database. CETS is a new backend, which requires RDBMS to be configured in order to work properly. Mnesia is a legacy backend, sufficient in most cases; use Redis only in large deployments when you notice issues with the mnesia backend. Redis requires a redis pool with the default tag defined in the outgoing_pools section. See the section about redis connection setup for more information.

Warning

When set to mnesia or cets, the corresponding internal database has to be enabled.

"},{"location":"configuration/general/#generalreplaced_wait_timeout","title":"general.replaced_wait_timeout","text":"
  • Syntax: positive integer, representing time in milliseconds
  • Default: 2000
  • Example: replaced_wait_timeout = 5000

When a user's session is replaced (due to a full JID conflict) by a new one, this parameter specifies the time MongooseIM waits for the old sessions to close. The default value is sufficient in most cases. If you observe replaced_wait_timeout warning in logs, then most probably the old sessions are frozen for some reason and it should be investigated.

"},{"location":"configuration/general/#xmpp-federation-s2s","title":"XMPP federation (S2S)","text":""},{"location":"configuration/general/#generals2s_backend","title":"general.s2s_backend","text":"
  • Syntax: string: \"mnesia\" or \"cets\"
  • Default: \"mnesia\"
  • Example: s2s_backend = \"cets\"

Backend for replicating the list of outgoing Server to Server (S2S) connections across the nodes of the local MongooseIM cluster.

Warning

The corresponding internal database has to be enabled.

"},{"location":"configuration/general/#external-xmpp-components","title":"External XMPP components","text":""},{"location":"configuration/general/#generalcomponent_backend","title":"general.component_backend","text":"
  • Syntax: string: \"mnesia\" or \"cets\"
  • Default: \"mnesia\"
  • Example: component_backend = \"cets\"

Backend for replicating the list of connected external components across the nodes of the local MongooseIM cluster.

Warning

The corresponding internal database has to be enabled.

"},{"location":"configuration/general/#message-routing","title":"Message routing","text":"

The following options influence the way MongooseIM routes incoming messages to their recipients.

"},{"location":"configuration/general/#generalroute_subdomains","title":"general.route_subdomains","text":"
  • Syntax: string, the only accepted value is \"s2s\"
  • Default: not set
  • Example: route_subdomains = \"s2s\"

If a stanza is addressed to a subdomain of the served domain and this option is set to s2s, such a stanza will be transmitted over a server-to-server connection. Without it, MongooseIM will try to route the stanza to one of its internal services.

"},{"location":"configuration/general/#generalrouting_modules","title":"general.routing_modules","text":"
  • Syntax: a list of strings representing the routing module names.
  • Default: [\"mongoose_router_global\", \"mongoose_router_localdomain\", \"mongoose_router_external_localnode\", \"mongoose_router_external\", \"mongoose_router_dynamic_domains\", \"ejabberd_s2s\"]
  • Example: routing_modules = [\"mongoose_router_global\", \"mongoose_router_localdomain\"]

Provides an ordered list of modules used for routing messages. All available modules are enabled by default, and you can change their order or disable some of them by providing your own list. See the Message routing section of the developer's guide for more information.

"},{"location":"configuration/general/#miscellaneous","title":"Miscellaneous","text":"

The options listed below are used to configure more specific settings, that do not need to be changed in usual use cases.

"},{"location":"configuration/general/#generalall_metrics_are_global","title":"general.all_metrics_are_global","text":"
  • Syntax: boolean
  • Default: false
  • Example: all_metrics_are_global = true

When enabled, all per-host metrics are merged into global equivalents. It means it is no longer possible to view individual host1, host2, host3, ... metrics, only sums are available. This option significantly reduces CPU and (especially) memory footprint in setups with exceptionally many domains (thousands, tens of thousands).

"},{"location":"configuration/general/#generalhttp_server_name","title":"general.http_server_name","text":"
  • Syntax: string
  • Default: \"Cowboy\"
  • Example: http_server_name = \"Apache\"

Replaces Cowboy's default name returned in the server HTTP response header. It may be used for extra security, as it makes it harder for the malicious user to learn what HTTP software is running under a specific port. This option applies to all configured HTTP listeners.

"},{"location":"configuration/general/#generalmax_fsm_queue","title":"general.max_fsm_queue","text":"
  • Syntax: positive integer
  • Default: not set
  • Example: max_fsm_queue = 5000

When specified, will terminate certain processes (e.g. client handlers) that have more messages accumulated in the queue than the specified limit, to prevent resource exhaustion. This option is set for C2S, outgoing S2S and component connections and can be overridden for particular s2s or service listeners in their configurations. Use with caution!

"},{"location":"configuration/general/#generaldomain_certfile","title":"general.domain_certfile","text":"
  • Syntax: array of TOML tables with the following mandatory content:
    • domain - string, XMPP domain name. In case of dynamic domains it should be a host type instead.
    • certfile - string, path in the file system
  • Default: not set
  • Example:
  domain_certfile = [\n    {domain = \"localhost1.com\", certfile = \"cert1.pem\"},\n    {domain = \"localhost2.com\", certfile = \"cert2.pem\"}\n  ]\n

This option overrides the configured certificate file for specific local XMPP domains.

Notes

  • This option applies to S2S and C2S connections.
  • Each domain (or host type) needs to be included in the list of hosts or host types.
"},{"location":"configuration/host_config/","title":"Options: Host config","text":"

The host_config section is used to configure options for specific XMPP domains or for host types, which are used to group multiple domains. For each domain or host type requiring such options, a host_config section needs to be created with the following format:

  • Syntax: domain subsection starts with [[host_config]] and contains the options listed below.
  • Default: none - all domain-level options need to be specified explicitly.
  • Example: see the examples for each section below.

Note

Each hosted domain needs to be included in the list of hosts in the general section. Similarly, each host type needs to be included in general.host_types.

"},{"location":"configuration/host_config/#general-options","title":"General options","text":""},{"location":"configuration/host_config/#host_confighost","title":"host_config.host","text":"
  • Syntax: string, domain name
  • Default: no default, either this option or host_config.host_type is mandatory
  • Example: host = \"my-xmpp-server.com\"

This option specifies the XMPP domain that this section refers to.

"},{"location":"configuration/host_config/#host_confighost_type","title":"host_config.host_type","text":"
  • Syntax: string, host type name
  • Default: no default, either this option or host_config.host is mandatory
  • Example: host_type = \"first type\"

This option specifies the host type that this section refers to.

"},{"location":"configuration/host_config/#configuration-sections","title":"Configuration sections","text":"

The following sections are accepted in host_config:

"},{"location":"configuration/host_config/#host_configgeneral","title":"host_config.general","text":"

The options defined here override the ones defined in the top-level general section. The following options are allowed:

  • route_subdomains
  • replaced_wait_timeout
"},{"location":"configuration/host_config/#example","title":"Example","text":"

The replaced_wait_timeout option is set to 2000 only for domain2.com.

[general]\n  hosts = [\"domain1.com\", \"domain2.com\", \"domain3.com\"]\n  loglevel = \"info\"\n  replaced_wait_timeout = 1000\n\n[[host_config]]\n  host = \"domain2.com\"\n\n  [host_config.general]\n    replaced_wait_timeout = 2000\n
"},{"location":"configuration/host_config/#host_configauth","title":"host_config.auth","text":"

This section completely overrides the top-level auth section, all options are allowed.

"},{"location":"configuration/host_config/#example_1","title":"Example","text":"

In the example below the number of scram_iterations is increased for domain2. It is necessary to put methods and password.hash there as well, as otherwise they would not be set for domain2.

[general]\n  hosts = [\"domain1.com\", \"domain2.com\", \"domain3.com\"]\n\n[auth]\n  methods = [\"rdbms\"]\n  password.hash = [\"sha256\"]\n\n[[host_config]]\n  host = \"domain2.com\"\n\n  [host_config.auth]\n    methods = [\"rdbms\"]\n    password.hash = [\"sha256\"]\n    scram_iterations = 40_000\n
"},{"location":"configuration/host_config/#host_configmodules","title":"host_config.modules","text":"

This section completely overrides the top-level modules section. Remember that only the modules supporting dynamic domains are allowed if you are specifying options for a host type. The ones that do not support it can be found in the modules list.

"},{"location":"configuration/host_config/#example_2","title":"Example","text":"

The modules enabled for domain2.com will be mod_disco and mod_stream_management. If we wanted to enable mod_roster, it would need to be repeated in host_config.

[general]\n  hosts = [\"domain1.com\", \"domain2.com\", \"domain3.com\"]\n\n[modules.mod_disco]\n  users_can_see_hidden_services = false\n\n[modules.mod_roster]\n  backend = \"rdbms\"\n\n[[host_config]]\n  host = \"domain2.com\"\n\n  [host_config.modules.mod_disco]\n    users_can_see_hidden_services = false\n\n  [host_config.modules.mod_stream_management]\n
"},{"location":"configuration/host_config/#host_configoutgoing_pools","title":"host_config.outgoing_pools","text":"

This section overrides any pool with the same type and tag that was defined in the top-level outgoing_pools section. If we wanted to enable a default rdbms pool only for \"host-type-basic\" for example, we could do so as follows:

[general]\n  host_types = [\"host-type-basic\", \"host-type-advanced\", \"host-type-privacy\"]\n\n[[host_config]]\n  host_type = \"host-type-basic\"\n\n  [host_config.outgoing_pools.rdbms.default]\n    workers = 5\n    [host_config.outgoing_pools.rdbms.default.connection]\n    ...\n

Configuration for such pools is all the same, except that the scope key is here disallowed.

"},{"location":"configuration/host_config/#host_configacl","title":"host_config.acl","text":"

The access classes defined here are merged with the ones defined in the top-level acl section - when a class is defined in both places, the result is a union of both classes.

"},{"location":"configuration/host_config/#example_3","title":"Example","text":"

The blocked access class is extended for host_config by adding hacker2.

[general]\n  hosts = [\"domain1.com\", \"domain2.com\", \"domain3.com\"]\n\n[acl]\n  blocked = [\n    {user = \"spammer\"},\n    {user = \"hacker1\"}\n  ]\n\n[[host_config]]\n  host = \"domain2.com\"\n\n  [host_config.acl]\n    blocked = [\n      {user = \"hacker2\"}\n    ]\n
"},{"location":"configuration/host_config/#host_configaccess","title":"host_config.access","text":"

The access rules defined here are merged with the ones defined in the top-level access section: When a rule is defined in both places:

  • If the top-level rule ends with a catch-all clause {acl = \"all\", value = \"allow\"}, the resulting domain-specific rule has the clauses from both rules with the domain-specific clauses inserted after the top-level ones, but before the catch-all clause.
  • If the top-level rule does not end with a catch-all clause, the resulting domain-specific rule has the clauses from both rules with the domain-specific clauses inserted after the top-level ones.
"},{"location":"configuration/host_config/#example_4","title":"Example","text":"

The c2s access rule defined at the top level allows anyone to connect. However, the rule for domain2.com is extended to prevent the blocked users from connecting:

[general]\n  hosts = [\"domain1.com\", \"domain2.com\", \"domain3.com\"]\n\n[access]\n  c2s = [\n    {acl = \"admin\", value = \"allow\"},\n    {acl = \"all\", value = \"allow\"}\n  ]\n\n[[host_config]]\n  host = \"domain2.com\"\n\n  [host_config.access]\n    c2s = [\n      {acl = \"blocked\", value = \"deny\"}\n    ]\n\n    register = [\n      {acl = \"all\", value = \"deny\"}\n    ]\n

The resulting rule for domain2.com could be written as:

c2s = [\n  {acl = \"admin\", value = \"allow\"},\n  {acl = \"blocked\", value = \"deny\"},\n  {acl = \"all\", value = \"allow\"}\n]\n

The register rule is defined only for domain2.com.

Note

Some access rules are checked outside of the context of any domain, e.g. the access rule for external components - defining them in host_config would have no effect.

"},{"location":"configuration/host_config/#host_configs2s","title":"host_config.s2s","text":"

This section completely overrides the top-level s2s section, all options are allowed.

"},{"location":"configuration/host_config/#example_5","title":"Example","text":"

The host_policy option is changed for domain2.com:

[general]\n  hosts = [\"domain1.com\", \"domain2.com\", \"domain3.com\"]\n\n[s2s]\n  default_policy = \"deny\"\n\n  host_policy = [\n    {host = \"good-xmpp.org\", policy = \"allow\"},\n    {host = \"bad-xmpp.org\", policy = \"deny\"}\n  ]\n\n[[host_config]]\n  host = \"domain2.com\"\n\n  [host_config.s2s]\n    host_policy = [\n      {host = \"bad-xmpp.org\", policy = \"allow\"},\n      {host = \"evil-xmpp.org\", policy = \"deny\"}\n    ]\n

Note that default_policy for domain2.com has the default value allow, because host_config.s2s completely overrides the top-level s2s section, and all options are reset to the respective default values, unless they are explicitly changed.

"},{"location":"configuration/internal-databases/","title":"Options: Internal Databases","text":"

Internal databases are used to cluster MongooseIM nodes, and to replicate in-memory data (e.g. client sessions) between them.

Mnesia is a legacy way to cluster MongooseIM nodes. It can also be used to store persistent data, but we recommend using RDBMS databases instead for scalability and stability reasons.

CETS is a new way to cluster MongooseIM nodes. CETS needs to know a list of nodes for the node discovery. There are two ways to get a list of nodes:

  • RDBMS database. MongooseIM writes its node name into the RDBMS and reads the list of other nodes from it. This is the best option if you are already using a relational database.
  • A text file with a list of nodes on each line. It is useful when there is an external script to make this file based on some custom logic (for example, a bash script that uses AWS CLI to discover instances in the autoscaling group). This file would be automatically reread on change.

Omitting this section entirely is equivalent to having only Mnesia enabled:

[internal_databases.mnesia]\n

The following example enables only CETS with the default RDBMS discovery backend:

[internal_databases.cets]\n

Warning

When switching to CETS, you need to configure particular backends to actually use it:

  • general backends: sm_backend, s2s_backend, component_backend
  • module backends: mod_bosh, mod_stream_management, mod_jingle_sip, mod_muc

Sometimes you might want to have both databases enabled and choose which backends use a particular DB:

[internal_databases.mnesia]\n\n[internal_databases.cets]\n
"},{"location":"configuration/internal-databases/#cets-options","title":"CETS Options","text":""},{"location":"configuration/internal-databases/#internal_databasescetsbackend","title":"internal_databases.cets.backend","text":"

Backend for CETS discovery.

  • Syntax: string, one of \"rdbms\", \"file\".
  • Default: \"rdbms\"
  • Example: backend = \"rdbms\"
"},{"location":"configuration/internal-databases/#internal_databasescetscluster_name","title":"internal_databases.cets.cluster_name","text":"

Namespace for the cluster. Only nodes with the same cluster name would be discovered. This option is for RDBMS backend.

  • Syntax: string.
  • Default: \"mongooseim\"
  • Example: cluster_name = \"mongooseim\"
"},{"location":"configuration/internal-databases/#internal_databasescetsnode_list_file","title":"internal_databases.cets.node_list_file","text":"

File to read a list of nodes from. Relative to the MongooseIM's release directory. This option is for the file backend. Required, if backend = \"file\".

  • Syntax: path.
  • Default: not specified.
  • Example: node_list_file = \"/etc/mim_nodes.txt\"
"},{"location":"configuration/internal-databases/#example","title":"Example","text":"

The following example enables CETS with the file discovery backend:

[internal_databases.cets]\n    backend = \"file\"\n    node_list_file = \"cets_disco.txt\"\n
"},{"location":"configuration/listen/","title":"Options: Listen","text":"

The listen section specifies how MongooseIM handles incoming connections.

  • Syntax: Each listener is specified in a subsection starting with [[listen.type]] where type is one of the allowed listener types, handling different types of incoming connections:

    • c2s - client-to-server XMPP connections,
    • s2s - server-to-server XMPP connections,
    • service - XMPP connections from external components,
    • http - HTTP connections from clients or other services.

The double-bracket syntax is used because there can be multiple listeners of a given type, so for each listener type there is a TOML array of one or more tables (subsections).

  • Default: None - each listener needs to be enabled explicitly. Typical listeners are already specified in the example configuration file.
  • Example: The simplest XMPP listener configuration, handling only incoming XMPP client connections:
[[listen.c2s]]\n  port = 5222\n
"},{"location":"configuration/listen/#general-listener-options","title":"General listener options","text":"

The options listed below are the same for all listener types. They set the basic listening socket options. Only port is required, the rest can be used to change the default settings.

"},{"location":"configuration/listen/#listenport","title":"listen.*.port","text":"
  • Syntax: integer, port number
  • Default: no default, this option is mandatory.
  • Example: port = 5222

The port number to which the listening socket is bound.

"},{"location":"configuration/listen/#listenip_address","title":"listen.*.ip_address","text":"
  • Syntax: string with the IP address
  • Default: all-zeros address (e.g. \"0.0.0.0\" for IPv4)
  • Example: ip_address = \"127.0.0.1\"

The IP address to which the listening socket is bound.

"},{"location":"configuration/listen/#listenproto","title":"listen.*.proto","text":"
  • Syntax: string, only \"tcp\" is accepted
  • Default: \"tcp\"
  • Example: proto = \"tcp\"

The protocol, which is TCP by default. Currently this is the only valid option.

"},{"location":"configuration/listen/#listenip_version","title":"listen.*.ip_version","text":"
  • Syntax: integer, 4 or 6
  • Default: if ip_address is specified, the IP version is determined from that address, otherwise it is 4
  • Example: ip_version = 6

Allows setting the IP version to IPv6. Does not need to be set if ip_address is defined.

"},{"location":"configuration/listen/#xmpp-listener-options","title":"XMPP listener options","text":"

The options listed below can be set for the c2s, s2s and service listeners to adjust their parameters.

"},{"location":"configuration/listen/#listenbacklog","title":"listen.*.backlog","text":"
  • Syntax: positive integer
  • Default: 1024
  • Example: backlog = 1000

Overrides the default TCP backlog value.

"},{"location":"configuration/listen/#listenproxy_protocol","title":"listen.*.proxy_protocol","text":"
  • Syntax: boolean
  • Default: false
  • Example: proxy_protocol = true

When set to true, Proxy Protocol is enabled and each connecting client has to provide a proxy header. Use only with a proxy (or a load balancer) to allow it to provide the connection details (including the source IP address) of the original client. Versions 1 and 2 of the protocol are supported.

"},{"location":"configuration/listen/#listenhibernate_after","title":"listen.*.hibernate_after","text":"
  • Syntax: non-negative integer or the string \"infinity\"
  • Default: 0
  • Example: hibernate_after = 10

Time in milliseconds after which a client process spawned by this listener will hibernate. Hibernation greatly reduces memory consumption of client processes, but may result in increased CPU consumption if a client is used very frequently. The default, recommended value of 0 means that the client processes will hibernate at every opportunity.

"},{"location":"configuration/listen/#listenmax_stanza_size","title":"listen.*.max_stanza_size","text":"
  • Syntax: positive integer or the string \"infinity\"
  • Default: \"infinity\"
  • Example: max_stanza_size = 10_000

Maximum allowed incoming stanza size in bytes.

Warning

This limit is checked after the input data parsing, so it does not apply to the input data size itself.

"},{"location":"configuration/listen/#listennum_acceptors","title":"listen.*.num_acceptors","text":"
  • Syntax: positive integer
  • Default: 100
  • Example: num_acceptors = 200

The number of processes accepting new connections on the listening socket.

"},{"location":"configuration/outgoing-connections/","title":"Options: Outgoing connections","text":"

MongooseIM can be configured to talk to external services like databases or HTTP servers. The interface for outgoing connections management is available via the outgoing_pools config option for the following types of connections:

  • cassandra - pool of connections to Cassandra cluster
  • redis - pool of connections to Redis server
  • http - pool of connections to an HTTP(S) server MongooseIM can talk to, for example HTTP authentication backend or HTTP notifications
  • elastic - pool of connections to ElasticSearch server
  • rdbms - pool of connections to an RDBMS database
  • rabbit - pool of connections to a RabbitMQ server
  • ldap - pool of connections to an LDAP server

  • Syntax: Each pool is specified in a subsection starting with [outgoing_pools.type.tag], where type is one of available connection types and tag is an arbitrary value uniquely identifying the pool within its type. This allows you to create multiple dedicated pools of the same type.

"},{"location":"configuration/outgoing-connections/#general-pool-options","title":"General pool options","text":""},{"location":"configuration/outgoing-connections/#outgoing_poolsscope","title":"outgoing_pools.*.*.scope","text":"
  • Syntax: string, one of:\"global\", \"host_type\".
  • Default: \"global\"
  • Example: scope = \"host_type\"

scope can be set to:

  • global - meaning that the pool will be started once no matter how many XMPP hosts are served by MongooseIM.
  • host_type - the pool will be started for each static XMPP host or host type served by MongooseIM.

    Note

    A pool with scope global and tag default is used by services that are not configured by host_type, like service_domain_db or service_mongoose_system_metrics, or by modules that don't support dynamic domains, like mod_pubsub. If a global default pool is not configured, these services will fail.

    Note

    The option host is still supported and behaves equivalent to host_type; however, it is deprecated in favour of the latter.

"},{"location":"configuration/outgoing-connections/#worker-pool-options","title":"Worker pool options","text":"

All pools are managed by the inaka/worker_pool library.

Available options are:

"},{"location":"configuration/outgoing-connections/#outgoing_poolsstrategy","title":"outgoing_pools.*.*.strategy","text":"
  • Syntax: string, one of:\"best_worker\", \"random_worker\", \"next_worker\", \"available_worker\", \"next_available_worker\"
  • Default: \"best_worker\"
  • Example: strategy = \"available_worker\"

Defines worker selection strategy. Consult worker_pool documentation for details.

"},{"location":"configuration/outgoing-connections/#outgoing_poolsworkers","title":"outgoing_pools.*.*.workers","text":"
  • Syntax: positive integer
  • Default: 10 (20 for Cassandra pool)
  • Example: workers = 100

Number of workers to be started by the pool.

"},{"location":"configuration/outgoing-connections/#outgoing_poolscall_timeout","title":"outgoing_pools.*.*.call_timeout","text":"
  • Syntax: positive integer
  • Default: 5000 (60000 for RDBMS pool)
  • Example: call_timeout = 3000

Number of milliseconds after which a call to the pool will time out.

"},{"location":"configuration/outgoing-connections/#connection-options","title":"Connection options","text":"

Options specific to a pool connection are defined in a subsection starting with [outgoing_pools.*.*.connection]. For example:

[outgoing_pools.rdbms.default]\n  scope = \"global\"\n  workers = 5\n\n  [outgoing_pools.rdbms.default.connection]\n  ...\n
"},{"location":"configuration/outgoing-connections/#rdbms-options","title":"RDBMS options","text":""},{"location":"configuration/outgoing-connections/#outgoing_poolsrdbmsconnectiondriver","title":"outgoing_pools.rdbms.*.connection.driver","text":"
  • Syntax: string, one of \"pgsql\", \"mysql\" or \"odbc\" (a supported driver)
  • Default: none - this option is mandatory
  • Example: driver = \"pgsql\"

Selects the driver for RDBMS connection. The choice of a driver impacts the set of available options.

"},{"location":"configuration/outgoing-connections/#outgoing_poolsrdbmsconnectionkeepalive_interval","title":"outgoing_pools.rdbms.*.connection.keepalive_interval","text":"
  • Syntax: positive integer
  • Default: not set - disabled by default
  • Example: keepalive_interval = 30

When enabled, MongooseIM will send a SELECT 1 query through every DB connection at the given interval to keep them open. This option should be used to ensure that database connections are restarted after they become broken (e.g. due to a database restart or a load balancer dropping connections). Currently, not every network-related error returned from a database driver to a regular query will imply a connection restart.

"},{"location":"configuration/outgoing-connections/#outgoing_poolsrdbmsconnectionquery_timeout","title":"outgoing_pools.rdbms.*.connection.query_timeout","text":"
  • Syntax: positive integer, in milliseconds
  • Default: 5000
  • Example: query_timeout = 5000

How long MongooseIM will wait for the database to answer for a query.

"},{"location":"configuration/outgoing-connections/#outgoing_poolsrdbmsconnectionmax_start_interval","title":"outgoing_pools.rdbms.*.connection.max_start_interval","text":"
  • Syntax: positive integer
  • Default: 30
  • Example: max_start_interval = 30

When MongooseIM fails to connect to the DB, it retries with an exponential backoff. This option limits the backoff time for faster reconnection when the DB becomes reachable again.

"},{"location":"configuration/outgoing-connections/#options-for-pgsql-and-mysql","title":"Options for pgsql and mysql","text":""},{"location":"configuration/outgoing-connections/#outgoing_poolsrdbmsconnectionhost","title":"outgoing_pools.rdbms.*.connection.host","text":"
  • Syntax: string
  • Default: no default; required for pgsql and mysql
  • Example: host = \"localhost\"
"},{"location":"configuration/outgoing-connections/#outgoing_poolsrdbmsconnectionport","title":"outgoing_pools.rdbms.*.connection.port","text":"
  • Syntax: string
  • Default: 5432 for pgsql; 3306 for mysql
  • Example: port = 5343
"},{"location":"configuration/outgoing-connections/#outgoing_poolsrdbmsconnectiondatabase","title":"outgoing_pools.rdbms.*.connection.database","text":"
  • Syntax: string
  • Default: no default; required for pgsql and mysql
  • Example: database = \"mim-db\"
"},{"location":"configuration/outgoing-connections/#outgoing_poolsrdbmsconnectionusername","title":"outgoing_pools.rdbms.*.connection.username","text":"
  • Syntax: string
  • Default: no default; required for pgsql and mysql
  • Example: username = \"mim-user\"
"},{"location":"configuration/outgoing-connections/#outgoing_poolsrdbmsconnectionpassword","title":"outgoing_pools.rdbms.*.connection.password","text":"
  • Syntax: string
  • Default: no default; required for pgsql and mysql
  • Example: password = \"mim-password\"

To enable TLS, you need to include the TLS section in the connection options. There is one additional option for PostgreSQL:

"},{"location":"configuration/outgoing-connections/#outgoing_poolsrdbmsconnectiontlsrequired","title":"outgoing_pools.rdbms.*.connection.tls.required","text":"
  • Syntax: boolean
  • Default: false
  • Example: tls.required = true

This option can be used to enforce a TLS connection.

"},{"location":"configuration/outgoing-connections/#odbc-options","title":"ODBC options","text":""},{"location":"configuration/outgoing-connections/#outgoing_poolsrdbmsconnectionsettings","title":"outgoing_pools.rdbms.*.connection.settings","text":"
  • Syntax: string
  • Default: no default; required if the \"odbc\" driver is specified
  • Example: settings = \"DSN=mydb\"

ODBC - specific string defining connection parameters.

"},{"location":"configuration/outgoing-connections/#odbc-ssl-connection-setup","title":"ODBC SSL connection setup","text":"

If you've configured MongooseIM to use an ODBC driver, then the SSL options, along with other connection options, should be present in the ~/.odbc.ini file.

To enable SSL connection the sslmode option needs to be set to verify-full. Additionally, you can provide the path to the CA certificate using the sslrootcert option.

"},{"location":"configuration/outgoing-connections/#example-odbcini-configuration","title":"Example ~/.odbc.ini configuration","text":"
[mydb]\nDriver      = ...\nServerName  = ...\nPort        = ...\n...\nsslmode     = verify-full\nsslrootcert = /path/to/ca/cert\n
"},{"location":"configuration/outgoing-connections/#http-options","title":"HTTP options","text":""},{"location":"configuration/outgoing-connections/#outgoing_poolshttpconnectionhost","title":"outgoing_pools.http.*.connection.host","text":"
  • Syntax: \"http[s]://string[:integer]\"
  • Default: no default; this option is mandatory
  • Example: host = \"https://server.com:879\"
"},{"location":"configuration/outgoing-connections/#outgoing_poolshttpconnectionpath_prefix","title":"outgoing_pools.http.*.connection.path_prefix","text":"
  • Syntax: string
  • Default: \"/\"
  • Example: path_prefix = \"/api/auth/\"

Initial part of path which will be common to all calls. Prefix will be automatically prepended to path specified by a call to the pool.

"},{"location":"configuration/outgoing-connections/#outgoing_poolshttpconnectionrequest_timeout","title":"outgoing_pools.http.*.connection.request_timeout","text":"
  • Syntax: positive integer
  • Default: 2000 (milliseconds)
  • Example: request_timeout = 5000

Number of milliseconds after which http call to the server will time out. It should be lower than call_timeout set at the pool level.

To enable TLS, you need to include the TLS section in the connection options.

"},{"location":"configuration/outgoing-connections/#redis-specific-options","title":"Redis-specific options","text":"

Redis can be used as a session manager backend. Global distribution (implemented in mod_global_distrib) requires Redis pool.

There are two important limitations:

  • for a session backend, the Tag parameter has to be equal to default
  • redis backend is not compatible with available_worker strategy.
"},{"location":"configuration/outgoing-connections/#outgoing_poolsredisconnectionhost","title":"outgoing_pools.redis.*.connection.host","text":"
  • Syntax: string
  • Default: \"127.0.0.1\"
  • Example: host = \"redis.local\"
"},{"location":"configuration/outgoing-connections/#outgoing_poolsredisconnectionport","title":"outgoing_pools.redis.*.connection.port","text":"
  • Syntax: integer, between 0 and 65535, non-inclusive
  • Default: 6379
  • Example: port = 9876
"},{"location":"configuration/outgoing-connections/#outgoing_poolsredisconnectiondatabase","title":"outgoing_pools.redis.*.connection.database","text":"
  • Syntax: non-negative integer
  • Default: 0
  • Example: database = 2

Logical database index (zero-based).

"},{"location":"configuration/outgoing-connections/#outgoing_poolsredisconnectionpassword","title":"outgoing_pools.redis.*.connection.password","text":"
  • Syntax: string
  • Default: \"\"
  • Example: password = \"topsecret\"
"},{"location":"configuration/outgoing-connections/#cassandra-options","title":"Cassandra options","text":""},{"location":"configuration/outgoing-connections/#outgoing_poolscassandraconnectionservers","title":"outgoing_pools.cassandra.*.connection.servers","text":"
  • Syntax: a TOML array of tables containing keys \"host\" and \"port\"
  • Default: [{host = \"localhost\", port = 9042}]
  • Example: servers = [{host = \"host_one\", port = 9042}, {host = \"host_two\", port = 9042}]
"},{"location":"configuration/outgoing-connections/#outgoing_poolscassandraconnectionkeyspace","title":"outgoing_pools.cassandra.*.connection.keyspace","text":"
  • Syntax: string
  • Default: \"mongooseim\"
  • Example: keyspace = \"big_mongooseim_database\"

To use plain text authentication (using cqerl_auth_plain_handler module):

"},{"location":"configuration/outgoing-connections/#outgoing_poolscassandraconnectionauthplainusername","title":"outgoing_pools.cassandra.*.connection.auth.plain.username","text":"
  • Syntax: string
  • Default: none, this option is mandatory
  • Example: username = \"auser\"
"},{"location":"configuration/outgoing-connections/#outgoing_poolscassandraconnectionauthplainpassword","title":"outgoing_pools.cassandra.*.connection.auth.plain.password","text":"
  • Syntax: string
  • Default: none, this option is mandatory
  • Example: password = \"somesecretpassword\"

Support for other authentication modules may be added in the future.

To enable TLS, you need to include the TLS section in the connection options.

"},{"location":"configuration/outgoing-connections/#elasticsearch-options","title":"Elasticsearch options","text":"

Currently, only one pool tagged default can be used.

"},{"location":"configuration/outgoing-connections/#outgoing_poolselasticdefaultconnectionhost","title":"outgoing_pools.elastic.default.connection.host","text":"
  • Syntax: non-empty string
  • Default: \"localhost\"
  • Example: host = \"otherhost\"
"},{"location":"configuration/outgoing-connections/#outgoing_poolselasticdefaultconnectionport","title":"outgoing_pools.elastic.default.connection.port","text":"
  • Syntax: positive integer
  • Default: 9200
  • Example: port = 9211

MongooseIM uses the inaka/tirerl library to communicate with ElasticSearch. This library uses worker_pool in a slightly different way than MongooseIM does, so the following options are not configurable:

  • call_timeout (infinity)
  • worker selection strategy (available_worker or what's set as default_strategy of worker_pool application)

The only pool-related variable you can tweak is thus the number of workers.

Run the following function in the MongooseIM shell to verify that the connection has been established:

1> mongoose_elasticsearch:health().\n{ok,#{<<\"active_primary_shards\">> => 15,<<\"active_shards\">> => 15,\n       <<\"active_shards_percent_as_number\">> => 50.0,\n       <<\"cluster_name\">> => <<\"docker-cluster\">>,\n       <<\"delayed_unassigned_shards\">> => 0,\n       <<\"initializing_shards\">> => 0,\n       <<\"number_of_data_nodes\">> => 1,\n       <<\"number_of_in_flight_fetch\">> => 0,\n       <<\"number_of_nodes\">> => 1,\n       <<\"number_of_pending_tasks\">> => 0,\n       <<\"relocating_shards\">> => 0,\n       <<\"status\">> => <<\"yellow\">>,\n       <<\"task_max_waiting_in_queue_millis\">> => 0,\n       <<\"timed_out\">> => false,\n       <<\"unassigned_shards\">> => 15}}\n

Note that the output might differ based on your ElasticSearch cluster configuration.

"},{"location":"configuration/outgoing-connections/#rabbitmq-options","title":"RabbitMQ options","text":"

The Tag parameter must be set to event_pusher in order to be able to use the pool for mod_event_pusher_rabbit. Any other Tag can be used for other purposes.

"},{"location":"configuration/outgoing-connections/#outgoing_poolsrabbitconnectionhost","title":"outgoing_pools.rabbit.*.connection.host","text":"
  • Syntax: string
  • Default: \"localhost\"
  • Example: host = \"anotherhost\"
"},{"location":"configuration/outgoing-connections/#outgoing_poolsrabbitconnectionport","title":"outgoing_pools.rabbit.*.connection.port","text":"
  • Syntax: integer
  • Default: 5672
  • Example: port = 4561
"},{"location":"configuration/outgoing-connections/#outgoing_poolsrabbitconnectionusername","title":"outgoing_pools.rabbit.*.connection.username","text":"
  • Syntax: string
  • Default: \"guest\"
  • Example: username = \"corpop\"
"},{"location":"configuration/outgoing-connections/#outgoing_poolsrabbitconnectionpassword","title":"outgoing_pools.rabbit.*.connection.password","text":"
  • Syntax: string
  • Default: \"guest\"
  • Example: password = \"guest\"
"},{"location":"configuration/outgoing-connections/#outgoing_poolsrabbitconnectionconfirms_enabled","title":"outgoing_pools.rabbit.*.connection.confirms_enabled","text":"
  • Syntax: boolean
  • Default: false
  • Example: confirms_enabled = false

Enables/disables one-to-one publishers confirms.

"},{"location":"configuration/outgoing-connections/#outgoing_poolsrabbitconnectionmax_worker_queue_len","title":"outgoing_pools.rabbit.*.connection.max_worker_queue_len","text":"
  • Syntax: non-negative integer or \"infinity\"
  • Default: 1000
  • Example: max_worker_queue_len = \"infinity\"

Sets a limit of messages in a worker's mailbox above which the worker starts dropping the messages. If a worker message queue length reaches the limit, messages from the head of the queue are dropped until the queue length is again below the limit. Use infinity to disable.

"},{"location":"configuration/outgoing-connections/#ldap-options","title":"LDAP options","text":""},{"location":"configuration/outgoing-connections/#outgoing_poolsldapconnectionservers","title":"outgoing_pools.ldap.*.connection.servers","text":"
  • Syntax: an array of strings
  • Default: [\"localhost\"]
  • Example: servers = [\"ldap_one\", \"ldap_two\"]
"},{"location":"configuration/outgoing-connections/#outgoing_poolsldapconnectionport","title":"outgoing_pools.ldap.*.connection.port","text":"
  • Syntax: integer
  • Default: 389 (or 636 if TLS is enabled)
  • Example: port = 800
"},{"location":"configuration/outgoing-connections/#outgoing_poolsldapconnectionroot_dn","title":"outgoing_pools.ldap.*.connection.root_dn","text":"
  • Syntax: string
  • Default: empty string
  • Example: root_dn = \"cn=admin,dc=example,dc=com\"

Leaving out this option makes it an anonymous connection, which most likely is what you want.

"},{"location":"configuration/outgoing-connections/#outgoing_poolsldapconnectionpassword","title":"outgoing_pools.ldap.*.connection.password","text":"
  • Syntax: string
  • Default: empty string
  • Example: password = \"topsecret\"
"},{"location":"configuration/outgoing-connections/#outgoing_poolsldapconnectionconnect_interval","title":"outgoing_pools.ldap.*.connection.connect_interval","text":"
  • Syntax: positive integer
  • Default: 10000
  • Example: connect_interval = 20000

Reconnect interval after a failed connection.

To enable TLS, you need to include the TLS section in the connection options.

"},{"location":"configuration/outgoing-connections/#tls-options","title":"TLS options","text":"

TLS options for a given pool type/tag pair are defined in a subsection starting with [outgoing_pools.[pool_type].[pool_tag].connection.tls].

"},{"location":"configuration/outgoing-connections/#outgoing_poolsconnectiontlsverify_mode","title":"outgoing_pools.*.*.connection.tls.verify_mode","text":"
  • Syntax: string, one of: \"peer\", \"selfsigned_peer\", \"none\"
  • Default: \"peer\"
  • Example: tls.verify_mode = \"none\"

Specifies the way server certificate verification works:

  • peer - makes sure the server certificate is valid and signed by a trusted CA. Requires a valid cacertfile.
  • selfsigned_peer - makes sure the server certificate is valid, but allows self-signed certificates. Requires a valid cacertfile.
  • none - server certificate is not checked.
"},{"location":"configuration/outgoing-connections/#outgoing_poolsconnectiontlscertfile","title":"outgoing_pools.*.*.connection.tls.certfile","text":"
  • Syntax: string, path in the file system
  • Default: not set
  • Example: tls.certfile = \"server.pem\"

Path to the X509 PEM file with a certificate. If the certificate is signed by an intermediate CA, you should specify here the whole CA chain by concatenating all public keys together.

"},{"location":"configuration/outgoing-connections/#outgoing_poolsconnectiontlscacertfile","title":"outgoing_pools.*.*.connection.tls.cacertfile","text":"
  • Syntax: string, path in the file system
  • Default: not set
  • Example: tls.cacertfile = \"ca.pem\"

Path to the X509 PEM file with a CA chain that will be used to verify clients. It won't have any effect if verify_mode is set to \"none\".

"},{"location":"configuration/outgoing-connections/#outgoing_poolsconnectiontlskeyfile","title":"outgoing_pools.*.*.connection.tls.keyfile","text":"
  • Syntax: string, path in the file system
  • Default: not set
  • Example: tls.keyfile = \"key.pem\"

Path to the X509 PEM file with the private key.

"},{"location":"configuration/outgoing-connections/#outgoing_poolsconnectiontlspassword","title":"outgoing_pools.*.*.connection.tls.password","text":"
  • Syntax: string
  • Default: not set
  • Example: tls.password = \"secret\"

Password to the X509 PEM file with the private key.

"},{"location":"configuration/outgoing-connections/#outgoing_poolsconnectiontlsciphers","title":"outgoing_pools.*.*.connection.tls.ciphers","text":"
  • Syntax: string with the OpenSSL cipher suite specification
  • Default: not set, all supported cipher suites are accepted
  • Example: tls.ciphers = \"ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-GCM-SHA384\"

Cipher suites to use. Please refer to the OpenSSL documentation for the cipher string format. For allowed values, see the Erlang/OTP SSL documentation.

"},{"location":"configuration/outgoing-connections/#outgoing_poolsconnectiontlsversions","title":"outgoing_pools.*.*.connection.tls.versions","text":"
  • Syntax: list of strings
  • Default: not set, all supported versions are accepted
  • Example: tls.versions = [\"tlsv1.2\", \"tlsv1.3\"]

TLS protocol versions to use. For allowed values, see the Erlang/OTP SSL documentation

"},{"location":"configuration/outgoing-connections/#outgoing_poolsconnectiontlsserver_name_indicationenabled","title":"outgoing_pools.*.*.connection.tls.server_name_indication.enabled","text":"
  • Syntax: boolean
  • Default: \"true\", but effective only if verify_mode is not \"none\".
  • Example: tls.server_name_indication.enabled = false

Enables SNI extension to TLS protocol. You can set it to false to disable the extension.

"},{"location":"configuration/outgoing-connections/#outgoing_poolsconnectiontlsserver_name_indicationhost","title":"outgoing_pools.*.*.connection.tls.server_name_indication.host","text":"
  • Syntax: string
  • Default: not set
  • Example: tls.server_name_indication.host = \"domain.com\"

Domain against which the certificates will be checked, using SNI.

"},{"location":"configuration/outgoing-connections/#outgoing_poolsconnectiontlsserver_name_indicationprotocol","title":"outgoing_pools.*.*.connection.tls.server_name_indication.protocol","text":"
  • Syntax: string, one of \"default\" or \"https\"
  • Default: \"default\"
  • Example: tls.server_name_indication_protocol = \"https\"

See the OTP documentation for an explanation. You'd usually want to set it to \"https\" for reasons described in the security recommendations.

"},{"location":"configuration/release-options/","title":"Release Options","text":"

When building a MongooseIM release from source code, the initial configuration files are generated with options taken from the vars-toml.config file found in the [MongooseIM root]/rel/ directory. You can change the values in this file to affect the resulting vm.args and mongooseim.toml files.

The file contains erlang tuples terminated with period ('.'). For users not familiar with Erlang syntax, here is a quick cheat sheet:

  • Each config option (key and value) is a tuple. Tuples are (Erlangers, forgive us the simplification) other Erlang terms separated with commas and enclosed in curly brackets ({}).
  • Tuples (at least the top-level ones) in vars.config are always 2-element.
  • The first element of each tuple is the name (Erlang atom).
  • The second element is a quoted string. Any quotes (\") inside the string should be escaped with a backslash (\\).

There are two types of options: parameters and blocks:

  • a parameter is inserted into the value of an already defined option. Parameters are mandatory - a valid value has to be provided.
  • a block can be an empty string, one line or multiple lines, defining zero, one or more options. Blocks are optional - the default is an empty string.
"},{"location":"configuration/release-options/#vmargs-options","title":"vm.args options","text":"

These options are inserted into the rel/files/vm.args template.

"},{"location":"configuration/release-options/#node_name","title":"node_name","text":"
  • Type: parameter
  • Option: value of -sname in vm.args
  • Syntax: Erlang node name: name@host
  • Example: {node_name, \"mongooseim@localhost\"}.
"},{"location":"configuration/release-options/#highload_vm_args","title":"highload_vm_args","text":"
  • Type: block
  • Option: arguments in vm.args: +K, +A, +P, -env ERL_MAX_PORTS
  • Syntax: command-line arguments
  • Example: {highload_vm_args, \"+P 10000000 -env ERL_MAX_PORTS 250000\"}.
"},{"location":"configuration/release-options/#epmd_module","title":"epmd_module","text":"

Allows setting the EPMD module to mongoose_epmd when CETS is used with the RDBMS backend, enabling the retrieval of remote node IP addresses from RDBMS instead of the default resolver.

  • Type: parameter
  • Option: value of -epmd_module in vm.args
  • Syntax: Erlang module name: mongoose_epmd
  • Example: {epmd_module, \"mongoose_epmd\"}.
"},{"location":"configuration/release-options/#toml-options","title":"TOML Options","text":"

These options are inserted into the rel/files/mongooseim.toml template.

"},{"location":"configuration/release-options/#hosts","title":"hosts","text":"
  • Type: parameter
  • Option: general.hosts
  • Syntax: comma-separated list of strings
  • Example: {hosts, \"\\\"localhost\\\", \\\"domain2\\\"\"}.
"},{"location":"configuration/release-options/#host_config","title":"host_config","text":"
  • Type: block
  • Option: host_config
  • Syntax: TOML block, one or more [[host_config]] sections.
  • Example:
{host_config, \"\n[[host_config]]\n  host = \\\"anonymous.localhost\\\"\n\n  [host_config.auth]\n    methods = [\\\"anonymous\\\"]\n\"}.\n
"},{"location":"configuration/release-options/#auth_ldap","title":"auth_ldap","text":"
  • Type: block
  • Option: auth.ldap
  • Syntax: TOML block, the [auth.ldap] subsection
  • Example:
{auth_ldap, \"\n  [auth.ldap]\n    base = \\\"ou=Users,dc=esl,dc=com\\\"\n    filter = \\\"(objectClass=inetOrgPerson)\\\"\n\"}.\n
"},{"location":"configuration/release-options/#all_metrics_are_global","title":"all_metrics_are_global","text":"
  • Type: parameter
  • Option: general.all_metrics_are_global
  • Syntax: boolean
  • Example: {all_metrics_are_global, \"false\"}.
"},{"location":"configuration/release-options/#s2s_addr","title":"s2s_addr","text":"
  • Type: block
  • Option: s2s.address
  • Syntax: TOML key-value pair with the address option
  • Example:
{s2s_addr, \"\n  address = [\n    {host = \\\"my.xmpp.org\\\", ip_address = \\\"192.0.100.1\\\"},\n    {host = \\\"your.xmpp.org\\\", ip_address = \\\"192.0.1.100\\\", port = 5271}\n  ]\n\"}.\n
"},{"location":"configuration/release-options/#s2s_default_policy","title":"s2s_default_policy","text":"
  • Type: parameter
  • Option: s2s.default_policy
  • Syntax: string
  • Example: {s2s_default_policy, \"\\\"deny\\\"\"}.
"},{"location":"configuration/release-options/#outgoing_s2s_port","title":"outgoing_s2s_port","text":"
  • Type: parameter
  • Option: s2s.outgoing.port
  • Syntax: integer
  • Example: {outgoing_s2s_port, \"5269\"}.
"},{"location":"configuration/release-options/#c2s_port","title":"c2s_port","text":"
  • Type: parameter
  • Option: listen.c2s.port
  • Syntax: integer
  • Example: {c2s_port, \"5222\"}.
"},{"location":"configuration/release-options/#s2s_port","title":"s2s_port","text":"
  • Type: parameter
  • Option: listen.s2s.port
  • Syntax: integer
  • Example: {s2s_port, \"5269\"}.
"},{"location":"configuration/release-options/#cowboy_port","title":"cowboy_port","text":"
  • Type: parameter
  • Option: listen.http.port
  • Syntax: integer
  • Example: {http_port, \"5280\"}.
"},{"location":"configuration/release-options/#mod_last","title":"mod_last","text":"
  • Type: block
  • Option: modules.mod_last
  • Syntax: TOML section: [modules.mod_last]
  • Example: {mod_last, \"[modules.mod_last]\"}.
"},{"location":"configuration/release-options/#mod_offline","title":"mod_offline","text":"
  • Type: block
  • Option: modules.mod_offline
  • Syntax: TOML section: [modules.mod_offline]
  • Example:
{mod_offline, \"\n[modules.mod_offline]\n  access_max_user_messages = \\\"max_user_offline_messages\\\"\n\"}.\n
"},{"location":"configuration/release-options/#mod_privacy","title":"mod_privacy","text":"
  • Type: block
  • Option: modules.mod_privacy
  • Syntax: TOML section: [modules.mod_privacy]
  • Example: {mod_privacy, \"[modules.mod_privacy]\"}.
"},{"location":"configuration/release-options/#mod_private","title":"mod_private","text":"
  • Type: block
  • Option: modules.mod_private
  • Syntax: TOML section: [modules.mod_private]
  • Example: {mod_private, \"[modules.mod_private]\"}.
"},{"location":"configuration/release-options/#mod_roster","title":"mod_roster","text":"
  • Type: block
  • Option: modules.mod_roster
  • Syntax: TOML section: [modules.mod_roster]
  • Example: {mod_roster, \"[modules.mod_roster]\"}.
"},{"location":"configuration/release-options/#mod_vcard","title":"mod_vcard","text":"
  • Type: block
  • Option: modules.mod_vcard
  • Syntax: TOML section: [modules.mod_vcard]
  • Example:
{mod_vcard, \"\n[modules.mod_vcard]\n  host = \\\"vjud.@HOST@\\\"\n\"}.\n
"},{"location":"configuration/release-options/#sm_backend","title":"sm_backend","text":"
  • Type: parameter
  • Option: general.sm_backend
  • Syntax: string
  • Example: {sm_backend, \"\\\"redis\\\"\"}.
"},{"location":"configuration/release-options/#s2s_backend","title":"s2s_backend","text":"
  • Type: parameter
  • Option: general.s2s_backend
  • Syntax: string
  • Example: {s2s_backend, \"\\\"mnesia\\\"\"}.
"},{"location":"configuration/release-options/#tls_config","title":"tls_config","text":"
  • Type: block
  • Option: listen.c2s.tls.*
  • Syntax: TOML key-value pairs
  • Example:
{tls_config, \"\n  tls.certfile = \\\"priv/ssl/fake_server.pem\\\"\n  tls.mode = \\\"starttls\\\"\n\"}.\n
"},{"location":"configuration/release-options/#auth_method","title":"auth_method","text":"
  • Type: parameter
  • Option: auth.methods
  • Syntax: comma-separated list of strings
  • Example: {auth_method, \"\\\"internal\\\"\"}.
"},{"location":"configuration/s2s/","title":"Options: S2S","text":"

The s2s section contains options configuring the server-to-server connections used to communicate with other federated XMPP servers.

Warning

Server-to-server connections do not support dynamic domains. Do not use dynamic domains when using s2s.

"},{"location":"configuration/s2s/#general-options","title":"General options","text":"

These options affect both incoming and outgoing S2S connections.

"},{"location":"configuration/s2s/#s2sdefault_policy","title":"s2s.default_policy","text":"
  • Syntax: string, \"allow\" or \"deny\"
  • Default: \"allow\"
  • Example: default_policy = \"deny\"

Default policy for opening new S2S connections to/from remote servers.

"},{"location":"configuration/s2s/#s2shost_policy","title":"s2s.host_policy","text":"
  • Syntax: array of TOML tables with the following mandatory content:
    • host - string, host name
    • policy - string, \"allow\" or \"deny\"
  • Default: not set, default_policy is used
  • Example:
  host_policy = [\n    {host = \"good.xmpp.org\", policy = \"allow\"},\n    {host = \"bad.xmpp.org\", policy = \"deny\"}\n  ]\n

Policy for opening new connections to/from specific remote servers.

"},{"location":"configuration/s2s/#s2suse_starttls","title":"s2s.use_starttls","text":"
  • Syntax: string, one of \"false\", \"optional\", \"required\", \"required_trusted\"
  • Default: \"false\"
  • Example: use_starttls = \"required\"

Allows configuring StartTLS for incoming and outgoing S2S connections:

  • false - StartTLS is disabled,
  • optional - StartTLS is supported,
  • required - StartTLS is supported and enforced,
  • required_trusted - StartTLS is supported and enforced with certificate verification.
"},{"location":"configuration/s2s/#s2scertfile","title":"s2s.certfile","text":"
  • Syntax: string, path in the file system
  • Default: not set
  • Example: certfile = \"cert.pem\"

Path to the X509 PEM file with a certificate and a private key inside (not protected by any password). Required if use_starttls is not false.

"},{"location":"configuration/s2s/#s2sshared","title":"s2s.shared","text":"
  • Syntax: string
  • Default: 10 strong random bytes, hex-encoded
  • Example: shared = \"82gc8b23ct7824\"

S2S shared secret used in the Server Dialback extension.

"},{"location":"configuration/s2s/#outgoing-connections","title":"Outgoing connections","text":"

The options listed below affect only the outgoing S2S connections.

"},{"location":"configuration/s2s/#s2saddress","title":"s2s.address","text":"
  • Syntax: array of TOML tables with the following content:
    • host - string, mandatory, host name
    • ip_address - string, mandatory, IP address
    • port - integer, optional, port number
  • Default: not set
  • Example:
  address = [\n    {host = \"my.xmpp.org\", ip_address = \"192.0.100.1\"},\n    {host = \"your.xmpp.org\", ip_address = \"192.0.1.100\", port = 5271}\n  ]\n

This option defines IP addresses and port numbers for specific non-local XMPP domains, allowing to override the DNS lookup for outgoing S2S connections.

"},{"location":"configuration/s2s/#s2sciphers","title":"s2s.ciphers","text":"
  • Syntax: string
  • Default: \"TLSv1.2:TLSv1.3\"
  • Example: ciphers = \"TLSv1.2\"

Defines a list of accepted SSL ciphers for outgoing S2S connections. Please refer to the OpenSSL documentation for the cipher string format.

"},{"location":"configuration/s2s/#s2smax_retry_delay","title":"s2s.max_retry_delay","text":"
  • Syntax: positive integer
  • Default: 300
  • Example: max_retry_delay = 300

Specifies the maximum time in seconds that MongooseIM will wait until the next attempt to connect to a remote XMPP server. The delays between consecutive attempts will be doubled until this limit is reached.

"},{"location":"configuration/s2s/#s2soutgoingport","title":"s2s.outgoing.port","text":"
  • Syntax: integer, port number
  • Default: 5269
  • Example: outgoing.port = 5270

Defines the port to be used for outgoing S2S connections.

"},{"location":"configuration/s2s/#s2soutgoingip_versions","title":"s2s.outgoing.ip_versions","text":"
  • Syntax: array of integers (IP versions): 4 or 6
  • Default: [4, 6]
  • Example: outgoing.ip_versions = [6]

Specifies the order of IP address families to try when establishing an outgoing S2S connection.

"},{"location":"configuration/s2s/#s2soutgoingconnection_timeout","title":"s2s.outgoing.connection_timeout","text":"
  • Syntax: positive integer or the string \"infinity\"
  • Default: 10_000
  • Example: outgoing.connection_timeout = 5000

Timeout (in milliseconds) for establishing an outgoing S2S connection.

"},{"location":"configuration/s2s/#s2sdnstimeout","title":"s2s.dns.timeout","text":"
  • Syntax: positive integer
  • Default: 10
  • Example: dns.timeout = 30

Timeout (in seconds) for DNS lookups when opening an outgoing S2S connection.

"},{"location":"configuration/s2s/#s2sdnsretries","title":"s2s.dns.retries","text":"
  • Syntax: positive integer
  • Default: 2
  • Example: dns.retries = 1

Number of DNS lookup attempts when opening an outgoing S2S connection.

"},{"location":"configuration/shaper/","title":"Options: Shaper","text":"

The shaper section specifies traffic shapers used to limit the incoming XMPP traffic, providing a safety valve to protect the server. It can be used to prevent DoS attacks or to calm down too noisy clients.

  • Syntax: each shaper is specified in a subsection starting with [shaper.name] where name is used to uniquely identify the shaper.
  • Default: no default - each shaper needs to be specified explicitly.
  • Example: the normal shaper is used for the C2S connections.
[shaper.normal]\n  max_rate = 1000\n
"},{"location":"configuration/shaper/#traffic-shaper-options","title":"Traffic shaper options","text":""},{"location":"configuration/shaper/#shapermaxrate","title":"shaper.max_rate","text":"
  • Syntax: positive integer
  • Default: no default, this option is mandatory
  • Example: max_rate = 1000

Defines the maximum accepted rate. For the shapers used by XMPP listeners this is the number of bytes per second, but there are shapers that use different units, e.g. MAM shapers.

"},{"location":"configuration/shaper/#examples","title":"Examples","text":"

The following examples show the typical shaper definitions.

"},{"location":"configuration/shaper/#c2s-shaper","title":"C2S Shaper","text":"

This is the typical definition of an XMPP shaper, which accepts the maximum data rate of 1000 bytes per second. When the rate is exceeded, the receiver pauses before processing the next packet.

[shaper.normal]\n  max_rate = 1000\n

To make use of it, the corresponding rule should be defined in the access section. Finally, the C2S listener has to be configured to use the defined shaper - see the C2S Example.

"},{"location":"configuration/shaper/#s2s-shaper","title":"S2S Shaper","text":"

For S2S connections we need to increase the limit as they receive the accumulated traffic from multiple users - e.g. to 50,000 bytes per second:

[shaper.fast]\n  max_rate = 50_000\n

To make use of it, the corresponding rule should be defined in the access section. Finally, the S2S listener has to be configured to use the defined shaper - see the S2S Example.

"},{"location":"configuration/shaper/#mam-shapers","title":"MAM Shapers","text":"

These shapers limit the number of MAM operations per second (rather than bytes per second).

[shaper.mam_shaper]\n  max_rate = 1\n\n[shaper.mam_global_shaper]\n  max_rate = 1000\n

To make use of them, the corresponding rules should be defined in the access section.

"},{"location":"developers-guide/Basic-iq-handler/","title":"Basic IQ Handler","text":"

XMPP stands for Extensible Messaging and Presence Protocol. One way the protocol can be extended is by defining new types of queries, or IQs, that XMPP entities should be able to handle. It's usual that a XEP defining some XMPP extension contains some new type of IQ. IQs can also be used to implement custom features - required in a particular problem domain - but not defined by any official XEP.

This tutorial will show you how to add and test a simple module with an IQ handler to MongooseIM. gen_iq_handler module provides functionality for registering IQ handlers for specific namespaces.

"},{"location":"developers-guide/Basic-iq-handler/#clone-build","title":"Clone & build","text":"

See How-to-build for details on building MongooseIM from source code.

"},{"location":"developers-guide/Basic-iq-handler/#create-a-module-add-a-basic-iq-handler","title":"Create a module & add a basic IQ handler","text":"

Go to src/ and create a basic module implementing the gen_mod behaviour. In start/2 register the IQ handler with a specified namespace, type (IQ processing policy), and function which will handle the incoming IQ stanza. In stop/1 remove the registered handler. Implement the function for handler:

  • If the incoming IQ stanza is of type get or set it will be returned with the type set to result.

  • If the server doesn't recognise the hostname, the returning stanza will be of type error.

See Server Rules for Processing XML Stanzas for more detailed information on the topic.

-module(mod_iq_example).\n-behaviour(gen_mod).\n\n-include(\"mongoose.hrl\").\n-include(\"jlib.hrl\").\n\n%% gen_mod callbacks\n-export([start/2, stop/1]).\n\n%% IQ handlers\n-export([process_iq/4]).\n\nstart(HostType, _Opts) ->\n    gen_iq_handler:add_iq_handler_for_domain(HostType, <<\"erlang-solutions.com:example\">>,\n                                  ejabberd_sm, process_iq, #{}, no_queue).\nstop(HostType) ->\n    gen_iq_handler:remove_iq_handler_for_domain(HostType, <<\"erlang-solutions.com:example\">>, ejabberd_sm).\n\nprocess_iq(_From, _To, Acc, IQ) ->\n    IQRes = IQ#iq{type = result},\n    ?LOG_INFO(#{what => example_handler, acc => Acc, iq_result => IQRes}),\n    {Acc, IQRes}.\n
"},{"location":"developers-guide/Basic-iq-handler/#test-your-handler","title":"Test your handler","text":"

Go to big_tests/tests and create a test suite for your handler. Implement the test case for success and failure. We will register two users, which are predefined in $REPO/big_tests/test.config:

{alice, [\n    {username, <<\"alicE\">>},\n    {server, <<\"localhost\">>},\n    {password, <<\"matygrysa\">>}]},\n{alice_bis, [\n    {username, <<\"alicE\">>},\n    {server, <<\"localhost.bis\">>},\n    {host, <<\"localhost\">>},\n    {password, <<\"matygrysa\">>}]},\n

Our IQ handler will be enabled only for one domain, localhost. After sending an IQ stanza to alice we should get a result, but as our IQ handler is not enabled for localhost.bis domain, we should get an error.

-module(mod_iq_example_SUITE).\n\n-export([all/0,\n         groups/0,\n         suite/0,\n         init_per_suite/1,\n         end_per_suite/1,\n         init_per_group/2,\n         end_per_group/2,\n         init_per_testcase/2,\n         end_per_testcase/2]).\n\n%% Tests\n-export([should_return_result/1,\n         should_return_error/1]).\n\n-include_lib(\"exml/include/exml.hrl\").\n\n-define(EXAMPLE_NS, <<\"erlang-solutions.com:example\">>).\n-define(USERS, [alice, alice_bis]).\n\n-import(distributed_helper, [mim/0,\n                             require_rpc_nodes/1,\n                             rpc/4]).\n\n%%--------------------------------------------------------------------\n%% Suite configuration\n%%--------------------------------------------------------------------\n\nall() ->\n    [{group, mod_iq_example}].\n\ngroups() ->\n    G = [{mod_iq_example, [], [should_return_result,\n                               should_return_error]}],\n    ct_helper:repeat_all_until_all_ok(G).\n\nsuite() ->\n    require_rpc_nodes([mim]) ++ escalus:suite().\n\n%%--------------------------------------------------------------------\n%% Init & teardown\n%%--------------------------------------------------------------------\n\ninit_per_suite(Config) ->\n    Domain = ct:get_config({hosts, mim, domain}),\n    dynamic_modules:start(Domain, mod_iq_example, [no_opts]),\n    escalus:init_per_suite(Config).\n\nend_per_suite(Config) ->\n    Domain = ct:get_config({hosts, mim, domain}),\n    dynamic_modules:stop(Domain, mod_iq_example),\n    escalus:end_per_suite(Config).\n\ninit_per_group(_, Config) ->\n    escalus:create_users(Config, ?USERS).\n\nend_per_group(_, Config) ->\n    escalus:delete_users(Config, ?USERS).\n\ninit_per_testcase(CaseName, Config) ->\n    escalus:init_per_testcase(CaseName, Config).\n\nend_per_testcase(CaseName, Config) ->\n    escalus:end_per_testcase(CaseName, Config).\n\n%%--------------------------------------------------------------------\n%% 
Tests\n%%--------------------------------------------------------------------\n\nshould_return_result(Config) ->\n    %% given\n    escalus:story(Config, [{alice, 1}], fun(Alice) ->\n        %% when sending a request\n        Req = escalus_stanza:iq_get(?EXAMPLE_NS, [#xmlel{name = <<\"example\">>}]),\n        ct:pal(\"req: ~p\", [Req]),\n        escalus:send(Alice, Req),\n        %% then we should get a result\n        Res = escalus:wait_for_stanza(Alice),\n        ct:pal(\"res: ~p\", [Res]),\n        escalus:assert(is_iq, [<<\"result\">>, ?EXAMPLE_NS], Res)\n    end).\n\nshould_return_error(Config) ->\n    %% given\n    escalus:story(Config, [{alice_bis, 1}], fun(Alice) ->\n        %% when sending a request with unregistered server\n        Req = escalus_stanza:iq_get(?EXAMPLE_NS, [#xmlel{name = <<\"example\">>}]),\n        ct:pal(\"req: ~p\", [Req]),\n        escalus:send(Alice, Req),\n        %% then we should get an error\n        Res = escalus:wait_for_stanza(Alice),\n        ct:pal(\"res: ~p\", [Res]),\n        escalus:assert(is_iq, [<<\"error\">>, ?EXAMPLE_NS], Res),\n        escalus:assert(is_error, [<<\"cancel\">>, <<\"service-unavailable\">>], Res)\n    end).\n
"},{"location":"developers-guide/Basic-iq-handler/#run-it","title":"Run it","text":"

Compile & generate releases for testing purposes according to How-to-build. Go to $REPO/_build/mim1/rel/mongooseim and start one MongooseIM node.

bin/mongooseim live\n
Open up a new terminal window, go to $REPO and use the test runner. Run single suite with the already started mim1 node.

source tools/test-runner-complete.sh\ntest-runner.sh --rerun-big-tests -- mod_iq_example\n
"},{"location":"developers-guide/Bootstrap-Scripts/","title":"Bootstrap scripts","text":"

The scripts are located in the rel/files/scripts/ directory in the MongooseIM repository.

By default the bootstrap command executes bootstrap01-hello.sh, which just prints the information below:

./_build/prod/rel/mongooseim/bin/mongooseimctl bootstrap\n\nExecute /Users/mikhailuvarov/erlang/esl/MongooseIM/_build/prod/rel/mongooseim/scripts/bootstrap01-hello.sh\nHello from /Users/mikhailuvarov/erlang/esl/MongooseIM/_build/prod/rel/mongooseim/scripts/bootstrap01-hello.sh script.\nMongooseIM is installed into /Users/mikhailuvarov/erlang/esl/MongooseIM/_build/prod/rel/mongooseim\n

Execution of the scripts stops with an error if any of the scripts fails.

Environment variables, available from scripts:

  • ERTS_PATH - path to Erlang Runtime System, used by MongooseIM.
  • MIM_DIR - MongooseIM release installation directory.
"},{"location":"developers-guide/Bootstrap-Scripts/#templating-bootstrap-script","title":"Templating bootstrap script","text":"

The script bootstrap20-template.escript renders files from the templates/ directory and writes result files into the etc/ directory. If you need the result files in a separate directory, create another script bootstrap30-template.sh, that moves files into a proper location.

The etc/templates.ini file contains default template variables.

A template config example:

[options]\n  demo_session_lifetime = 600\n  demo_tls_versions = 'tlsv1.2', 'tlsv1.3'\n

Only lowercase variables are allowed in templates.ini.

You can redeclare options using environment variables when executing the bootstrap script:

MIM_DEMO_SESSION_LIFETIME=700 mongooseimctl bootstrap\n

Environment variables should have a MIM_ prefix. The variable names are case-insensitive (but we suggest using uppercase variable names for consistency).

"},{"location":"developers-guide/Bootstrap-Scripts/#demo-template","title":"Demo template","text":"

A demo template is located in rel/files/templates/demo.config. It is copied into the /templates directory inside your release directory.

"},{"location":"developers-guide/Bootstrap-Scripts/#testing-templating-scripts","title":"Testing templating scripts","text":"

Templating script source code: rel/files/scripts/bootstrap20-template.escript.

Testing script code:

tools/pkg/scripts/smoke_test.sh\ntools/pkg/scripts/smoke_templates.escript\n

Testing command:

PRESET=pkg pkg_PLATFORM=ubuntu_xenial ESL_ERLANG_PKG_VER=23.3.1-2 ./tools/test.sh\n
"},{"location":"developers-guide/Hooks-and-handlers/","title":"Hooks, handlers and accumulators","text":"

The hooks and handlers mechanism is one of the core architectural features of MongooseIM. It allows for loose coupling between components of the system by calling only those which are available and configured to be used at runtime.

It can be thought of as a simple eventing mechanism notifying about certain things happening in the server. That results in an extensible system with pluggable extra functionality.

To focus our attention, we'll analyze mod_offline which is responsible for storing messages for delivery to users unavailable at the time of sending. mod_offline is an implementation of XEP-0203: Delayed Delivery.

"},{"location":"developers-guide/Hooks-and-handlers/#running-a-hook","title":"Running a hook","text":""},{"location":"developers-guide/Hooks-and-handlers/#basic-usage","title":"Basic usage","text":"

ejabberd_sm (ejabberd/MongooseIM session manager) is the module discovering whether the recipient of a message is available or not. That's where storing the message for later delivery takes place. It is possible, but not recommended, to save a message in an offline storage by calling mod_offline directly:

mod_offline:store_packet(Acc, From, To, Packet)\n

Note that in this example ejabberd_sm is coupled with mod_offline. I.e. if mod_offline was not available, the code would simply crash; if it was misconfigured or turned off, the behaviour would be undefined. To avoid that coupling and also to enable other (possibly yet to be written) code to carry out some action at this particular moment, ejabberd_sm calls instead:

mongoose_hooks:offline_message(Acc, From, To, Packet);\n

mongoose_hooks is a module which serves as an API for calling hooks in the server. All such modules are placed in src/hooks.

For every hook, there needs to be a function in this module written beforehand which accepts the correct arity of arguments and makes the call to actual low-level hooks mechanism. This means that there is some degree of coupling still - but this time between the ejabberd_sm module and mongoose_hooks, and the latter is always available.

The extra level of indirection introduced by this call gives the flexibility to determine at runtime what code actually gets run at this point. This depends on which handlers are registered to process the event.

offline_message is the name of the hook (in other words of the event that is being signalled); Acc is the Accumulator, described later; From, To and Packet are the arguments passed to the handler, just as they would in case of the function being called directly.

Why do we even need the mongoose_hooks module?

Why is there a module in which we have to define the hook invocation beforehand? Could we not just use the low-level hooks mechanism directly and avoid this module altogether?

This was actually the case before this module was introduced, and hooks' names were just atoms provided as an argument to this low-level API. However, we discovered it was causing problems and producing bugs, due to the lack of static code analysis. Now we can have some guarantees thanks to Dialyzer, and each hook invocation has a correct number of arguments. Thanks to this, writing handlers is easier - there is a single source of truth about how a hook is run. Remember that a given hook can be invoked from many places in many modules.

With the new mongoose_c2s implementation we introduced a new hook API module, mongoose_c2s_hooks. All such API modules are placed in the src/hooks directory.

"},{"location":"developers-guide/Hooks-and-handlers/#getting-results-from-handlers","title":"Getting results from handlers","text":"

Hook handlers are called by \"folding\". This means that each handler on a list is passed a set of arguments, and an initial value that it then modifies, returns and hands over to the next handler in line. This modified data that is processed by the series of handlers is called an accumulator - because it accumulates the results.

A simple example would look like this:

NewAcc = mongoose_hooks:a_certain_hook(Accumulator,\n                                       StateData#state.user,\n                                       StateData#state.server).\n

The initial value of the accumulator being passed through the sequence of handlers is provided with additional arguments required by the hook, as defined in the mongoose_hooks module.

Folds

If you haven't encountered the term fold before, think of it as reduce (like Array.reduce) in Ruby-speak, roughly equivalent to the Reduce step in MapReduce, sometimes called accumulate, aggregate or compress. See Wikipedia for more.

"},{"location":"developers-guide/Hooks-and-handlers/#using-accumulators","title":"Using accumulators","text":"

MongooseIM uses a dedicated data structure to accumulate data related to stanza processing (see \"Accumulators\"). It is instantiated with an incoming stanza, passed along throughout the processing chain, supplied to and returned from certain hook calls, and terminated when the stanza is leaving MongooseIM. There are some hooks which don't use this data structure.

If a Mongoose accumulator is passed to a hook, handlers should store their return values in one of 3 ways:

  • If it is a one-off value which doesn't need to be passed on along with the accumulator (can be overwritten any time), use mongoose_acc:set(hook, result, Value, Acc).
  • If the value is to be passed on to be reused within the current processing context, use mongoose_acc:set(Namespace, Key, Value, Acc).
  • If the value should be passed on to the recipient's session, pubsub node etc. use mongoose_acc:set_permanent(Namespace, Key, Value, Acc).

A real life example, then, with regard to mod_offline is the resend_offline_messages hook run in mod_presence:

Acc1 = mongoose_hooks:resend_offline_messages(Acc, Jid),\nRs = mongoose_acc:get(offline, messages, [], Acc1),\n
"},{"location":"developers-guide/Hooks-and-handlers/#error-handling-in-hooks","title":"Error handling in hooks","text":"

Hooks are meant to decouple modules; in other words, the caller signals that some event took place or that it intends to use a certain feature or a set of features, but how and if those features are implemented is beyond its interest. For that reason hooks don't use the \"let it crash\" approach. Instead, it is rather like \"fire-and-forget\", more similar in principle to the Pid ! signal way.

In practical terms: if a handler throws an error, the hook machine logs a message and proceeds to the next handler with an unmodified accumulator. If there are no handlers registered for a given hook, the call simply has no effect.

"},{"location":"developers-guide/Hooks-and-handlers/#sidenote-code-yet-to-be-written","title":"Sidenote: Code yet to be written","text":"

Let's imagine, that when building a minimum viable product we settle on using mod_offline for delayed delivery of messages to unavailable clients. However, while the product evolves (or the relevant client software catches up) we might drop mod_offline in favour of a more sophisticated solution like Message Archive Management which would require a different action to be taken at the same point. Thanks to loose coupling and mongoose_hooks, it's possible to turn off mod_offline and turn on mod_mam without changing a single line of code in ejabberd_sm.

The only required change is to the configuration (apart from deploying the new module) which can even be performed at runtime - without restarting the server.

"},{"location":"developers-guide/Hooks-and-handlers/#sidenote-multiple-domains","title":"Sidenote: Multiple Domains","text":"

A MongooseIM cluster may serve more than one domain at the same time. E.g. it is quite common that services like Multi User Chat or Publish-Subscribe are available as subdomains of the main XMPP domain served by an installation.

Moreover, each XMPP host is of a certain type, as defined in general.host_types, and hooks can be called either globally (across all hosts/host types) or for one host type. If you are not using dynamic domains or grouping hosts under host types, then each host has a corresponding host type implicitly, and the two terms are interchangeable. Whether a hook is called globally or per host type depends on its purpose. It is decided when creating a hook and can be checked in the mongoose_hooks module for existing hooks.

"},{"location":"developers-guide/Hooks-and-handlers/#registering-hook-handlers","title":"Registering hook handlers","text":"

In order to store a packet when ejabberd_sm runs offline_message, the relevant module must register a handler for this hook. To attain the runtime configurability the module should register the handlers when it's loaded and unregister them when it's unloaded. That's usually done in, respectively, start/2 and stop/1 functions. Here is the relevant snippet from mod_offline:start/2:

gen_hook:add_handlers(hooks(HostType)),\n
and the hooks/1 function returns a list of tuples describing hook handlers, like:
{offline_message, HostType, fun ?MODULE:inspect_packet/3, #{}, 50}\n

It is clearly visible that the handler inspect_packet is added to the offline_message hook.

HostType is the one for which the handler will be executed. In the case of statically defined domains, it is the same as the host, as configured in the general.hosts section.

The handler itself is specified as a fun expression; the arity of the function is always 3 - more about actual arguments in the Writing handlers section. If the handler expects an incorrect number of arguments, it will simply crash.

The 4th element of this tuple is a map of static parameters that will be passed to every invocation of the handler. It allows to specify additional handler config at the moment of its registering.

Multiple handlers may be registered for the same hook. The last argument, 50, is the sequence number of this handler in the handler chain. The higher the number, the later in the sequence the handler will be executed. It's reasonable to keep this number small (e.g. in the range 0-100), though there's no real limit other than the size of the integer type in the Erlang VM.

"},{"location":"developers-guide/Hooks-and-handlers/#unregistering-handlers","title":"Unregistering handlers","text":"

Pluggability also requires the components to be unpluggable at will. For that purpose there's the option to unregister a hook handler. It's done in mod_offline:stop/1 in a similar fashion to:

gen_hook:delete_handlers(hooks(Host)),\n

The hooks/1 function returns a list of hook tuples exactly the same as passed to gen_hook:add_handlers/1. Both these functions accept a list of tuples. There also exist functions gen_hook:add_handler/5 and gen_hook:delete_handler/5, which register and unregister one handler at a time.

"},{"location":"developers-guide/Hooks-and-handlers/#sidenote-metrics","title":"Sidenote: Metrics","text":"

Every time a hook is run, a corresponding metric of the same name in the same host is incremented by one. There are some exceptions though, as some metrics were implemented before the generic hook metrics. The list of hooks not updating generic metrics can be found in the mongoose_metrics:filter_hook/1 function. Such skipped hooks update the metrics defined in the mongoose_metrics_hooks module.

"},{"location":"developers-guide/Hooks-and-handlers/#writing-handlers","title":"Writing handlers","text":"

The signature of a handler has to follow these rules:

  • Accepts correct arguments:
    • Acc - accumulator which was passed from previous handler (or initial accumulator). May be mongoose_acc in particular
    • Params - map of hook parameters passed from mongoose_hooks. It is constant for every handler in one hook invocation. For exact structure check the hook function in mongoose_hooks module, as different hooks use different parameters.
    • Extra - map of additional hook parameters. It is constant for every hook invocation. It is created from the map described in Registering hook handlers section with 3 additional parameters: host_type, hook_tag, hook_name. Parameter host_type can be particularly useful.
  • Returns a tuple {ok | stop, Acc} where Acc is the accumulator of the same type as the input one, that shall be passed to the next handler (or return value in case of last handler).

Let's look at this example, from MongooseIM codebase:

in_subscription(Acc, #{to := ToJID, from := FromJID, type := Type}, _) ->\n    case process_subscription(in, ToJID, FromJID, Type) of\n        stop ->\n            {stop, Acc};\n        {stop, false} ->\n            {stop, mongoose_acc:set(hook, result, false, Acc)};\n        _ -> {ok, Acc}\n    end.\n

As seen in this example, a handler receives an accumulator, parameters and extra parameters (in this case - ignored). Then it matches on the result of process_subscription/4 and can return 3 different values:

  • {ok, Acc} - it allows further processing and does not change the accumulator.
  • {stop, mongoose_acc:set(hook, result, false, Acc)} - it stops further processing and returns accumulator with a new value in it.
  • {stop, Acc} - it stops further processing and does not change the accumulator.

This is an important feature to note: in some cases our handler returns a tuple {stop, Acc}. This skips calling later actions in the handler sequence, while the hook call returns the Acc. Further processing is only performed if the first element of return tuple is ok.

Watch out! Different handlers may be registered for the same hook - the priority mechanism orders their execution. If a handler returns {stop, Acc} but runs early in the handler chain, it may prevent some other handler from running at all! That might or might not be intentional. It may be especially surprising in case of handlers from different modules registered for the same hook. Always check which handlers are registered for a given hook (grep is your friend) and make sure that you understand their interdependencies.

"},{"location":"developers-guide/Hooks-and-handlers/#hooks-list-and-how-to-extract-it","title":"Hooks list and how to extract it","text":"

The following command should give you a list of all the hooks available in MongooseIM:

awk '/\\-export\\(\\[/,/\\]\\)\\./' src/hooks/*.erl | grep -oh \"\\w*/\" | sed 's/.$//' | sort\n
It returns:
adhoc_local_commands\nadhoc_sm_commands\n...\n...\n...\nxmpp_stanza_dropped\n

It just extracts the hooks exported from mongoose_hooks and other hook API modules. Refer to grep/ack to find where they're used.

"},{"location":"developers-guide/Hooks-and-handlers/#creating-your-own-hooks","title":"Creating your own hooks","text":"

You should put the new hook inside mongoose_hooks with a correct type specification, which provides some security in places where the hooks are run. This is the way all hooks are called in MongooseIM (see the examples in the hooks description). You could run gen_hook:run_fold directly, providing the hook name, but this is advised against.

Of course, as long as no module registers handlers for a hook, calling it won't have any effects.

This is similar to the case when a module registers handlers for some hook, but that hook is never run in the code. That won't have an effect either.

"},{"location":"developers-guide/Hooks-and-handlers/#example-of-creating-a-new-hook","title":"Example of creating a new hook","text":"

The following is an example of a module which both runs and registers a few handlers for a completely new hook. The handlers are run sequentially using disparate priorities and passing over an accumulator value. One of the handlers stops the handler execution chain prematurely by returning {stop, NewVal}. It's also possible to try out what happens when the same hook is run with different XMPP domains by passing an argument to run_custom_hook/1 - we'll see that the handlers are registered for a particular domain only.

At the end, you can see a printout of an accumulator with some debugging info.

To cut the long story short:

"},{"location":"developers-guide/Hooks-and-handlers/#1-add-the-hook-with-type-specification-to-mongoose_hooks","title":"1. Add the hook with type specification to mongoose_hooks","text":"
-spec custom_new_hook(HostType, Acc, Number) -> Result when\n    HostType :: mongooseim:host_type(),\n    Acc :: mongoose_acc:t(),\n    Number :: integer(),\n    Result :: mongoose_acc:t().\ncustom_new_hook(HostType, Acc, Number) ->\n    Params = #{number => Number},\n    run_hook_for_host_type(custom_new_hook, HostType, Acc, Params).\n

Don't forget about exporting the function:

-export([custom_new_hook/3]).\n

"},{"location":"developers-guide/Hooks-and-handlers/#2-create-the-mod_hook_example-module","title":"2. Create the mod_hook_example module","text":"
-module(mod_hook_example).\n\n-behaviour(gen_mod).\n\n-include(\"mongoose.hrl\").\n\n%% API\n-export([run_custom_hook/1]).\n\n%% gen_mod callbacks\n-export([start/2,\n         stop/1]).\n\n%% Hook handlers\n-export([first_handler/3,\n         stopping_handler/3,\n         never_run_handler/3]).\n\nstart(HostType, _Opts) ->\n    gen_hook:add_handlers(hooks(HostType)).\n\nstop(HostType) ->\n    gen_hook:delete_handlers(hooks(HostType)).\n\nhooks(HostType) ->\n    [{custom_new_hook, HostType, fun ?MODULE:first_handler/3, #{extra_param => <<\"ExtraParam\">>}, 25},\n     {custom_new_hook, HostType, fun ?MODULE:stopping_handler/3, #{}, 50},\n     {custom_new_hook, HostType, fun ?MODULE:never_run_handler/3, #{}, 75}].\n\nrun_custom_hook(Host) ->\n    {ok, HostType} = mongoose_domain_api:get_domain_host_type(Host),\n    Acc = mongoose_acc:new(#{ location => ?LOCATION, lserver => Host, host_type => HostType }),\n    Acc1 = mongoose_acc:set(example, value, 5, Acc),\n    ResultAcc = mongoose_hooks:custom_new_hook(HostType, Acc1, 2),\n    ResultValue = mongoose_acc:get(example, value, ResultAcc),\n    ?LOG_INFO(#{what => hook_finished, result => ResultValue, result_acc => ResultAcc}).\n\nfirst_handler(Acc, #{number := Number}, #{extra_param := Extra}) ->\n    V0 = mongoose_acc:get(example, value, Acc),\n    Result = V0 + Number,\n    ?LOG_INFO(#{what => first_handler, value => V0, argument => Number,\n                result => Result, extra => Extra}),\n    {ok, mongoose_acc:set(example, value, Result, Acc)}.\n\nstopping_handler(Acc, #{number := Number}, _) ->\n    V0 = mongoose_acc:get(example, value, Acc),\n    Result = V0 + Number,\n    ?LOG_INFO(#{what => stopping_handler, value => V0, argument => Number, result => Result}),\n    {stop, mongoose_acc:set(example, value, Result, Acc)}.\n\nnever_run_handler(Acc, #{number := Number}, _) ->\n    ?LOG_INFO(#{what => never_run_handler,\n                text => <<\"This handler won't run as it's registered with a priority bigger 
\"\n                          \"than that of stopping_handler/2 is. \"\n                          \"This text should never get printed.\">>}),\n    {ok, Acc * Number}.\n

The module is intended to be used from the shell for educational purposes:

(mongooseim@localhost)1> gen_mod:is_loaded(<<\"localhost\">>, mod_hook_example).\nfalse\n(mongooseim@localhost)2> mongoose_modules:ensure_started(<<\"localhost\">>, mod_hook_example, #{}).\n{started,ok}\n(mongooseim@localhost)3> gen_mod:is_loaded(<<\"localhost\">>, mod_hook_example).\ntrue\n(mongooseim@localhost)4> mongoose_logs:set_module_loglevel(mod_hook_example, info).\nok\n(mongooseim@localhost)5> mod_hook_example:run_custom_hook(<<\"localhost\">>).\nwhen=2022-12-15T12:37:16.109544+00:00 level=info what=first_handler pid=<0.1081.0> at=mod_hook_example:first_handler/3:41 value=5 result=7 extra=ExtraParam argument=2 \nwhen=2022-12-15T12:37:16.109809+00:00 level=info what=stopping_handler pid=<0.1081.0> at=mod_hook_example:stopping_handler/3:48 value=7 result=9 argument=2 \nwhen=2022-12-15T12:37:16.110028+00:00 level=info what=hook_finished pid=<0.1081.0> at=mod_hook_example:run_custom_hook/1:36 result_acc_{example,value}=9 result_acc_timestamp=1671107836109517 result_acc_stanza=undefined result_acc_ref=#Ref<0.4046106046.1908670465.111816> result_acc_origin_pid=<0.1081.0> result_acc_origin_location_mfa={mod_hook_example,run_custom_hook,1} result_acc_origin_location_line=32 result_acc_origin_location_file=/Users/paweldlugosz/Dev/Repos/MongooseIM/src/mod_hook_example.erl result_acc_non_strippable= result_acc_mongoose_acc=true result_acc_lserver=localhost result_acc_host_type=localhost result=9 \nok\n
"},{"location":"developers-guide/OpenSSL-and-FIPS/","title":"OpenSSL FIPS","text":"

Support for OpenSSL FIPS was added to MongooseIM in version 1.7.0.

"},{"location":"developers-guide/OpenSSL-and-FIPS/#incompatibilities","title":"Incompatibilities","text":"

Currently known incompatible features are:

  • SASL auth mechanism DIGEST-MD5: due to a forbidden MD5 hash function in FIPS mode.
"},{"location":"developers-guide/OpenSSL-and-FIPS/#requirements","title":"Requirements","text":""},{"location":"developers-guide/OpenSSL-and-FIPS/#build-erlangotp-with-fips-support","title":"Build Erlang/OTP with FIPS support","text":"

Make sure the option --enable-fips is specified for configure command. If you want to use a different OpenSSL than the default one, specify the option --with-ssl=PATH_TO_YOUR_OPENSSL as well. Here's an example of a command for building Erlang/OTP with kerl:

KERL_CONFIGURE_OPTIONS=\"--enable-fips\" ./kerl build 23.3 23.3-fips\n
"},{"location":"developers-guide/OpenSSL-and-FIPS/#building-mongooseim-with-a-custom-openssl","title":"Building MongooseIM with a custom OpenSSL","text":"

If you want to use a custom OpenSSL, please export the CFLAGS and LDFLAGS env vars pointing to a FIPS compliant OpenSSL before running ./rebar3 compile or make rel.

OPENSSL_LIB=~/openssl/lib #put your path here\nOPENSSL_INC=~/openssl/inc #put your path here\n\nexport LDFLAGS=\"-Wl,-rpath=$OPENSSL_LIB -L$OPENSSL_LIB\"\nexport CFLAGS=\"-I$OPENSSL_INC\"\n
"},{"location":"developers-guide/OpenSSL-and-FIPS/#how-to-enabledisable-fips-mode","title":"How to enable/disable FIPS mode","text":"

Find etc/app.config in the release directory. FIPS mode is an option of the crypto application. In order to enable/disable it, add the following section to app.config:

{crypto, [{fips_mode, Value}]},\n

where Value is either true or false.

"},{"location":"developers-guide/OpenSSL-and-FIPS/#how-to-check-if-the-fips-mode-is-enabled","title":"How to check if the FIPS mode is enabled","text":""},{"location":"developers-guide/OpenSSL-and-FIPS/#log-message","title":"Log message","text":"

When MongooseIM starts, it prints the following log message if FIPS mode is enabled

2015-02-25 14:30:54.501 [warning] <0.242.0>@mongoose_fips:do_notify:37 FIPS mode enabled\n
"},{"location":"developers-guide/OpenSSL-and-FIPS/#run-time-check","title":"Run-time check","text":"

Run the following function in the MongooseIM console:

mongoose_fips:status().\n

The function returns:

  • not_enabled - fips_mode is not set to true in etc/app.config
  • enabled - fips_mode is set to true in etc/app.config
  • not_supported - erlang compiled without fips support
"},{"location":"developers-guide/OpenSSL-and-FIPS/#cipher-suites-difference","title":"Cipher suites difference","text":"

A test using a cipher_suites_test.sh script (available in the tools directory) can be performed on MongooseIM with FIPS mode enabled and disabled. We've used OpenSSL 1.0.1j-fips.

Here are all the cipher suites available when the FIPS mode is enabled (the list may vary for different openssl versions):

  • ECDHE-RSA-AES256-SHA
  • DHE-RSA-AES256-SHA
  • AES256-SHA
  • ECDHE-RSA-DES-CBC3-SHA
  • EDH-RSA-DES-CBC3-SHA
  • DES-CBC3-SHA
  • ECDHE-RSA-AES128-SHA
  • DHE-RSA-AES128-SHA
  • AES128-SHA

Here are all the cipher suites available when the FIPS mode is disabled (the list may vary for different openssl versions):

  • ECDHE-RSA-AES256-SHA
  • DHE-RSA-AES256-SHA
  • DHE-RSA-CAMELLIA256-SHA
  • AES256-SHA
  • CAMELLIA256-SHA
  • ECDHE-RSA-DES-CBC3-SHA
  • EDH-RSA-DES-CBC3-SHA
  • DES-CBC3-SHA
  • ECDHE-RSA-AES128-SHA
  • DHE-RSA-AES128-SHA
  • DHE-RSA-SEED-SHA
  • DHE-RSA-CAMELLIA128-SHA
  • AES128-SHA
  • SEED-SHA
  • CAMELLIA128-SHA
  • ECDHE-RSA-RC4-SHA
  • RC4-SHA
  • RC4-MD5
"},{"location":"developers-guide/SCRAM-serialization/","title":"SCRAM serialization format","text":""},{"location":"developers-guide/SCRAM-serialization/#overview","title":"Overview","text":"

This document describes the SCRAM serialization format used by MongooseIM. Developers can use this information to create advanced endpoints for ejabberd_auth_http or enable other software to read (i.e. share) the user authentication data.

"},{"location":"developers-guide/SCRAM-serialization/#format-description","title":"Format description","text":"

==MULTI_SCRAM==,<iteration count>,===SHA1===<salt>|<stored key>|<server key>,==SHA224==<salt>|<stored key>|<server key>,==SHA256==<salt>|<stored key>|<server key>,==SHA384==<salt>|<stored key>|<server key>,==SHA512==<salt>|<stored key>|<server key>

  • <iteration count> - Iteration Count formatted as a human-readable integer
  • <salt> - Base64-encoded Salt
  • <stored key> - Base64-encoded Stored Key
  • <server key> - Base64-encoded Server Key

The SCRAM format can vary depending on the SHA algorithms that are used for SCRAM. Salt and iteration count is common for different SHA types. Stored Key and Server Key are specific to a given SHA and are following a SHA prefix that is indicating which SHA they belong to.

In order to learn more about the meaning of the Stored Key, Server Key, Salt and Iteration Count, please check the SCRAM specification.

"},{"location":"developers-guide/SCRAM-serialization/#example","title":"Example","text":"
  • Password: padthai
  • Erlang map:
    #{iteration_count => 4096,\n  sha =>\n      #{salt => <<\"QClQsw/sfPEnwj4AEp6E1w==\">>,\n        server_key => <<\"EJvxXWM42tO7BgW21lNZyBc1dD0=\">>,\n        stored_key => <<\"ys1104hRhqMoRputBY5sLHKXoSw=\">>},\n  sha224 =>\n      #{salt => <<\"dk0ImXFVPoUfqD5FveV7YA==\">>,\n        server_key => <<\"EvE2EkZcUb3k4CooeOcVFy95P32t+NDX0xbQUA==\">>,\n        stored_key =>\n            <<\"G0ibQ/YYuCtoun4I+1IF2zJ7Q8x2T23ETnq5Gg==\">>},\n  sha256 =>\n      #{salt => <<\"M7BYKSo04XbzBr4C7b056g==\">>,\n        server_key =>\n            <<\"XhtGFf6NDWsnVSCO4xkzPD3qc046fPL0pATZi7RmaWo=\">>,\n        stored_key =>\n            <<\"A779MC05nSGQln5no0hKTGHFSaQ7oguKBZgORW3s+es=\">>},\n  sha384 =>\n      #{salt => <<\"Ryu0fA29gbwgqFOBk5Mczw==\">>,\n        server_key =>\n            <<\"kR+LMI/E0QBG3oF405/MTAT6NAlCOfPrFOaWH3WBVGM0Viu9Brk6kGwVwXjSP8v0\">>,\n        stored_key =>\n            <<\"k3QwC0Lb1y1/V/31byC5KML5t3mH4JTPjFyeAz7lV2l4SPfzi3JHvLEdoNB5K/VY\">>},\n  sha512 =>\n      #{salt => <<\"SLNuVNcWiNBmnYZNIdj+zg==\">>,\n        server_key =>\n            <<\"jUUDbuQ9ae4UnAWS6RV6W4yifX3La3ESjfZjGol+TBROIb/ihR8UawPHrSHkp4yyDJXtRhR9RlHCHy4bcCm1Yg==\">>,\n        stored_key =>\n            <<\"3ey3gzSsmbxcLnoc1VKCR/739uKX6uuPCyAzn6x8o87ibcjOdUaU8qhL5X4MUI9UPTt667GagNpVTmAWTFNsjA==\">>}}\n
  • Serialized password:
    ==MULTI_SCRAM==,4096,\n===SHA1===QClQsw/sfPEnwj4AEp6E1w==|ys1104hRhqMoRputBY5sLHKXoSw=|EJvxXWM42tO7BgW21lNZyBc1dD0=,\n==SHA224==dk0ImXFVPoUfqD5FveV7YA==|G0ibQ/YYuCtoun4I+1IF2zJ7Q8x2T23ETnq5Gg==|EvE2EkZcUb3k4CooeOcVFy95P32t+NDX0xbQUA==,\n==SHA256==M7BYKSo04XbzBr4C7b056g==|A779MC05nSGQln5no0hKTGHFSaQ7oguKBZgORW3s+es=|XhtGFf6NDWsnVSCO4xkzPD3qc046fPL0pATZi7RmaWo=,\n==SHA384==Ryu0fA29gbwgqFOBk5Mczw==|k3QwC0Lb1y1/V/31byC5KML5t3mH4JTPjFyeAz7lV2l4SPfzi3JHvLEdoNB5K/VY|kR+LMI/E0QBG3oF405/MTAT6NAlCOfPrFOaWH3WBVGM0Viu9Brk6kGwVwXjSP8v0,\n==SHA512==SLNuVNcWiNBmnYZNIdj+zg==|3ey3gzSsmbxcLnoc1VKCR/739uKX6uuPCyAzn6x8o87ibcjOdUaU8qhL5X4MUI9UPTt667GagNpVTmAWTFNsjA==|jUUDbuQ9ae4UnAWS6RV6W4yifX3La3ESjfZjGol+TBROIb/ihR8UawPHrSHkp4yyDJXtRhR9RlHCHy4bcCm1Yg==\n
"},{"location":"developers-guide/SCRAM-serialization/#legacy-format-description","title":"Legacy format description","text":"

MongooseIM installations older or equal to 3.6.2 were supporting only SHA-1 as a hashing algorithm for SCRAM. The SCRAM format that was used can be seen below.

==SCRAM==,<stored key>,<server key>,<salt>,<iteration count>

  • <stored key> - Base64-encoded Stored Key
  • <server key> - Base64-encoded Server Key
  • <salt> - Base64-encoded Salt
  • <iteration count> - Iteration Count formatted as a human-readable integer

In order to learn more about the meaning of the Stored Key, Server Key, Salt and Iteration Count, please check the SCRAM specification.

"},{"location":"developers-guide/SCRAM-serialization/#example_1","title":"Example","text":"
  • Password: misio
  • Erlang record: #scram{ storedkey = <<\"tmi5IE+9pceRV/jkPLFHEaVY33c=\">>, serverkey = <<\"MiWNa8T3dniVDwmh77ufJ41fpAQ=\">>, salt = <<\"inKXODlSY5y5SCsLxibi0w==\">>, iterationcount = 4096 }
  • Serialized password: ==SCRAM==,tmi5IE+9pceRV/jkPLFHEaVY33c=,MiWNa8T3dniVDwmh77ufJ41fpAQ=,inKXODlSY5y5SCsLxibi0w==,4096
"},{"location":"developers-guide/Stanza-routing/","title":"Route of a message through the system","text":"

Let's examine the flow of a message sent from Alice to Bob, both of whom are served by the same domain and connected to the server.

Note that hooks are called at various stages of routing - they perform many tasks, and many MongooseIM functionalities are implemented through hooks & handlers. For a general introduction to hooks, see Hooks and Handlers; to get a closer look at a core few, see the hooks description.

"},{"location":"developers-guide/Stanza-routing/#1-senders-c2s-process-receives-the-message","title":"1. Sender's C2S process receives the message","text":"

Alice's C2S (client-to-server) process, which is a state machine implemented in the mongoose_c2s module, receives data from the TCP socket, and parses each incoming XML element with exml to an internal representation of the stanza, which is then processed by the C2S as a subsequent event.

"},{"location":"developers-guide/Stanza-routing/#2-call-to-user_send_-hooks","title":"2. Call to user_send_* hooks","text":"

Upon some minimal validation of the stanza, the hook user_send_packet is called. Next, depending on the type of the stanza, one of the following hooks is called:

  • user_send_message for messages,
  • user_send_presence for presences,
  • user_send_iq for IQ (info/query) stanzas,
  • user_send_xmlel for other XML elements.

Each hook can be handled by multiple modules subscribed to it. Those modules do various complementary tasks, like storing the message in an archive, sending carbon copies, checking the stanza against privacy lists etc. It is possible for a handler to immediately stop routing at this point, preventing execution of any subsequent handlers or hooks. See hooks description for more information.

"},{"location":"developers-guide/Stanza-routing/#3-message-routing","title":"3. Message routing","text":"

The stanza is routed by ejabberd_router:route/3, which passes it through a chain of routing modules implementing the xmpp_router behaviour and applies the following functions for each of them:

  1. Mod:filter/3, which either drops the stanza, stopping the routing chain, or returns it for further processing, modifying it if necessary.
  2. Mod:route/3, which either handles the stanza, stopping the routing chain, or returns it for further processing, modifying it if necessary.

A list of routing modules can be set in the routing_modules option. The default behaviour is the following:

  • mongoose_router_global: runs a global filter_packet hook.
  • mongoose_router_localdomain: if there is a local route registered for the destination domain (i.e. there is an entry in the mongoose_router ETS table), routes the stanza to it. When the recipient's domain is checked for the first time, the corresponding route is not registered yet, because the routes are added lazily - see mongoose_router_dynamic_domains.
  • mongoose_router_external_localnode: if there is an external component registered for the destination domain on the current node, routes the stanza to it. Such components are stored in the Mnesia table external_component, which is not replicated in the cluster.
  • mongoose_router_external: if there is an external component registered for the destination domain on any node in the cluster, routes the stanza to it. Such components are stored in the Mnesia table external_component_global, which is replicated among all cluster nodes.
  • mongoose_router_dynamic_domains: if the recipient's domain is hosted by the local server, a route is added for it, and the stanza is routed locally.
  • ejabberd_s2s: tries to find or establish a connection to another server and send the stanza there.

Assuming that the message from Alice to Bob is not the first stanza addressed to their domain, the routing chain will stop at mongoose_router_localdomain, which will deliver the message locally.

"},{"location":"developers-guide/Stanza-routing/#4-mongoose_local_delivery","title":"4. mongoose_local_delivery","text":"

When an external component or a local route is found, the packet is delivered locally by mongoose_local_delivery:do_route/5. Firstly, the filter_local_packet hook is run to check if the stanza should be delivered or dropped. This hook is also a place where modules can add their own functionality evaluated for each locally delivered stanza.

If the check passes, the next step is to call the handler associated with the component or the local route. Handlers are modules implementing the mongoose_packet_handler behaviour, and stanzas to local users (like Alice and Bob) are handled by the ejabberd_local module.

"},{"location":"developers-guide/Stanza-routing/#5-ejabberd_local-to-ejabberd_sm","title":"5. ejabberd_local to ejabberd_sm","text":"

ejabberd_local:process_packet/5 checks if the stanza is addressed to a user or to the server itself. For local users like Bob, ejabberd_sm:route/4 is called.

"},{"location":"developers-guide/Stanza-routing/#6-ejabberd_sm","title":"6. ejabberd_sm","text":"

ejabberd_sm determines the available resources of the recipient, takes into account their priorities and whether the message is addressed to a particular resource or a bare JID. It appropriately replicates (or not) the message and sends it to the recipient's C2S process(es) by calling mongoose_c2s:route/2. In case no resources are available for delivery (hence no C2S processes to pass the message to), the offline_message hook is run.

As Bob has one online session, the message is sent to the C2S process associated with that session.

"},{"location":"developers-guide/Stanza-routing/#7-recipients-c2s-process-delivers-the-message","title":"7. Recipient's C2S process delivers the message","text":"

The user_receive_packet hook is run to notify the rest of the system about the stanza delivery. Next, depending on the type of the stanza, one of the following hooks is called:

  • user_receive_message for messages,
  • user_receive_presence for presences,
  • user_receive_iq for IQ (info/query),
  • user_receive_xmlel for other XML elements.

Each hook can be handled by multiple modules subscribed to it. These hooks' handlers can stop the routing, e.g. when the stanza is blocked by mod_privacy. Finally, the xmpp_presend_element hook is called, which is used by mod_csi and mod_stream_management. This is the last hook that can stop the routing - otherwise, the stanza is converted to binary data and sent to the recipient's TCP socket.

"},{"location":"developers-guide/Testing-MongooseIM/","title":"Test runner","text":"

The test runner script is used to compile MongooseIM and run tests.

"},{"location":"developers-guide/Testing-MongooseIM/#requirements","title":"Requirements","text":""},{"location":"developers-guide/Testing-MongooseIM/#docker","title":"Docker","text":"

Docker should be installed on the local system, and the user executing the tests must have privileges to start new containers (usually achieved by adding the user to the docker group).

Alternatively, you can use Podman. Here is how to install it on Mac:

brew install podman\npodman machine init\npodman machine start\nln -s /usr/local/bin/podman /usr/local/bin/docker\n

You can also specify which container supervisor you want to use by defining an environment variable in your ~/.bashrc:

export DOCKER=podman\n
"},{"location":"developers-guide/Testing-MongooseIM/#freetds-for-mssql-connectivity","title":"FreeTDS for MSSQL connectivity","text":"

MongooseIM requires FreeTDS in order to connect to MSSQL container.

Please install the driver:

# Ubuntu\n$ sudo apt install freetds-dev tdsodbc\n\n# CentOS compatible systems (Rocky, Alma)\n$ sudo yum install freetds\n\n# macOS\n$ brew install freetds\n

In case you are using an operating system different from Ubuntu or MacOS or have a custom FreeTDS installation, you may have to modify the tools/setup-db.sh script to use the proper paths. Find a configuration block starting with [mongoose-mssql] and change the Driver and Setup. For example, for CentOS compatible systems change them to /usr/lib64/libtdsodbc.so.0 and /usr/lib64/libtdsS.so respectively.

"},{"location":"developers-guide/Testing-MongooseIM/#how-to-print-the-instructions","title":"How to print the instructions","text":"

The help command prints a list of supported options.

./tools/test-runner.sh --help\n
"},{"location":"developers-guide/Testing-MongooseIM/#test-runner-examples","title":"Test runner examples","text":"

Usage example:

./tools/test-runner.sh --db redis --preset internal_mnesia\n

The command runs both big (feature) and small (unit) tests.

To view more examples, run:

./tools/test-runner.sh --examples\n
"},{"location":"developers-guide/Testing-MongooseIM/#test-runner-completion","title":"Test runner completion","text":"

Test runner supports shell TAB completion.

To enable completion in bash or zsh, run:

source tools/test-runner-complete.sh\n

To view completion examples, run:

./tools/test-runner.sh --examples-complete\n
"},{"location":"developers-guide/Testing-MongooseIM/#viewing-test-reports","title":"Viewing test reports","text":"

To view test execution results, run:

./tools/test-runner.sh --show-big-reports\n./tools/test-runner.sh --show-small-reports\n
"},{"location":"developers-guide/Testing-MongooseIM/#rerun-big-tests","title":"Rerun big tests","text":"

Very often we want to restart a specific suite when some test failed.

For example, some test has failed in mam_SUITE. The command was used to execute tests:

./tools/test-runner.sh --skip-small-tests --db pgsql --preset pgsql_mnesia --skip-stop-nodes\n

--skip-stop-nodes is optional here, because if any big test fails, then nodes would be still running.

We can just execute the same command, but it would rebuild nodes and start them.

The command can be used instead:

./tools/test-runner.sh --rerun-big-tests -- mam\n

--rerun-big-tests expands into --skip-small-tests --skip-setup-db --dev-nodes --test-hosts --skip-cover --skip-preset.

And mam is used to run mam_SUITE suite only.

"},{"location":"developers-guide/Testing-MongooseIM/#debugging-big-tests-database","title":"Debugging big tests database","text":"

This command opens MySQL shell interface:

./tools/open-test-database-shell.sh mysql\n

This command opens PgSQL shell interface:

./tools/open-test-database-shell.sh pgsql\n

This command opens MSSQL shell interface:

./tools/open-test-database-shell.sh mssql\n

You can use this command to execute SQL queries directly. It's useful when designing new SQL queries.

"},{"location":"developers-guide/Testing-MongooseIM/#unit-tests-aka-small-tests","title":"Unit tests (a.k.a. \"small tests\")","text":"

These test suites are aimed at testing various modules and libraries standalone, without launching a MongooseIM instance. They are very useful for developing/debugging libraries.

The test suites are located in test/ directory. To run all of them, use ./rebar3 ct; to run just a selected suite, use ./rebar3 ct --suite test/my_selected_SUITE. Rebar recompiles all the code automatically, there is no need for a separate compilation step.

If all the tests pass, you will get no output and summary log will be available in ct.log. If any of the tests fail the summary log is printed to stdout.

Detailed test results in a nice HTML format are saved in

_build/test/logs/ct_run.[something][datetime]/\n

Unit test running example using test runner:

# Run all small tests, show progress\n./tools/test-runner.sh --skip-big-tests --verbose\n\n# Run sha_SUITE without cover\n./tools/test-runner.sh --skip-big-tests --skip-cover -- sha\n\n# Run the 'general' group in config_parser_SUITE, show progress\n./tools/test-runner.sh --skip-big-tests --verbose -- config_parser:general\n
"},{"location":"developers-guide/Testing-MongooseIM/#end-to-end-tests-aka-big-tests","title":"End-to-end tests (a.k.a. \"big tests\")","text":""},{"location":"developers-guide/Testing-MongooseIM/#using-test-runner","title":"Using test runner","text":"

Most important options are preset and database:

# Runs privacy_SUITE and private_SUITE with PostgreSQL\n./tools/test-runner.sh --skip-small-tests --db pgsql --preset pgsql_mnesia -- privacy private\n\n# Runs rdbms_SUITE with MSSQL\n# Initialises a single MongooseIM node (works for some tests only)\n# Disables cover\n./tools/test-runner.sh --skip-small-tests --db mssql --preset odbc_mssql_mnesia --test-hosts mim --dev-nodes mim1 --skip-cover -- rdbms\n
"},{"location":"developers-guide/Testing-MongooseIM/#tldr","title":"TL;DR","text":"

You can also run the tests \"by hand\", instead of using the test runner.

In shell #1:

cd $MONGOOSEIM\n./rebar3 compile\nmake devrel\n

If databases are needed, for example PostgreSQL, you can run:

DB=\"pgsql\" ./tools/setup-db.sh\n

In shell #2:

cd $MONGOOSEIM/_build/mim1/rel/mongooseim\n./bin/mongooseimctl live\n

In shell #3:

cd $MONGOOSEIM/_build/mim2/rel/mongooseim\n./bin/mongooseimctl live\n

In shell #4:

cd $MONGOOSEIM/_build/mim3/rel/mongooseim\n./bin/mongooseimctl live\n

In shell #5:

cd $MONGOOSEIM/_build/fed1/rel/mongooseim\n./bin/mongooseimctl live\n

In shell #6:

cd $MONGOOSEIM/_build/reg1/rel/mongooseim\n./bin/mongooseimctl live\n

Back to shell #1:

cd big_tests/\nmake quicktest\n

Wait for the tests to finish and celebrate (or wallow in despair and grief)!

One-liner alternative for tmux users:

./rebar3 compile\nmake devrel\ntmux new-window -n mim1 '_build/mim1/rel/mongooseim/bin/mongooseimctl live'\ntmux new-window -n mim2 '_build/mim2/rel/mongooseim/bin/mongooseimctl live'\ntmux new-window -n mim3 '_build/mim3/rel/mongooseim/bin/mongooseimctl live'\ntmux new-window -n fed1 '_build/fed1/rel/mongooseim/bin/mongooseimctl live'\ntmux new-window -n reg1 '_build/reg1/rel/mongooseim/bin/mongooseimctl live'\n_build/mim1/rel/mongooseim/bin/mongooseimctl started\n_build/mim2/rel/mongooseim/bin/mongooseimctl started\n_build/mim3/rel/mongooseim/bin/mongooseimctl started\n_build/fed1/rel/mongooseim/bin/mongooseimctl started\n_build/reg1/rel/mongooseim/bin/mongooseimctl started\nmake -C big_tests quicktest\n

Start a new tmux and paste the commands.

"},{"location":"developers-guide/Testing-MongooseIM/#step-by-step-breakdown","title":"Step-by-step breakdown","text":"

make devrel builds five server nodes, preconfigured for a wide range of features covered by end-to-end tests.

  • $MONGOOSEIM/_build/mim1/rel, for most test SUITEs
  • $MONGOOSEIM/_build/mim*/rel, in order to test cluster-related commands;
  • $MONGOOSEIM/_build/fed1/rel, in order to test XMPP federation (server to server communication, S2S).
  • $MONGOOSEIM/_build/reg1/rel, in order to test global distribution feature.

In general, running a server in the interactive mode (i.e. mongooseimctl live) is not required to test it, but it's convenient as any warnings and errors can be spotted in real time. It's also easy to inspect the server state or trace execution (e.g. using dbg) in case of anything going wrong in some of the tests. To run the server in the background instead of the interactive mode, use mongooseimctl start && mongooseimctl started.

The quicktest configuration is a relatively comprehensive one, giving good overview of what does and what doesn't work in the system, without repeating tests. Why would we want to ever repeat the tests? In order to test different backends of the same parts of the system. E.g. a message archive might store messages in MySQL/PostgreSQL or Elasticsearch - the glue code between the XMPP logic module and database is different in each case, therefore repeating the same tests with different databases is necessary to guarantee a truthful code coverage measurement.

"},{"location":"developers-guide/Testing-MongooseIM/#testing-a-feature-in-development-tdd","title":"Testing a feature in development / TDD","text":"

The whole suite takes a significant amount of time to complete. When you develop a new feature, the speed of iterating is crucial to maintain the flow (who doesn't like the feeling?!) and not lose focus.

In $MONGOOSEIM/big_tests/ we have:

$ tree big_tests/ -L 1 -F\nbig_tests/\n\u251c\u2500\u2500 Makefile\n\u251c\u2500\u2500 README.md\n\u251c\u2500\u2500 default.spec\n\u251c\u2500\u2500 test.config\n\u251c\u2500\u2500 tests/\n\u2514\u2500\u2500 ...\n

tests/ is where the test suites reside.

*.config files are the suite configuration files - they contain predefined XMPP client specifications, server addresses and XMPP domains to use, and options required by test support libraries (i.e. Escalus).

*.spec files are the test specifications - they define the configuration file to use, the suites, test groups or individual test cases to run or skip, and some less important things.

default.spec is the default when running make quicktest, but it can be overridden with a TESTSPEC variable:

# make sure we're in $MONGOOSEIM/big_tests/\ncd $MONGOOSEIM/big_tests/\nmake quicktest TESTSPEC=my-feature.spec\n

To speed up the development cycle, developers usually create a .spec file for each feature (or each project, if you're cloning away) and only enable the suites / test groups they are working on. This allows testing only the parts of the system that are actually being changed. It's worth running default.spec once in a while to check for regressions.

Consult the default.spec file to see how to run only selected tests/groups/cases.

If you're sure that none of the test dependencies have changed, and you only edited the test suites and/or MongooseIM code, it's possible to speed up the tests by skipping the Rebar dependency and compilation checks by providing PREPARE= (i.e. an empty value):

make quicktest PREPARE=\n

Consult the big_tests/Makefile to see how it works.

"},{"location":"developers-guide/Testing-MongooseIM/#applying-code-changes","title":"Applying code changes","text":"

When working on a feature or a bug fix you often modify the code and check if it works as expected. In order to change the code on dev nodes that are already generated (mim* and fed*) recompile the code for a specific node. For example, to update the code on mim1 node all you have to do is:

./rebar3 as mim1 compile\n

A similar command applies to other nodes, the important thing being rebar3's profile.

When the above command finishes, the code can be reloaded on the server by either reloading changed module(s) in the node's shell, e.g. l(mongoose_rdbms), or restarting the node.

"},{"location":"developers-guide/Testing-MongooseIM/#reading-test-reports","title":"Reading test reports","text":"

When finished, the test engine writes detailed html reports into a directory:

big_tests/ct_report/ct_run.[gobbledygook][datetime]/\n

Each run is saved into a new directory. This snippet:

#!/bin/bash\n\nlst=$(ls -rt ct_report | grep ct_run | tail -n 1)\nrm ct_report/lastrun\nln -s $lst ct_report/lastrun\n

can be of some help.

"},{"location":"developers-guide/Testing-MongooseIM/#checking-coverage","title":"Checking coverage","text":"

If you want to check how much of the code is covered by tests, run:

make cover_quicktest\n

Note

You need all the mim nodes (mim1, mim2 and mim3) up and running, even if you only run some of the tests. If any of the nodes is down, the test will crash.

This command will recompile and reload the code on dev nodes with coverage enabled and run test suites as defined in the spec. Coverage statistics will be available in big_tests/ct_report/cover.html and coverage subdirectory.

"},{"location":"developers-guide/Testing-MongooseIM/#advanced-topics","title":"Advanced topics","text":"

There are many more options available. One of them is sequentially testing a number of preset configurations - we do it every day on CircleCI, testing MongooseIM with various OTP versions and database backends. Altogether, we have eight preset configurations.

If you want to dig deeper, consult .circleci/config.yml, .github/workflows/ci.yml and tools/test.sh, everything we do is there.

"},{"location":"developers-guide/Testing-MongooseIM/#gathering-test-reports-from-tests","title":"Gathering test reports from tests","text":"

If you test your MongooseIM fork on GitHub Actions or other CI provider, you might want to access test reports (which also include node logs and crash dumps) that are created by the test runner.

"},{"location":"developers-guide/Testing-MongooseIM/#uploading-reports-to-s3","title":"Uploading reports to S3","text":"

Our script uses AWS CLI to upload test results to an S3 bucket. Simply set relevant environment variables in your repository settings (at least AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY have to be set), and enjoy test reports landing straight into your bucket (AWS_BUCKET variable should store the bucket's name).

"},{"location":"developers-guide/Testing-MongooseIM/#uploading-reports-to-google-drive","title":"Uploading reports to Google Drive","text":"

To store test results in Google Drive you need to create a new project and obtain service account credentials. You must also add Google Drive API to your project - to do this, navigate to APIs & Services in your project console and find & add Google Drive API in the Library tab. Once downloaded, encode the credentials file with base64 (e.g. cat serviceCreds.json | base64) and use the result as GDRIVE_SERVICE_ACCOUNT_CREDENTIALS environment variable in your repository settings.

"},{"location":"developers-guide/Testing-MongooseIM/#saving-reports-on-your-personal-account","title":"Saving reports on your personal account","text":"

The uploaded files will belong to the project that you created, i.e. will not be immediately visible from your personal Google Drive UI. To be able to upload files to your personal account, you can share the reports' directory with the project account. First, note the ID of the project's user that you created to gain the service account credentials (e.g. test-123@fair-smile-123456.iam.gserviceaccount.com). You can see this on the Service Accounts tab of the project console. Now, create a directory on your Google Drive that will serve as the test root directory. Go into the directory's sharing options and paste in the project's user ID, granting it write access. Click to expand the advanced sharing options and note the ID of the shared directory that's displayed in the share link (e.g. if the link is https://drive.google.com/drive/folders/1234567890abcdef?usp=sharing, the directory's ID is 1234567890abcdef). Finally, set GDRIVE_PARENT_DIR environment variable of your build to the directory ID that you noted in the previous step.

"},{"location":"developers-guide/Testing-MongooseIM/#load-testing","title":"Load testing","text":"

Alongside CI, we do also CLT (Continuous Load Testing). We use amoc and amoc-arsenal-xmpp to perform tests that give us a feedback on changes to MongooseIM performance.

"},{"location":"developers-guide/accumulators/","title":"Accumulators","text":"

XMPP stanza processing starts in the mongoose_c2s module, which receives the stanza from a socket, or in ejabberd_s2s_in which receives stanzas from federated XMPP clusters. The stanza is processed and eventually it and/or other messages are sent out, either to the original sender, to another c2s process within the same MongooseIM installation, or to another XMPP server.

At the beginning of the main processing chain an accumulator is created containing following set of keys:

  • ref - A unique reference of the acc, useful for tracing.
  • timestamp - An Erlang timestamp retrieved from os:timestamp().
  • origin_pid - A PID of the process that created the accumulator.
  • origin_location - {Module, Function, Line} - A place in the code where the accumulator was created.
  • origin_stanza - Original stanza that triggered the processing (in a binary).
  • lserver - Nameprepped domain of the processing context.
  • host_type - Host type that the domain belongs to.
  • statem_acc - Data related to the C2S state machine.
  • stanza - A map with information about the stanza being routed. May be missing in some processing chains (when they are not triggered by a stanza)!
    • element - exml:element() with the current stanza being routed.
    • from_jid, to_jid - jid:jid() with the sender and the recipient.
    • name - A name of the top-level element in element.
    • type - A value of type attribute of the top-level element. If the attribute is missing, this field contains undefined.
    • ref - A reference of routed stanza.

It is then passed through all the stages until it reaches the end of its life. Throughout the process it is the very same accumulator; it is therefore possible to store a value in it on one stage of the processing and retrieve the same value later on.

The main assumption is that whatever MongooseIM does, it is always triggered by a stanza entering the system, with some exceptions, such as a couple of mongooseimctl operations, which create stanza-less accumulators. The stanza should always be packed into an accumulator and passed on, so that internally every action is performed the same way.

There are three main benefits from this approach:

  1. Performance - if we need to do something involving inspecting a stanza or more complicated operations (e.g. privacy check) we don't need to do it multiple times on various stages of processing - instead we can do it once and store the result in an accumulator.
  2. Debugging - it is now very easy to produce an exact track record of a stanza.
  3. Simplified implementation of modules which inherently involve multi-stage processing (e.g. mod_amp).
"},{"location":"developers-guide/accumulators/#api","title":"API","text":"

mongoose_acc module exports t() type which is the accumulator type.

"},{"location":"developers-guide/accumulators/#newnew_acc_params","title":"new(new_acc_params())","text":"

A constructor for accumulators. new_acc_params() is a map with following supported keys:

  • location - Should be a {Module, Function, Line} tuple (may be constructed with ?LOCATION macro from mongoose.hrl). Its format is not enforced by the acc logic but Dialyzer will most probably complain about any other type.
  • lserver - Nameprepped domain of the processing context.
  • host_type (optional) - Host type that the domain belongs to.
  • statem_acc (optional) - Data related to the C2S state machine.
  • element (optional) - If present, it will be used as a source for the stanza map.
  • from_jid, to_jid (optional) - Values used to override from and to attributes of the element, respectively.

If element is provided, the sender and recipient JIDs are extracted, either from the element itself, or from to_jid and from_jid parameters. The call will fail with an exception if it's not possible.

While allowed, stanza-less accumulators usage should be avoided.

"},{"location":"developers-guide/accumulators/#getters-for-predefined-fields","title":"Getters for predefined fields","text":"
  • ref(t())
  • timestamp(t())
  • lserver(t())
  • host_type(t())
  • element(t())
  • to_jid(t())
  • from_jid(t())
  • get_statem_acc(t())
  • packet(t()) - Returns a mongoose_c2s:packet() if there is a stanza in the accumulator.
  • stanza_name(t()) - Returns name value from stanza map.
  • stanza_type(t()) - Returns type value from stanza map.
  • stanza_ref(t()) - Returns ref value from stanza map. This is not the same as ref(t())!
"},{"location":"developers-guide/accumulators/#update_stanzastanza_params-t","title":"update_stanza(stanza_params(), t())","text":"

Replaces the whole stanza field in the accumulator with params provided in stanza_params(), which is a map of 3 fields: element, from_jid, to_jid. The same rules apply as in the case of constructor (new/1) but this time element field is mandatory.

"},{"location":"developers-guide/accumulators/#access-to-namespaced-fields","title":"Access to namespaced fields","text":"

It is possible to store and retrieve any data in the accumulator, that is related to the processing. There is no scope protection, so every module may access all namespaces and keys inside them.

  • set(Namespace :: any(), Key :: any(), Value :: any(), t())
  • set_permanent(Namespace :: any(), Key :: any(), Value :: any(), t()) - Upserts a field, which won't be removed during strip operation.
  • append(Namespace :: any(), Key :: any(), Value :: any(), t()) - In order to use this function, a Namespace:Key field must not exist or must be a list. Value is appended to the end of this list. If Value is a list, then a OldValue ++ Value operation is performed. In other cases OldValue ++ [Value] is used.
  • get(Namespace :: any(), Key :: any(), t()) - Returns a value of a specified field. Will crash if the NS:Key is not found.
  • get(Namespace :: any(), Key :: any(), Default :: any(), t()) - Returns a value of a specified field or Default if NS:Key is not found.
  • delete(Namespace :: any(), Key :: any(), t()) - Removes a specified field, no matter if it is permanent or not.
"},{"location":"developers-guide/accumulators/#stripping","title":"Stripping","text":"

Accumulator is used mostly to cache values for reuse within a c2s process; when it goes out to somewhere else, it is stripped of all unnecessary attributes except for the non-strippable ones, e.g.

  • ref
  • timestamp
  • origin_pid
  • origin_location
  • non_strippable - A set of permanent NS:Key pairs.

For a complete list, see mongoose_acc:default_non_strippable/0

If you want it to carry some additional values along with it, please use a dedicated api for setting \"permanent\" fields:

Acc2 = mongoose_acc:set_permanent(myns, myprop, 123, Acc1),\n

Permanent fields may be retrieved with ordinary get/3,4 functions. There are also functions get_permanent_keys/1 and get_permanent_fields/1 for extracting all at once.

The rationale behind stripping an accumulator is that some values stored in it are context-dependent. For example, at the beginning lserver refers to the host of the sender C2S. When an accumulator goes to the c2s of the recipient, the lserver attribute may change. There are also many cached values which are not valid anymore when user changes (e.g. privacy checks).

In order to strip an accumulator, please use strip(strip_params(), t()), where strip_params() is a map of:

  • lserver - New domain. Obviously, may be equal to the old value.
  • host_type - Host type associated with the new domain, if there is one.
  • element, from_jid, to_jid - The same rules apply as in update_stanza/2.
"},{"location":"developers-guide/accumulators/#main-principles-of-an-accumulator-processing","title":"Main principles of an accumulator processing","text":"
  1. An accumulator is created when a stanza enters the server.
  2. An XML stanza is never passed around as a pure exml:element().
  3. An accumulator is stripped when it is passed to a different context (e.g. another c2s process).
  4. If a process produces more stanzas to be routed, they must reuse the original acc but with the stanza replaced with update_stanza/2.
"},{"location":"developers-guide/accumulators/#hooks","title":"Hooks","text":"

Many of the MongooseIM functionalities are implemented in submodules which attach their handlers to hooks (this is covered in detail in \"Hooks and handlers\"). When it comes to the accumulators, the following rules apply:

  • If a hook is related to stanza processing, a Mongoose accumulator should be provided. A hook handler may modify an accumulator in every permitted way (i.e. shouldn't directly modify acc fields, bypassing mongoose_acc API) and should return the execution result in the hook:result field. This is not enforced but should be followed by convention.
  • Avoid passing superfluous arguments to handlers - e.g. an LServer in hook args is redundant since it is already present in the accumulator.

Most handlers have already been modified so that they accept an instance of mongoose_acc:t() as the first argument and return value by storing it inside it. How the accumulator is used within a module is up to the implementers of the module.

"},{"location":"developers-guide/accumulators/#iqs-and-accumulators","title":"IQs and accumulators","text":"

mongoose_iq module exposes a dedicated API for accessing IQ-related accumulator fields. These are:

  • info(Acc) - Returns a #iq{} record produced from a stanza stored in the accumulator. May be invalid or not_iq if the stanza is not a valid IQ.
  • xmlns(Acc) - Returns XMLNS of the first subelement inside an IQ. In most cases it is a namespace of <query/> subelement. May be undefined.
  • command(Acc) - Returns the name of a first subelement inside an IQ. May be undefined.

These functions ensure that cached information matches the accumulator's stanza, so all of them return a tuple with a possibly updated acc as a second element.

"},{"location":"developers-guide/accumulators/#sample-usage-actual-and-potential","title":"Sample usage, actual and potential","text":""},{"location":"developers-guide/accumulators/#privacy-check","title":"Privacy check","text":"

Stanzas are often checked against privacy lists. According to the current mongoose_privacy:privacy_check_packet implementation, the result is stored in an accumulator so if a check has to be repeated it is just one map read.

"},{"location":"developers-guide/accumulators/#tracing","title":"Tracing","text":"

origin_pid and origin_location fields are immutable for the lifespan of a single accumulator. There are many places in the server, where an accumulator may be created, so origin_location makes it much easier to find out what event has triggered the processing, and origin_pid identifies the process in which it happened.

"},{"location":"developers-guide/accumulators/#performance-measurement","title":"Performance measurement","text":"

Given that each accumulator has a timestamp denoting its creation time, it is now very easy to implement a metric showing the stanza processing time, or even multiple metrics splitting it into stages.

"},{"location":"developers-guide/domain_management/","title":"Domain management","text":"

Warning

Some modules do not work with dynamic domains. This is also the case for s2s and the XMPP components (XEP-0114) mechanism, as configured in the listen.service section.

"},{"location":"developers-guide/domain_management/#mongooseim-core-component","title":"MongooseIM core component","text":"

Implemented by mongoose_domain_core module.

It is based on gen_server & ETS table w. public read access. This module is local for the node, it does not implement any sync across the nodes in a cluster. This component is responsible for dynamic routing, it is always started by MIM even if there is no support of dynamic domain names configured.

It provides the following interfaces:

  • Init - accepts the list of initial domain/host_type pairs provided in config file, and the list of host_types that can be used for dynamic insertion. Any of these lists can be empty, initial list of domain/host_type pairs can have some unique host_types not mentioned in the host_types list. The component is initialised by the main MIM supervisor. Implemented in mongoose_domain_sup:start_link/0.
  • Insert - adding new domain/host_type pair. This function is idempotent. It returns success on an attempt to insert the existing data, but fails if ETS already has the domain name associated with another host type. Implemented in mongoose_domain_api:insert_domain(Domain, HostType).
  • Remove - This function is idempotent. It deletes existing domain/host_type pairs. It is impossible to delete domain/host_type pairs specified on init of the component. Implemented in mongoose_domain_api:delete_domain(Domain).
  • Get host type by domain. Implemented in mongoose_domain_api:get_host_type(Domain).
  • Get all domains configured for the host_type. Implemented in mongoose_domain_api:get_domains_by_host_type(HostType).
  • Get the list of the host_types provided during initialisation. Implemented in mongoose_domain_api:get_all_static().

mongoose_domain_core implementation:

  • Has mongoose_domain_core table.
  • Default (initial) domains are static.
  • Disabled or deleted domains are not in mongoose_domain_core.
  • Static domains are non-mutable.
  • Static domains are not replicated.
  • Static domains has priority above DB domains.
"},{"location":"developers-guide/domain_management/#mongooseim-service","title":"MongooseIM service","text":"

As described in Services. Implements the service behaviour. Implemented by service_domain_db module.

This service provides an interface for dynamic management of domain names. It has persistent storage (RDBMS) where it stores information about domain names. This service ensures synchronization of dynamically managed domain names across different nodes in the cluster.

The minimal set of information associated with domain name is this:

  • Host type
  • Status (enabled/disabled)

This service provides the following interfaces:

  • Init - on init all the \u201cenabled\u201d domain names from the persistent storage is added to the core MIM component described above.
  • Add domain name (w/ host type) - This function is idempotent. An added domain is always \u201cenabled\u201d by default - it must be added in the core MIM component described in the previous section. If it\u2019s successfully enabled, then information about the domain name is added into persistent storage and distributed across all the nodes in the cluster.
  • Disabling/Enabling domain name - This function is idempotent. The status of the existing domain is always changed on successful call. If domain name is enabled, then it is added in the core MIM component. On disabling domain name is deleted from the core MIM component. Change of the status is distributed across all the nodes in the cluster.
  • Remove the domain name - This function is idempotent. Domain name is deleted from the core MIM component (if required) and from the DB. This action is distributed across all the nodes in the cluster.

In case of any issues (domain name is already configured with another host_type or host_type is not supported), errors are logged.

The database schema contains two tables:

  • domain_settings - one record per domain. Maps domain name to host_type and enabled status.
  • domain_events - the log of changes. The only reason it exists is that we can track updates in the domain_settings table and apply updates across different nodes. The old events are eventually deleted from the table. Removal is triggered by all nodes of MongooseIM that have the service configured.

service_domain_db module does two tasks:

  • Initially downloads domains from domain_settings table, using sorting by id.
  • Waits for check_for_updates message and updates core component, depending on records in the domain_events table.

We use id field to sort records when paginating.

"},{"location":"developers-guide/domain_management/#domain-removal","title":"Domain removal","text":"

You cannot delete domains with unknown host-type. Configure host-type first to delete such domains.

Modules which store data in RDBMS and support dynamic domains will remove all persistent data associated with a domain when its removal is requested. This is not the case for NoSQL databases or Mnesia. Because of that, we recommend using RDBMS with dynamic domains. Please note, that mod_auth_token is the only exception for now and does not remove data from RDBMS when removing a domain.

"},{"location":"developers-guide/domain_management/#service-options","title":"Service options","text":"

Described in the services section.

"},{"location":"developers-guide/domain_management/#command-line-interface","title":"Command Line Interface","text":"

You can manage the domains with the mongooseimctl command. Some examples are provided below:

"},{"location":"developers-guide/domain_management/#add-domain","title":"Add domain:","text":"
./mongooseimctl domain addDomain --domain example.com --hostType type1\n
"},{"location":"developers-guide/domain_management/#delete-domain","title":"Delete domain:","text":"
./mongooseimctl domain removeDomain --domain example.com --hostType type1\n
"},{"location":"developers-guide/domain_management/#disable-domain","title":"Disable domain:","text":"
./mongooseimctl domain disableDomain --domain example.com\n
"},{"location":"developers-guide/domain_management/#enable-domain","title":"Enable domain:","text":"
./mongooseimctl domain enableDomain --domain example.com\n

Run ./mongooseimctl domain to get more information about all supported operations.

"},{"location":"developers-guide/domain_management/#api","title":"API","text":"

You can manage domains with one of our API's:

  • The GraphQL API has the same functionality as the command line interface. The queries and mutations for domains are grouped under the domain category.
  • The REST API (deprecated) supports domain management as well. See Dynamic Domains for details.
"},{"location":"developers-guide/hooks_description/","title":"Selected hooks description","text":"

This is a brief documentation for a few selected hooks. Though hooks & handlers differ in what they are there to do, it is not necessary to describe them all, because the mechanism is general. The following is meant to give you the idea of how the hooks work, what they are used for and the various purposes they can serve.

"},{"location":"developers-guide/hooks_description/#user_send_","title":"user_send_*","text":"

mongoose_c2s_hooks:user_send_packet(HostType, Acc, Params)\nmongoose_c2s_hooks:user_send_message(HostType, Acc, Params)\nmongoose_c2s_hooks:user_send_presence(HostType, Acc, Params)\nmongoose_c2s_hooks:user_send_iq(HostType, Acc, Params)\nmongoose_c2s_hooks:user_send_xmlel(HostType, Acc, Params)\n
These hooks are run in mongoose_c2s after the C2S process receives an XML element from the client.

The hooks won't be called for stanzas arriving from a user served by a federated server (i.e. on a server-to-server connection handled by ejabberd_s2s).

The logic depends on the C2S state, which changes during the connection, authentication and resource binding process:

"},{"location":"developers-guide/hooks_description/#hooks-called-for-session_established","title":"Hooks called for session_established","text":"

Some rudimentary verification of the stanza is done once it is received from the socket:

  • if present, the from attribute of the stanza is checked against the identity of the user whose session the process in question serves; if the identity does not match the contents of the attribute, an error is returned,
  • the recipient JID (to attribute) format is verified.

After successful checks, the following hooks are called. The first one is user_send_packet, which is called for all received XML elements. Next, depending on the type of the element, one of the following hooks is called:

  • user_send_message for messages,
  • user_send_presence for presences,
  • user_send_iq for IQ (info/query) stanzas,
  • user_send_xmlel for other XML elements.

These type-specific hooks should be used instead of user_send_packet when possible.

"},{"location":"developers-guide/hooks_description/#hooks-called-for-other-states","title":"Hooks called for other states","text":"

If the session is not established (e.g. the client hasn't authenticated or its resource is not bound yet), only the user_send_xmlel hook is called regardless of the XML element type. No other user_send_* hooks are called, and no stanza checks are performed.

"},{"location":"developers-guide/hooks_description/#handler-examples","title":"Handler examples","text":"

These hooks are handled by the following modules:

  • mod_blocking - handles IQ requests for blocking lists.
  • mod_caps - detects and caches capability information sent with certain presences for later use.
  • mod_carboncopy - forwards messages to all the user's resources which have carbon copying enabled.
  • mod_event_pusher - sends selected messages to an external service.
  • mod_inbox - stores messages in the user's inbox.
  • mod_mam - stores outgoing messages in an archive.
  • mod_ping - upon reception of every message from the client, this module (re)starts a timer; if nothing more is received from the client within 60 seconds, it sends an IQ ping, to which the client should reply - which starts another timer.
  • mod_presence - handles presence stanzas, updating the user presence state and broadcasting presence updates.
  • mod_privacy - filters sent stanzas according to privacy lists and handles privacy-related IQ requests.
  • mod_register - registers a new user when a registration IQ is received. user_send_xmlel is used because the stanza is received while the session is not established.
  • mod_smart_markers - checks if the stanza contains chat markers info and stores the update.
  • mod_stream_management - counts stanzas sent by the client and handles special XML elements like <a> and <enable>.
"},{"location":"developers-guide/hooks_description/#filter_packet-and-filter_local_packet","title":"filter_packet and filter_local_packet","text":"
mongoose_hooks:filter_packet({From, To, Acc, Packet})\nmongoose_hooks:filter_local_packet({From, To, Acc, Packet})\n

These hooks are run when the packet is being routed by ejabberd_router:route/4, which is the most general function used to route stanzas across the entire cluster. For example, mongoose_c2s calls it after calling the user_send_message or user_send_iq hook, and multiple modules use it for sending replies and errors.

  • filter_packet is run by mongoose_router_global for all routed packets. It is called at the start of the routing procedure.
  • filter_local_packet is run by mongoose_local_delivery when the packet is being routed to a domain hosted by the local server.

The handlers expect the {From, To, Acc, Packet} accumulator as their first argument. The stanza can be filtered out (in case the handler returns drop), left unchanged or modified.

filter_packet is a global hook

Note the hook code inside mongoose_hooks:

filter_packet(Acc) ->\n    run_global_hook(filter_packet, Acc, #{}).\n
This hook is run not for a host type, but globally across the whole cluster. Keep that in mind when registering the handlers and appropriately use the atom global instead of a host type as the second argument.

"},{"location":"developers-guide/hooks_description/#handler-examples_1","title":"Handler examples","text":"

These hooks are handled by the following modules:

  • mod_domain_isolation - filters out cross-domain stanzas.
  • mod_event_pusher - sends out configured events (e.g. push notifications) for incoming stanzas.
  • mod_inbox - stores incoming messages in the recipient's inbox.
  • mod_mam - stores incoming messages in the recipient's archive, and adds MAM-related elements to the message.
  • mod_pubsub - for each subscription authorization form sent by a node owner, the subscription state is updated, and the stanza is dropped.
  • mod_smart_markers - filters out chat markers, because they are handled separately by mod_offline_chatmarkers.
"},{"location":"developers-guide/hooks_description/#user_receive_","title":"user_receive_*","text":"
mongoose_c2s_hooks:user_receive_packet(HostType, Acc, Params)\nmongoose_c2s_hooks:user_receive_message(HostType, Acc, Params)\nmongoose_c2s_hooks:user_receive_presence(HostType, Acc, Params)\nmongoose_c2s_hooks:user_receive_iq(HostType, Acc, Params)\nmongoose_c2s_hooks:user_receive_xmlel(HostType, Acc, Params)\n

These hooks are run in mongoose_c2s after the recipient's C2S process receives an XML element and before sending it to the user.

The hooks won't run for stanzas which are destined to users of a different XMPP domain served by a federated server, connection to which is handled by ejabberd_s2s.

The first hook is user_receive_packet, which is called for all received XML elements. Next, depending on the type of the stanza, one of the following hooks is called:

  • user_receive_message for messages,
  • user_receive_presence for presences,
  • user_receive_iq for IQ (info/query) stanzas,
  • user_receive_xmlel for other XML elements.

These type-specific hooks should be used instead of user_receive_packet when possible.

"},{"location":"developers-guide/hooks_description/#handler-examples_2","title":"Handler examples","text":"

These hooks are handled by the following modules:

  • mod_caps - detects and caches capability information sent with certain messages for later use.
  • mod_carboncopy - forwards messages to all the user's resources which have carbon copying enabled.
  • mod_last - filters queries for user's last activity according to presence subscriptions.
  • mod_presence - handles incoming presences from other users, updating the presence status, and responds to presence probes.
  • mod_privacy - filters received stanzas according to privacy lists.
  • mod_stream_management - filters out stanzas with conflicting session IDs.
"},{"location":"developers-guide/hooks_description/#offline_message","title":"offline_message","text":"
mongoose_hooks:offline_message(Acc, From, To, Packet)\n

ejabberd_sm runs this hook for each message which cannot be delivered, because no resource (i.e. device or desktop client application) of its recipient is available online for delivery.

"},{"location":"developers-guide/hooks_description/#handler-examples_3","title":"Handler examples","text":"

This hook is handled by the following modules, listed in the order of execution:

  • mod_offline_chatmarkers - for chat marker messages, the handler stores them and returns {stop, Acc}, preventing further handlers from being called.

  • mod_offline - stores messages in a persistent way until the recipient comes online, and the message can be successfully delivered. The handler returns {stop, Acc} for all messages, preventing further handlers from being called.

  • mod_offline_stub - returns {stop, Acc} for all messages, preventing further handlers from being called.

  • ejabberd_sm - calls ejabberd_sm:bounce_offline_message, which responds with the <service-unavailable/> stanza error. In the case of using mod_mam the message is actually stored, and no such error should be sent - that's why the module mod_offline_stub can be enabled.

"},{"location":"developers-guide/hooks_description/#remove_user","title":"remove_user","text":"
mongoose_hooks:remove_user(Acc, LServer, LUser)\n

remove_user is run by ejabberd_auth - the authentication module - when a request is made to remove the user from the database of the server.

"},{"location":"developers-guide/hooks_description/#handler-examples_4","title":"Handler examples","text":"

This hook is used by multiple modules, since removing a user requires many cleanup operations:

  • mod_auth_token removes user's authentication tokens;
  • mod_event_pusher disables user's push notifications;
  • mod_inbox removes user's inbox;
  • mod_last removes last activity information (XEP-0012: Last Activity);
  • mod_mam removes the user's message archive;
  • mod_muc_light quits multi-user chat rooms;
  • mod_offline deletes the user's offline messages;
  • mod_privacy removes the user's privacy lists;
  • mod_private removes the user's private xml data storage;
  • mod_pubsub unsubscribes from publish/subscribe channels;
  • mod_roster removes the user's roster from the database;
  • mod_smart_markers removes chat markers stored for the user;
  • mod_vcard removes user's vCard information.
"},{"location":"developers-guide/hooks_description/#node_cleanup","title":"node_cleanup","text":"
mongoose_hooks:node_cleanup(Node)\n

node_cleanup is run by a mongooseim_cleaner process which subscribes to nodedown messages. Currently, the hook is run inside a global transaction (via global:trans/4).

The job of this hook is to remove all processes registered in Mnesia. MongooseIM uses Mnesia to store processes through which messages are then routed - like user sessions or server-to-server communication channels - or various handlers, e.g. IQ request handlers. Those must obviously be removed when a node goes down, and to do this the modules ejabberd_local, ejabberd_router, ejabberd_s2s, ejabberd_sm and mod_bosh register their handlers with this hook.

The number of retries for this transaction is set to 1, which means that in some situations the hook may be run on more than one node in the cluster, especially when there is little garbage to clean after the dead node. Setting retries to 0 is not a good decision, as it was observed that in some setups it may abort the transaction on all nodes.

"},{"location":"developers-guide/hooks_description/#session_opening_allowed_for_user","title":"session_opening_allowed_for_user","text":"
allow == mongoose_hooks:session_opening_allowed_for_user(HostType, JID)\n

This hook is run after authentication, when the user sends the IQ opening a session. Handler functions are expected to return:

  • allow if a given JID is allowed to open a new session (the default)
  • deny if the JID is not allowed but other handlers should be run
  • {stop, deny} if the JID is not allowed but other handlers should not be run

In the default implementation the hook is not used, built-in user control methods are supported elsewhere. This is the perfect place to plug in custom security control.

"},{"location":"developers-guide/hooks_description/#other-hooks","title":"Other hooks","text":"
  • acc_room_affiliations
  • adhoc_local_commands
  • adhoc_sm_commands
  • amp_check_condition
  • amp_determine_strategy
  • amp_verify_support
  • anonymous_purge
  • auth_failed
  • c2s_stream_features
  • can_access_identity
  • can_access_room
  • caps_recognised
  • check_bl_c2s
  • disco_info
  • disco_local_features
  • disco_local_identity
  • disco_local_items
  • disco_muc_features
  • disco_sm_features
  • disco_sm_identity
  • disco_sm_items
  • does_user_exist
  • extend_inbox_result
  • failed_to_store_message
  • filter_local_packet
  • filter_packet
  • filter_pep_recipient
  • filter_room_packet
  • filter_unacknowledged_messages
  • forbidden_session
  • foreign_event
  • forget_room
  • get_key
  • get_mam_muc_gdpr_data
  • get_mam_pm_gdpr_data
  • get_pep_recipients
  • get_personal_data
  • inbox_unread_count
  • invitation_sent
  • is_muc_room_owner
  • join_room
  • leave_room
  • mam_archive_id
  • mam_archive_message
  • mam_archive_size
  • mam_archive_sync
  • mam_flush_messages
  • mam_get_behaviour
  • mam_get_prefs
  • mam_lookup_messages
  • mam_muc_archive_id
  • mam_muc_archive_message
  • mam_muc_archive_size
  • mam_muc_archive_sync
  • mam_muc_flush_messages
  • mam_muc_get_behaviour
  • mam_muc_get_prefs
  • mam_muc_lookup_messages
  • mam_muc_remove_archive
  • mam_muc_retraction
  • mam_muc_set_prefs
  • mam_remove_archive
  • mam_retraction
  • mam_set_prefs
  • mod_global_distrib_known_recipient
  • mod_global_distrib_unknown_recipient
  • node_cleanup
  • offline_groupchat_message
  • offline_message
  • packet_to_component
  • presence_probe
  • privacy_check_packet
  • privacy_get_user_list
  • privacy_iq_get
  • privacy_iq_set
  • privacy_list_push
  • privacy_updated_list
  • push_notifications
  • register_subhost
  • register_user
  • remove_domain
  • remove_user
  • reroute_unacked_messages
  • resend_offline_messages
  • room_exists
  • room_new_affiliations
  • room_packet
  • roster_get
  • roster_get_jid_info
  • roster_get_subscription_lists
  • roster_get_versioning_feature
  • roster_groups
  • roster_in_subscription
  • roster_out_subscription
  • roster_process_item
  • roster_push
  • roster_set
  • s2s_allow_host
  • s2s_receive_packet
  • s2s_send_packet
  • s2s_stream_features
  • session_cleanup
  • session_opening_allowed_for_user
  • set_presence
  • set_vcard
  • sm_filter_offline_message
  • sm_register_connection
  • sm_remove_connection
  • unacknowledged_message
  • unregister_subhost
  • unset_presence
  • update_inbox_for_muc
  • user_available
  • user_open_session
  • user_ping_response
  • user_receive_iq
  • user_receive_message
  • user_receive_packet
  • user_receive_presence
  • user_receive_xmlel
  • user_send_iq
  • user_send_message
  • user_send_packet
  • user_send_presence
  • user_send_xmlel
  • user_socket_closed
  • user_socket_error
  • user_stop_request
  • user_terminate
  • vcard_set
  • xmpp_bounce_message
  • xmpp_presend_element
  • xmpp_send_element
  • xmpp_stanza_dropped
"},{"location":"developers-guide/logging/","title":"Logging","text":"

To use logger in your module, include

-include(\"mongoose_logger.hrl\").\n
or
-include(\"mongoose.hrl\").\n

"},{"location":"developers-guide/logging/#logging-macros","title":"Logging macros","text":"

There are several macros for the most common logging levels:

?LOG_DEBUG(#{what => debug_event, info => Arg}),\n?LOG_INFO(#{what => info_event, info => Arg}),\n?LOG_NOTICE(#{what => notice_event, info => Arg}),\n?LOG_WARNING(#{what => warning_event, info => Arg}),\n?LOG_ERROR(#{what => error_event, info => Arg}),\n?LOG_CRITICAL(#{what => critical_event, info => Arg}),\n

Use them in correspondence with the appropriate log level. Please be mindful of what is logged and which log level is used for it.

"},{"location":"developers-guide/logging/#logging-levels","title":"Logging levels","text":"

A system operator can choose the global log level by setting loglevel in mongooseim.toml.

Possible values are the standard syslog severity levels, plus all or none: \"all\", \"debug\", \"info\", \"notice\", \"warning\", \"error\", \"critical\", \"alert\", \"emergency\", and \"none\".

[general]\n  loglevel = \"notice\"\n

If a user sets the log level to all, then they would see all messages in logs.

Levels warning and error are the most commonly used for production systems.

"},{"location":"developers-guide/logging/#logging-format","title":"Logging format","text":"

We use structured logging as inspired by Ferd's post. We also use a modified logfmt format as one of the possible default logger formatters. This format is Splunk and ELK friendly. Check the list of fields for fields documentation.

what => something_interesting field is required.

    ?LOG_ERROR(#{what => check_password_failed,\n                 reason => Error, user => LUser})\n\n    try ...\n    catch\n        Class:Reason:StackTrace ->\n            ?LOG_ERROR(#{what => check_password_failed,\n                         class => Class, reason => Reason, stacktrace => StackTrace}),\n            erlang:raise(Class, Reason, StackTrace)\n    end\n

Field user => <<\"alice\">> is often used too.

A common way to name an error event is what => function_name_failed. For example, what => remove_user_failed. Use the advice critically, it would not work well for any function. Counterexample:

handle_info(Info, State) ->\n    ?LOG_WARNING(#{what => unexpected_message, msg => Info}),\n    {noreply, State}.\n
"},{"location":"developers-guide/logging/#filtering-logs-by-module","title":"Filtering logs by module","text":"

Setting loglevel to debug can lead to a flood of messages in logs. To set a different loglevel for just one module, call:

mongoose_logs:set_global_loglevel(error).\nmongoose_logs:set_module_loglevel(mod_mam, debug).\n

This code sets the loglevel to error for all log messages, except for those generated by mod_mam. All messages from mod_mam would be logged.

"},{"location":"developers-guide/mod_amp_developers_guide/","title":"The Developer's Guide to mod_amp","text":"

This is a quick, introductory guide for developers wishing to extend mod_amp or plug into the message processing system.

"},{"location":"developers-guide/mod_amp_developers_guide/#source-files-headers-and-tests","title":"Source Files, Headers and Tests","text":"
  • include/amp.hrl This header file contains the amp XML namespace and the types used by mod_amp: amp_rule() and amp_strategy() are the top-level points of interest.

  • src/mod_amp.erl This module is responsible for plugging in all the other components. Its main driving function is filter_packet. After determining that a given message contains amp rules, the module proceeds by determining its strategy for the message and comparing it against the rules. The server may return an error at multiple points in its workflow. This is signaled by calling the function send_error_and_drop/3 or send_errors_and_drop/2.

  • src/amp.erl This module is responsible for parsing rules from incoming elements and serializing server responses in the proper format. binaries_to_rule/3 can return either a proper amp_rule(), or an amp_invalid_rule(), which does not contain sensible values, but can be used by the server to create an appropriate error message.

  • test/amp_SUITE.erl Tests for the API functions exported by amp.erl

  • src/amp_strategy.erl This module is where the server-side hook for determining a default action for a given message is performed. Calls to ejabberd_sm are made here.

  • src/amp_resolver.erl This module models the resolution of amp rules, given a certain strategy. Also, the function verify_rule_support is hard-coded here to return an unsupported-type error for unsupported rule actions and values.

  • test/amp_resolver_SUITE.erl These tests verify that the amp_resolver:check_condition/3 hook works as intended, i.e: that the rules which would be triggered given a particular server-side strategy actually do get triggered, and that all others get rejected.

  • test/amp_gen.erl This module contains PropEr generators for server-side strategies, as well as valid and invalid amp rules. Used in both test suites.

"},{"location":"developers-guide/mod_amp_developers_guide/#hooks-for-other-modules","title":"Hooks for Other Modules","text":"

If your module would like to have some say in the amp decision making process, please refer to the hooks: amp_determine_strategy and amp_check_condition. Remember that the hook for check_condition is a fold on a boolean(), and should behave like a variadic or. I.e: once a rule is deemed to apply, other hooks SHOULD NOT revert this value to false.

Cf. this code from amp_resolver:

-spec check_condition(any(), amp_strategy(), amp_condition(), amp_value())\n                          -> boolean().\ncheck_condition(HookAcc, Strategy, Condition, Value) ->\n    case HookAcc of\n        true -> true;   %% SOME OTHER HOOK HAS DECIDED THAT THIS RULE APPLIES %%\n        _    -> resolve(Strategy, Condition, Value) %% PERFORM LOCAL CHECK %%\n    end.\n
"},{"location":"developers-guide/mod_amp_developers_guide/#ideas-for-further-development","title":"Ideas for Further Development","text":""},{"location":"developers-guide/mod_amp_developers_guide/#easy","title":"Easy","text":"
  • Implement the 'alert' and 'drop' action types.
  • Implement support for the 'stored' value for 'deliver'
"},{"location":"developers-guide/mod_amp_developers_guide/#medium","title":"Medium","text":"
  • Implement the security policy described in the third bullet point of XEP-0079, Section 9 (Security Considerations). This will require that amp_resolver:verify_support also take the {From, To, Packet} :: hook_data() parameter and check that From is permitted to know about To's presence. If they are not, then the server should treat this as a not-acceptable amp request.

  • Make support for various actions, conditions and values configurable. This will require implementing an intelligent mechanism for matching the user-supplied rules with what's configured server-side. Currently, server-side support is hard-coded in several places:

    1. Disco announcements are in mod_amp:amp_features/0
    2. Rule support is in amp_resolver:verify_rule_support/1
    3. Every other function that deals with rules can handle unsupported rules, but ignores their meaning and decides that these rules don't apply.
"},{"location":"developers-guide/mod_amp_developers_guide/#hard","title":"Hard","text":"
  • Implement support for the 'expire-at' condition.
"},{"location":"developers-guide/mod_muc_light_developers_guide/","title":"The Developer's Guide to mod_muc_light","text":"

This is an in-depth guide on mod_muc_light design decisions and implementation.

"},{"location":"developers-guide/mod_muc_light_developers_guide/#source-header-and-test-suite-files","title":"Source, header and test suite files","text":"

All source files can be found in src/muc_light/.

  • mod_muc_light.erl

Main module. It implements the gen_mod behaviour. It subscribes to some essential hooks and exports several functions, mostly callbacks. It handles integration with mod_disco, mod_privacy and mod_roster. All operations that take place outside the room (including the room creation) are implemented here. Last but not least - this module prevents service-unavailable errors being sent when an offline user receives a groupchat message.

  • mod_muc_light_codec_backend.erl

A behaviour implemented by modules that translate the MUC Light internal data format to stanzas for clients and vice versa. Besides specifying callbacks, it implements generic error encoder function.

  • mod_muc_light_codec_legacy.erl

An implementation of XEP-0045 compatibility mode. Note, that while some parts of the legacy mode are implemented directly in mod_muc_light.erl, the stanza translation takes place here. It does not utilise the full potential of the MUC Light extension but allows using the standard MUC implementation in XMPP client libraries for prototyping or the transition phase. Not recommended for production systems (less efficient than modern codec and requires more round-trips).

  • mod_muc_light_codec_modern.erl

An implementation of a modern MUC Light protocol, described in the XEP. Supports all MUC Light features.

  • mod_muc_light_db_backend.erl

A behaviour implemented by database backends for the MUC Light extension.

  • mod_muc_light_db_mnesia.erl

A Mnesia backend for this extension. Uses transactions for room metadata updates (configuration and affiliation list) and dirty reads whenever possible.

  • mod_muc_light_db_rdbms.erl

An SQL backend for mod_muc_light. create_room, destroy_room, remove_user, set_config, modify_aff_users execute at least one query in a single transaction. room_exists, get_user_rooms, get_user_rooms_count, get_config, get_blocking, set_blocking, get_aff_users execute only one query per function call. get_info executes 3 SELECT queries, not protected by a transaction.

  • mod_muc_light_db_rdbms_sql.erl

SQL queries for mod_muc_light_db_rdbms.erl.

  • mod_muc_light_room.erl

This module handles everything that occurs inside the room: access checks, metadata changes, message broadcasting etc.

  • mod_muc_light_utils.erl

Utilities shared by other MUC Light modules. It includes the room configuration processing and the affiliation logic.

The header file can be found in include/.

  • mod_muc_light.hrl

It contains definitions of MUC Light namespaces, default configuration options and several common data types and records.

There are 2 test suites and one helper module in big_tests/tests.

  • muc_light_SUITE.erl

Main test suite, checks all the most important functionalities of the MUC Light extension.

  • muc_light_legacy_SUITE.erl

muc_light_SUITE.erl equivalent that uses XEP-0045 compatibility mode.

  • muc_helper.erl

Provides handy iterators over room participants. Used in MUC Light suites but in the future could be used in muc_SUITE as well.

"},{"location":"developers-guide/mod_muc_light_developers_guide/#hooks-handled-by-this-extension","title":"Hooks handled by this extension","text":"
  • offline_groupchat_message handled by mod_muc_light:prevent_service_unavailable/3

Prevents the default behaviour of sending service-unavailable error to the room when a groupchat message is sent to an offline occupant.

  • remove_user handled by mod_muc_light:remove_user/2

Triggers DB cleanup of all data related to the removed user. Includes a broadcast of a notification about user removal from occupied rooms.

  • disco_local_items handled by mod_muc_light:get_muc_service/5

Adds a MUC service item to the Disco result. Uses either a MUC Light or a classic MUC namespace when the legacy mode is enabled.

  • roster_get handled by mod_muc_light:add_rooms_to_roster/2

Injects room items to the user's roster.

  • privacy_iq_get, privacy_iq_set handled by mod_muc_light:process_iq_get/5 and mod_muc_light:process_iq_set/4 respectively

These callbacks handle blocking settings when legacy mode is enabled.

  • is_muc_room_owner, can_access_room, can_access_identity used by mod_muc_light:is_room_owner/3, mod_muc_light:can_access_room/3 and mod_muc_light:can_access_identity/3 respectively

Callbacks that provide essential data for the mod_mam_muc extension.

"},{"location":"developers-guide/mod_muc_light_developers_guide/#hooks-executed-by-this-extension","title":"Hooks executed by this extension","text":"
  • filter_room_packet by codecs

Allows mod_mam_muc to archive groupchat messages.

  • forget_room by mod_muc_light_db_mnesia and mod_muc_light_room

It is a part of mod_mam_muc integration as well. A hook used for MAM cleanup upon room destruction.

"},{"location":"developers-guide/mod_muc_light_developers_guide/#advantages-and-drawbacks-compared-to-classic-muc","title":"Advantages and drawbacks (compared to classic MUC)","text":"

The new MUC implementation brings quite a few benefits to the table:

  • It is fully distributed - Does not have SPOF, concurrent senders do not block each other, especially in large rooms. Message broadcasting is being done in sender c2s context.
  • It does not use presences - Much less traffic and stable membership information, especially on mobile networks.
  • It provides built-in blocking support - Instead of blocking traffic like Privacy Lists do, it handles blocklists internally, preventing the blocker from being added to or by blocked entities.
  • Less round-trips - A room can be created and configured with an initial list of occupants with a single request.
  • Versioning - Reduces traffic and allows clients to reliably and quickly detect that the room state has changed.
  • Isolation - Processing errors are contained in a sender context, not affecting other room occupants.
  • Fully customisable room configuration - Your users can store any meta room information you allow.

Drawbacks are:

  • Requires DB transactions to ensure Room state consistency.
  • Fetches the occupant list from DB for every message that is broadcasted.
  • Due to concurrent message broadcast, it is possible for occupants to receive messages in a different order (given the messages are broadcasted at the exactly same time).
  • With stream resumption disabled or when resumption times out, a user may miss a message in the following scenario:
  • Message A archived
  • Message B archived
  • Message B delivered to the user
  • User loses connection
  • Resumption timeout
  • User queries MAM for all messages after B and misses A
"},{"location":"developers-guide/mod_muc_light_developers_guide/#ideas-for-further-development","title":"Ideas for Further Development","text":""},{"location":"developers-guide/mod_muc_light_developers_guide/#easy","title":"Easy","text":"
  • Add more tests for negative cases
"},{"location":"developers-guide/mod_muc_light_developers_guide/#medium","title":"Medium","text":"
  • Add optional per-room processes to avoid the need of DB transactions and ensure message ordering (maybe \"hard\"?).
  • Redis backend
"},{"location":"developers-guide/mod_muc_light_developers_guide/#hard","title":"Hard","text":"
  • Room metadata cache (maybe \"medium\"?).
"},{"location":"developers-guide/mongoose_wpool/","title":"mongoose_wpool","text":"

All the outgoing pools configured by the outgoing_pools option are hidden behind the mongoose_wpool API. Every pool is described by a tuple {Type, Host, Tag, PoolOptions, ConnectionOptions} (see outgoing pools for details about each element of the tuple).

"},{"location":"developers-guide/mongoose_wpool/#supervision-tree","title":"Supervision tree","text":"
  • mongoose_wpool_sup supervisor for every type of the pool. Under it there can be many children of:
    • mongoose_wpool_type_sup is started on-demand when a pool of given type is started. Many pools of the same type are supervised by the supervisor. Its children are:
      • mongoose_wpool_mgr all the pools of the same type are managed by a manager. It's responsible for starting, stopping and restarting the pool. Restarting happens when the main worker_pool process for the pool is stopped unintentionally. This usually happens when there were too many restarts of worker processes.
      • many worker_pool supervisors holding a specific pool are on the same level as the manager.

The mongoose_wpool_mgr manages the pool by setting monitor for every started pool.

"},{"location":"developers-guide/mongoose_wpool/#implementing-new-pool-type","title":"Implementing new pool type","text":"

To add a new pool type, create a mongoose_wpool_NEW_TYPE module implementing the mongoose_wpool behaviour. This means that for a new type xyz we need to create a mongoose_wpool_xyz module. Then we can use the xyz type to start the pool via outgoing_pools option or directly via the mongoose_wpool API.

"},{"location":"developers-guide/release_config/","title":"Release/Installation configuration","text":""},{"location":"developers-guide/release_config/#advanced-release-configuration","title":"Advanced release configuration","text":"

It's now possible to install MongooseIM from source in two modes:

  • system - it's used internally to generate Linux packages (.deb, .rpm)
  • user - which is the default mode and used for testing on GitHub Actions and in development

You can also build OS specific packages by using the tools in [MongooseIM repo root]/tools/pkg - refer to README.md therein.

"},{"location":"developers-guide/release_config/#configure-script","title":"Configure script","text":"

The tools/configure script can be used to specify which 3rd party dependencies should be included in the final release or to set the installation prefix and installation mode. More details can be found in the tool's help. The help is printed when the script is run without any parameters: tools/configure

configure: OPTIONS\n\nSpecifies which 3rd party deps will be included in the release.\nWrites configure.out file as output - this file can be sourced with:\n\n    . configure.out\n\nWrites rel/configure.vars.config which can be used as Reltool input.\n\n3rd party apps:\n\n    with-none           include no 3rd party drivers\n    with-all            include all drivers\n    with-mysql          include mysql driver\n    with-odbc           include an ODBC driver (requires unixodbc to compile)\n    with-pgsql          include pgsql driver\n    with-redis          include redis driver\n\nOptions:\n\n    prefix    Installation PREFIX directory. Default: /usr/local\n    system    Install files into $PREFIX/{bin, etc, ...} instead of a completely self contained release. Default: no\n    user      System user to run the server as. Default:\n

This script is also accessible via the make configure target.

"},{"location":"developers-guide/release_config/#example","title":"Example","text":"

If mysql and redis are the only drivers that should be included in the release, run the following command before make rel:

$ ./tools/configure with-mysql with-redis\n

You only need to run the ./tools/configure command once (unless changing the release's config is needed to include some other dependencies).

"},{"location":"developers-guide/release_config/#system-install","title":"System install","text":"

To manually test the installation run tools/test-install.sh. This script is intended for careful inspection by a human user, not for automation. Results should be similar to those described below.

On Mac:

./tools/configure with-all user=erszcz prefix=/tmp/mim-sandbox-system system=yes\ncat configure.out rel/configure.vars.config\nRUNNER_GROUP=staff make install\n

Overriding RUNNER_GROUP on a Mac is necessary, as users by default don't have private groups of the same name as their usernames.

Generated build configs:

$ cat configure.out rel/configure.vars.config\nexport MONGOOSEIM_CONFIGURED=\"yes\"\nexport APPS=\"mysql eodbc epgsql eredis nksip cqerl tirerl erlcloud\"\nexport PREFIX=\"/tmp/mim-sandbox-system\"\nexport RELTOOL_VARS=\"rel/configure.vars.config\"\nexport SYSTEM=\"yes\"\nexport RUNNER_USER=\"erszcz\"\nexport BIN_DIR=\"$PREFIX/usr/bin\"\nexport ETC_DIR=\"$PREFIX/etc/mongooseim\"\nexport LIB_DIR=\"$PREFIX/usr/lib/mongooseim\"\nexport LOG_DIR=\"$PREFIX/var/log/mongooseim\"\nexport MDB_DIR=\"$PREFIX/var/lib/mongooseim\"\nexport LOCK_DIR=\"$PREFIX/var/lock/mongooseim\"\nexport PID_DIR=\"$PREFIX/var/lib/mongooseim\"\nexport STATUS_DIR=\"$PREFIX/var/lib/mongooseim\"\n{mongooseim_runner_user, \"erszcz\"}.\n{mongooseim_script_dir, \"/tmp/mim-sandbox-system/usr/lib/mongooseim/bin\"}.\n{mongooseim_etc_dir, \"/tmp/mim-sandbox-system/etc/mongooseim\"}.\n{mongooseim_log_dir, \"/tmp/mim-sandbox-system/var/log/mongooseim\"}.\n{mongooseim_mdb_dir, \"/tmp/mim-sandbox-system/var/lib/mongooseim\"}.\n{mongooseim_pid_dir, \"/tmp/mim-sandbox-system/var/lib/mongooseim\"}.\n{mongooseim_status_dir, \"/tmp/mim-sandbox-system/var/lib/mongooseim\"}.\n{mongooseim_mdb_dir_toggle, []}.\n{mongooseim_lock_dir, \"/tmp/mim-sandbox-system/var/lock/mongooseim\"}.\n

Installed tree:

$ tree mim-sandbox-system/ -L 3\nmim-sandbox-system/\n\u251c\u2500\u2500 etc\n\u2502\u00a0\u00a0 \u2514\u2500\u2500 mongooseim\n\u2502\u00a0\u00a0     \u251c\u2500\u2500 app.config\n\u2502\u00a0\u00a0     \u251c\u2500\u2500 mongooseim.toml\n\u2502\u00a0\u00a0     \u2514\u2500\u2500 vm.args\n\u251c\u2500\u2500 usr\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 bin\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 mongooseimctl\n\u2502\u00a0\u00a0 \u2514\u2500\u2500 lib\n\u2502\u00a0\u00a0     \u2514\u2500\u2500 mongooseim\n\u2514\u2500\u2500 var\n    \u251c\u2500\u2500 lib\n    \u2502\u00a0\u00a0 \u2514\u2500\u2500 mongooseim\n    \u251c\u2500\u2500 lock\n    \u2502\u00a0\u00a0 \u2514\u2500\u2500 mongooseim\n    \u2514\u2500\u2500 log\n        \u2514\u2500\u2500 mongooseim\n\n13 directories, 4 files\n

Files which change after starting and stopping such an installation:

var/lib/mongooseim/DECISION_TAB.LOG\nvar/lib/mongooseim/LATEST.LOG\nvar/lib/mongooseim/last_activity.DCD\nvar/lib/mongooseim/muc_registered.DCD\nvar/lib/mongooseim/muc_room.DCD\nvar/lib/mongooseim/offline_msg.DAT\nvar/lib/mongooseim/passwd.DCD\nvar/lib/mongooseim/privacy.DCD\nvar/lib/mongooseim/private_storage.DAT\nvar/lib/mongooseim/roster.DCD\nvar/lib/mongooseim/roster_version.DCD\nvar/lib/mongooseim/schema.DAT\nvar/lib/mongooseim/vcard.DAT\nvar/lib/mongooseim/vcard_search.DCD\nvar/lib/mongooseim/pid\nvar/lib/mongooseim/status\nvar/log/mongooseim/crash.log\nvar/log/mongooseim/mongooseim.log\nvar/log/mongooseim/erlang.log.1\nvar/log/mongooseim/run_erl.log\n
"},{"location":"developers-guide/release_config/#caveats","title":"Caveats","text":"
  • Running make install will blindly overwrite any configs it encounters on its way. Mnesia database and log files are preserved only due to the fact that they're not build process artifacts.
"},{"location":"developers-guide/xep_tool/","title":"XEP-tool usage","text":"

The XEP-tool is the answer for developers who wonder how to maintain an up-to-date list of supported XEPs. It's a fast and easy way to automatically produce documentation from raw .beam files. This is a quick guide on how to enjoy the usage of the XEP-tool.

"},{"location":"developers-guide/xep_tool/#sign-your-module-file-first","title":"Sign your module file first","text":"

The architecture of MongooseIM determines that almost every XEP or feature implementation resides in its own file. It is not strictly enforced but usually the file is named with a mod_ prefix. For example mod_privacy file implements XEP-0016: Privacy Lists.

"},{"location":"developers-guide/xep_tool/#mandatory-xep-and-version","title":"Mandatory xep and version","text":"

In order to let the XEP-tool know about your module, we add a special attribute xep at the beginning of the mod_privacy module:

-xep([{xep, 16}, {version, \"1.6\"}]).\n

Now we know that this module implements XEP-0016: Privacy Lists with version 1.6.

It gives the tool enough information to generate a URL to the XEP homepage. If the module implements a ProtoXEP, the xep property should be an atom:

-xep([{xep, 'muc-light'}, {version, \"0.0.1\"}]).\n

You ought to remember to specify xep and version properties every time. You can also put several xep attributes in one module. For example mod_roster implements attributes of XEP-0083: Nested Roster Groups, XEP-0093: Roster Item Exchange and XEP-0237: Roster Versioning. Just list them one after another:

-xep([{xep, 237}, {version, \"1.3\"}]).\n-xep([{xep, 83}, {version, \"1.0\"}]).\n-xep([{xep, 93}, {version, \"1.2\"}]).\n
"},{"location":"developers-guide/xep_tool/#status","title":"Status","text":"

By default the status is complete. You can also mark the implementation status as partial:

-xep([{xep, 79}, {version, \"1.2\"}, {status, partial}]).\n
"},{"location":"developers-guide/xep_tool/#legacy-versions","title":"Legacy versions","text":"

Sometimes the implementation is backwards-compatible, supporting legacy namespaces defined in a particular historical version. You can list such versions as below:

-xep([{xep, 313}, {version, \"0.6\"}, {legacy_versions, [\"0.5\"]}]).\n

Warning

Watch out for conflicts! Put the xep attribute in the main module implementing the extension to avoid conflicting declarations. If you need to specify the same XEP multiple times (e.g. because the implementation is split into two parts), make sure that the version properties are the same for all attributes - otherwise the XEP tool will report an error. The resulting status for a XEP is complete unless all implementing modules have the partial status.

"},{"location":"developers-guide/xep_tool/#compile-and-run","title":"Compile and run","text":"

After annotating all modules with the xep attributes, you need to run make in order to generate the .beam files. Next, you can run the XEP tool. It has a mandatory argument, which specifies the output format:

  • markdown - a Markdown table,
  • list - an Erlang list,
  • doap - Description Of A Project.

For example, to print out the DOAP, you can run the following from the MongooseIM project directory:

tools/xep_tool/xep_tool.escript doap\n

To save the output to a file, you can just provide the file name as the second argument.

tools/xep_tool/xep_tool.escript doap doc/mongooseim.doap\ntools/xep_tool/xep_tool.escript markdown doc/user-guide/Supported-XEPs.md\n

The last two commands have a shortcut in the Makefile:

make xeplist\n
"},{"location":"developers-guide/xep_tool/#examples-of-generated-content","title":"Examples of generated content","text":"
  • Markdown table
  • DOAP file
"},{"location":"getting-started/Installation/","title":"Installation","text":"

There are multiple ways in which you can get MongooseIM:

  • install MongooseIM binaries from a package Erlang Solutions delivers,
  • get the Docker image,
  • use the Helm chart.

Alternatively, check out our tutorial on How to build MongooseIM from source code for an introduction to compiling, building and testing MongooseIM.

"},{"location":"getting-started/Installation/#packages","title":"Packages","text":"

Go to the downloads section of the Erlang Solutions website, and choose the version of MongooseIM you want. The following sections describe the installation process for different operating systems.

Ubuntu and DebianCentOS compatible: AlmaLinux / Rocky Linux

Once the deb file is downloaded, open a terminal window and navigate to the directory containing the package. Use the following command to unpack and install MongooseIM:

sudo dpkg -i mongooseim_[version here].deb\n

An ODBC (RDBMS) driver must be installed on your machine to unpack and install from RPM packages. Enter the following command in a terminal window to install the latest unixODBC driver:

sudo yum install unixODBC\n

Once the RPM file is downloaded, open a terminal window and navigate to the directory containing the package. Use the following command to unpack and install MongooseIM:

sudo rpm -i mongooseim_[version here].rpm\n
"},{"location":"getting-started/Installation/#docker","title":"Docker","text":"

In order to install MongooseIM using Docker, simply run the following command:

docker pull mongooseim/mongooseim\n

This will download the latest release. You can use tags to download an exact version.

We build Docker images for every release marked with a git tag, as well as for every Pull Request. You can see all of them on DockerHub. In order to learn more about how the images are built, please visit the source code repository.

The mongooseimctl command is available in /usr/lib/mongooseim/bin/mongooseimctl in the container.

"},{"location":"getting-started/Installation/#helm","title":"Helm","text":"

You can easily install MongooseIM to a Kubernetes cluster with the help of our Helm chart, defined in the source code repository. After you have a Kubernetes cluster set up, simply run:

helm repo add mongoose https://esl.github.io/MongooseHelm/\n

to add our chart repository, and then:

helm install my-mongooseim mongoose/mongooseim\n

to install the chart. You can use any name instead of my-mongooseim, or generate a random name.

"},{"location":"getting-started/Installation/#source","title":"Source","text":"

Please see the tutorial How to build MongooseIM from source code.

"},{"location":"getting-started/Quick-setup/","title":"Quick Setup","text":"

In this short guide we will set MongooseIM up and get users chatting right away. The goal is to get to know MongooseIM, set it up, go through basic operations and validation.

You should have MongooseIM already installed on your machine and the mongooseimctl command available. If you have not installed MIM, please refer to the installation instructions.

Warning

This setup is not intended for production.

Note

This procedure has been tested on an Ubuntu 18.04.x LTS.

"},{"location":"getting-started/Quick-setup/#running-mongooseim","title":"Running MongooseIM","text":"

Warning

MongooseIM will use its default database - Mnesia, which is faster and simpler to set up, but not intended for production purposes when it comes to persistent data.

It is possible to use external databases instead - for more information, see the database backend configuration page.

The following command will start the MongooseIM server:

mongooseimctl start\n

When you change the config file and want to restart the MongooseIM server:

mongooseimctl restart\n

Use the following command to stop the MongooseIM server:

mongooseimctl stop\n
This takes a few seconds.

At any given time, the following command shows the status of a MongooseIM server:

mongooseimctl status\n
If the command replies nodedown then MongooseIM is not running. Else it will show its status starting, started, or stopping, and its version.

When needed, you can also launch the server in the interactive mode:

mongooseimctl live\n
This will allow you to better detect and understand the errors in the configuration. When MongooseIM is properly running, the Erlang shell/console is then shown. Just type Control-C twice to exit, the server will then be shut down.

For running MongooseIM in a non-interactive way within a supervision system (e.g. systemd), it is recommended to use the foreground mode:

mongooseimctl foreground\n
Typing Control-C will stop the server.

You can check server loglevel:

mongooseimctl get_loglevel\n

Run bootstrap scripts for initial configuration:

mongooseimctl bootstrap\n

It executes scripts inside the scripts/ directory with a bootstrap prefix in alphabetical order. More information

Execute Hello from the scripts/bootstrap01-hello.sh script that you can find in the release directory $REPO_DIR/_build/prod/rel/mongooseim.

"},{"location":"getting-started/Quick-setup/#chat-users","title":"Chat users","text":""},{"location":"getting-started/Quick-setup/#registering-creating-users","title":"Registering (creating) users","text":"

The default XMPP domain served by MongooseIM right after installation is localhost.

You can register (create) users with the mongooseimctl utility.

This command registers the user user@localhost using password secret.

mongooseimctl account registerUser --username user --domain localhost --password secret\n
Examples:
mongooseimctl account registerUser --username alice --domain localhost --password qwerty\nmongooseimctl account registerUser --username bob --domain localhost --password 12345678\nmongooseimctl account registerUser --username carol --domain localhost --password abc123\nmongooseimctl account registerUser --username dan --domain localhost --password dan\n

Warning

The password is entered manually in the command line and history is accessible to the command line users. This method is not recommended for production use, you may prefer for example LDAP.

You can check that the user account has been created:

mongooseimctl account checkUser --user alice@localhost\n{\n  \"data\" : {\n    \"account\" : {\n      \"checkUser\" : {\n        \"message\" : \"User alice@localhost exists\",\n        \"exist\" : true\n      }\n    }\n  }\n}\n

Now you can list all registered users in your host:

mongooseimctl account listUsers --domain localhost\n{\n  \"data\" : {\n    \"account\" : {\n      \"listUsers\" : [\n        \"alice@localhost\",\n        \"bob@localhost\",\n        \"carol@localhost\",\n        \"dan@localhost\"\n      ]\n    }\n  }\n}\n

If you want to delete a user from your host:

mongooseimctl account removeUser --user dan@localhost\n{\n  \"data\" : {\n    \"account\" : {\n      \"removeUser\" : {\n        \"message\" : \"User dan@localhost successfully unregistered\",\n        \"jid\" : \"dan@localhost\"\n      }\n    }\n  }\n}\n

"},{"location":"getting-started/Quick-setup/#populate-the-contact-lists-rosters","title":"Populate the contact lists (rosters)","text":"

As an example, let's add bob@localhost as a contact of alice@localhost:

mongooseimctl roster addContact --user alice@localhost --contact bob@localhost --groups '[\"friends\"]' --name Bobby\n{\n  \"data\" : {\n    \"roster\" : {\n      \"addContact\" : \"Contact added successfully\"\n    }\n  }\n}\n

You need to quote [\"friends\"] because it is a list of strings - JSON syntax is required for such complex types. The single quotes are there to prevent bash from interpreting special characters like \". If you want alice@localhost to receive presences from bob@localhost, you first need to request the subscription:

mongooseimctl roster subscription --user alice@localhost --contact bob@localhost --action INVITE\n{\n  \"data\" : {\n    \"roster\" : {\n      \"subscription\" : \"Subscription stanza with type subscribe sent successfully\"\n    }\n  }\n}\n

Then, accept the subscription request:

mongooseimctl roster subscription --user bob@localhost --contact alice@localhost --action ACCEPT\n{\n  \"data\" : {\n    \"roster\" : {\n      \"subscription\" : \"Subscription stanza with type subscribed sent successfully\"\n    }\n  }\n}\n

Verify the contact list:

mongooseimctl roster listContacts --user alice@localhost\n{\n  \"data\" : {\n    \"roster\" : {\n      \"listContacts\" : [\n        {\n          \"subscription\" : \"TO\",\n          \"name\" : \"Bobby\",\n          \"jid\" : \"bob@localhost\",\n          \"groups\" : [\n            \"friends\"\n          ],\n          \"ask\" : \"NONE\"\n        }\n      ]\n    }\n  }\n}\n

Note that bob@localhost has alice@localhost in his contacts as well, but he is not subscribed to her presences - the subscriptions are unidirectional.

mongooseimctl roster listContacts --user bob@localhost\n{\n  \"data\" : {\n    \"roster\" : {\n      \"listContacts\" : [\n        {\n          \"subscription\" : \"FROM\",\n          \"name\" : \"\",\n          \"jid\" : \"alice@localhost\",\n          \"groups\" : [\n\n          ],\n          \"ask\" : \"NONE\"\n        }\n      ]\n    }\n  }\n}\n

To quickly set up mutual subscriptions between users, you can use mongooseimctl roster setMutualSubscription.

"},{"location":"getting-started/Quick-setup/#basic-mongooseim-configuration","title":"Basic MongooseIM configuration","text":"

The main configuration file of MongooseIM is mongooseim.toml:

/etc/mongooseim/mongooseim.toml\n
You can edit this file to tailor MongooseIM to your needs. Learn more about MongooseIM configuration files in general, or jump right into the documentations of different mongooseim.toml sections.

For each change, edit the configuration file using the right Linux/Unix user. Save (and optionally backup, archive, or version) the configuration file and restart the MongooseIM server.

"},{"location":"getting-started/Quick-setup/#logging","title":"Logging","text":"

Set your own loglevel in the configuration file:

[general]\n  loglevel = \"notice\"\n

Save and exit your editor, restart MongooseIM and check your loglevel from the command line:

mongooseimctl get_loglevel\n

Read the mongooseim.log file:

/var/log/mongooseim/mongooseim.log\n

You can use commands such as cat, more or less, even head or tail. In order to see live logs:

tail -f /var/log/mongooseim/mongooseim.log\n
Type Ctrl+C to exit.

"},{"location":"getting-started/Quick-setup/#muc-multi-user-chat-for-groupchats","title":"MUC (Multi-User Chat) for groupchats","text":"

Enable MUC, or Multi-User Chat, for groupchats/channels in the mongooseim.toml file:

[modules.mod_muc]\n  host = \"muc.@HOST@\"\n  access = \"muc\"\n  access_create = \"muc_create\"\n

"},{"location":"getting-started/Quick-setup/#roster-versioning","title":"Roster versioning","text":"

For faster contact list downloads at each client/app (re)connection, edit the configuration file:

[modules.mod_roster]\n  versioning = true\n  store_current_id = true\n

"},{"location":"getting-started/Quick-setup/#review-configuration","title":"Review configuration","text":"

If MongooseIM does not start because the configuration file is broken in some way:

mongooseimctl live\n

"},{"location":"getting-started/Quick-setup/#using-an-xmppjabber-clientapp","title":"Using an XMPP/Jabber client/app","text":"

The following steps use the registered users on the MongooseIM server, done above.

Users that are registered on your server can now add their accounts in a chat application like Gajim (specifying either the server\u2019s IP address or domain name), and start chatting!

"},{"location":"getting-started/Quick-setup/#note-about-session-conflicts","title":"Note about session conflicts","text":"

If you're going to connect several clients with the same username and domain (for example a phone and a laptop), please make sure they are using different resource names (a kind of device/client identifier). This should be configurable in the account settings of every XMPP client.

Otherwise, the clients will keep disconnecting each other, because MongooseIM always terminates the older session in case of a conflict.

"},{"location":"getting-started/Quick-setup/#connect-gajim","title":"Connect Gajim","text":"

Gajim is available on many Linux platforms, macOS & Windows.

Warning

Gajim has an obsolete UX. However, it is still well maintained, and has a console that is extremely useful for debugging and testing/validation purposes at the XMPP protocol level.

  1. Launch Gajim. Ignore the window with Plugin updates.
  2. Go to Edit -> Accounts.
  3. Click Add in the left part of the window and select I already have an account I want to use, click Forward
  4. Enter the user, domain and password for the accounts registered previously on the command line
  5. Click Forward and then Finish
  6. Ignore the TLS/SSL error/warning and continue
  7. Close the Account window.

Add your three created users: alice, bob, and carol.

Check what users are currently connected.

mongooseimctl session listSessions\n{\n  \"data\" : {\n    \"session\" : {\n      \"listSessions\" : [\n        {\n          \"user\" : \"bob@localhost/BobsComputer\",\n          \"uptime\" : 12,\n          \"priority\" : 50,\n          \"port\" : 56267,\n          \"node\" : \"mongooseim@localhost\",\n          \"ip\" : \"127.0.0.1\",\n          \"connection\" : \"c2s_tls\"\n        }\n      ]\n    }\n  }\n}\n

The result shows that Bob is currently connected.

"},{"location":"getting-started/Quick-setup/#chat-with-another-person","title":"Chat with another person","text":"

Use alice's account to send messages directly to bob and use bob's account to reply directly to alice.

It is possible to send a message from the command line:

mongooseimctl stanza sendMessage --from alice@localhost --to bob@localhost --body 'Hi Bob!'\n

You need to quote Hi Bob!, because it contains a space. If you do it while Bob is connected, he should receive the message in the XMPP client.

"},{"location":"getting-started/Quick-setup/#group-chats","title":"Group chats","text":"

Use alice's account to create a groupchat channel on your muc.localhost service, and configure it by making it persistent. Invite bob and carol. From bob's and carol's accounts, accept the invitation and join the channel groupchat. All three users exchange messages.

"},{"location":"getting-started/Quick-setup/#contact-lists","title":"Contact lists","text":"

Use carol's account to add alice and bob to her contact list. Use alice's and bob's accounts to accept those additions.

Verify on the MongooseIM server:

mongooseimctl roster listContacts --user alice@localhost\nmongooseimctl roster listContacts --user bob@localhost\n

"},{"location":"getting-started/Quick-setup/#profile-vcard","title":"Profile (vCard)","text":"

Edit alice's profile (vCard) in Gajim: Modify Account..., then Profile, just set her Name to Alice.

Verify on the MongooseIM server:

mongooseimctl vcard getVcard --user alice@localhost\n{\n  \"data\" : {\n    \"vcard\" : {\n      \"getVcard\" : {\n        (...)\n        \"telephone\" : [\n          {\n            \"tags\" : [\n              \"HOME\",\n              \"VOICE\"\n            ],\n            \"number\" : \"123456789\"\n          }\n        ],\n        (...)\n        \"formattedName\" : \"Alice\",\n        (...)\n      }\n    }\n  }\n}\n

"},{"location":"getting-started/Quick-setup/#summary","title":"Summary","text":"

Now you have the minimum knowledge: you know how to deploy MongooseIM, configure some basic features, check/verify a few useful items, validate it both on the client and server side, and utilize a few good practices.

"},{"location":"getting-started/Quick-setup/#summary-command-line","title":"Summary: command line","text":"

You know mongooseimctl, with basic server management commands such as:

  • start, restart, stop, status, live, foreground
  • get_loglevel

Other commands shown above correspond to the GraphQL Admin API operations, and they are grouped into the following categories:

  • account contains registerUser, checkUser, listUsers, removeUser
  • roster contains addContact, subscription, listContacts, setMutualSubscription
  • session contains listSessions
  • stanza contains sendMessage
  • vcard contains getVcard

There are more categories and commands. For a list of categories, use mongooseimctl without any arguments. To get a list of commands in a particular category, call mongooseimctl category. You can also get more information about a particular command with mongooseimctl category command --help.

"},{"location":"getting-started/Quick-setup/#summary-files","title":"Summary: files","text":"

You know basic entries in the files: /etc/mongooseim/mongooseim.toml /var/log/mongooseim/mongooseim.log

"},{"location":"getting-started/Quick-setup/#summary-clientapp","title":"Summary: client/app","text":"

In an app, you know how to:

  • connect
  • chat with another user
  • create/join groupchats
  • manage contact lists (roster)
  • edit profile (vCard)
"},{"location":"getting-started/Quick-setup/#go-further","title":"Go further","text":"

For the next steps, we now encourage you to:

  1. Deploy it as a single node, on a publicly accessible server, with a real routable domain name with its certificate
  2. Add an RDBMS for persistent data, and LDAP for user directory
  3. Enable message history with MAM (Message Archive Management)
  4. Enable file exchange with HTTP file upload, with an S3-compatible object storage server
  5. Use a mobile app for users to chat
"},{"location":"graphql-api/Admin-GraphQL/","title":"MongooseIM's GraphQL API for the administrator","text":"

The new GraphQL admin API contains all the commands available through the REST API, and the vast majority of the CLI (mongooseimctl) commands. Only commands that wouldn't have worked well with GraphQL style have been omitted.

We can distinguish two levels of administration: a global admin (has access to all commands), and an admin per domain (has access only to their own domain). Each of them is handled by a different endpoint. Please see the configuration Listen section for more details.

There is only one schema for both admin types. An admin per domain simply has no permission to execute global commands or commands for domains they do not own. The API documentation clearly says which commands are global.

Queries and mutations can be executed with the POST or GET method, as specified in the GraphQL documentation. The endpoint URL is as configured in the Listen section, e.g. http://localhost:5551/api/graphql for the global admin.

Subscriptions can be executed with the GET method, and are handled with Server-Sent Events (SSE). The endpoint URL is the same as for regular queries with the addition of /sse, e.g. http://localhost:5551/api/graphql/sse for the global admin.

"},{"location":"graphql-api/Admin-GraphQL/#domain-admin-configuration","title":"Domain admin configuration","text":"

Out of the box, domains are created with a disabled admin account. Admin per domain can be enabled only by the global admin with the command mutation.domains.setDomainPassword. Afterward, the domain admin can change the password with the same command.

The admin per domain can be disabled by the global admin with the command mutation.domains.removeDomainPassword.

"},{"location":"graphql-api/Admin-GraphQL/#authentication","title":"Authentication","text":"

MongooseIM uses Basic Authentication as an authentication method for the GraphQL API.

Basic authentication is a simple authentication scheme built into the HTTP protocol. Each HTTP request to the GraphQL API has to contain the Authorization header with the word Basic followed by a space and a base64-encoded string.

"},{"location":"graphql-api/Admin-GraphQL/#global-admin-endpoint","title":"Global admin endpoint","text":"

The authentication for the global admin is optional because this endpoint shouldn't be exposed outside. The credentials set in the handler section in the config enable authentication. Please see the GraphQL handler section for more details.

The base64-encoded string should have the form LOGIN:PASSWORD, where:

  • LOGIN is the login set in the config,
  • PASSWORD is the password set in the config.
"},{"location":"graphql-api/Admin-GraphQL/#domain-admin-endpoint","title":"Domain admin endpoint","text":"

To authorize as a domain admin, the base64-encoded string should have the form admin@DOMAIN:PASSWORD, where:

  • DOMAIN is the domain to authorize,
  • PASSWORD is the password for the given domain.
"},{"location":"graphql-api/Admin-GraphQL/#graphiql","title":"GraphiQL","text":"

GraphiQL is the GraphQL integrated development environment (IDE). It allows you to experiment with the API and run queries with ease. The GraphiQL page is automatically served with each GraphQL endpoint. For example:

http://localhost:5551/api/graphql

Open the above address in your browser and try to use it.

"},{"location":"graphql-api/Admin-GraphQL/#authorization","title":"Authorization","text":"

Executing some of the queries requires authorization. Just add the following JSON into the header tab. Remember to update the credentials.

{\n   \"Authorization\": \"Basic YWxpY2VAbG9jYWxob3N0OnNlY3JldA==\"\n}\n
"},{"location":"graphql-api/Admin-GraphQL/#static-documentation","title":"Static documentation","text":"

Open GraphQL documentation as a full page

"},{"location":"graphql-api/User-GraphQL/","title":"MongooseIM's GraphQL API for the user","text":"

The new GraphQL user API contains all commands from the client REST API and provides plenty of new ones. Multiple commands previously available only for the admin have their counterparts for the user.

Queries and mutations can be executed with the POST or GET method, as specified in the GraphQL documentation. The endpoint URL is as configured in the Listen section, e.g. http://localhost:5561/api/graphql.

Subscriptions can be executed with the GET method, and are handled with Server-Sent Events (SSE). The endpoint URL is the same as for regular queries with the addition of /sse, e.g. http://localhost:5561/api/graphql/sse.

"},{"location":"graphql-api/User-GraphQL/#authentication","title":"Authentication","text":"

MongooseIM uses Basic Authentication as the authentication method for the GraphQL API.

Basic authentication is a simple authentication scheme built into the HTTP protocol. Each HTTP request to the client REST API has to contain the Authorization header with the word Basic followed by a space and a base64-encoded string username@host:password, where:

  • username@host is the user's bare JID,
  • password is the password used to register the user's account.

For example, to authorize as alice@localhost with the password secret, the client would send a header:

Authorization: Basic YWxpY2VAbG9jYWxob3N0OnNlY3JldA==\n
"},{"location":"graphql-api/User-GraphQL/#graphiql","title":"GraphiQL","text":"

GraphiQL is the GraphQL integrated development environment (IDE). It allows you to experiment with the API and run queries with ease. The GraphiQL page is automatically served with each GraphQL endpoint. For example:

http://localhost:5561/api/graphql

Open the above address in your browser and try to use it.

"},{"location":"graphql-api/User-GraphQL/#authorization","title":"Authorization","text":"

Executing some of the queries requires authorization. Just add the following JSON into the header tab. Remember to update the credentials.

{\n   \"Authorization\": \"Basic YWxpY2VAbG9jYWxob3N0OnNlY3JldA==\"\n}\n
"},{"location":"graphql-api/User-GraphQL/#static-documentation","title":"Static documentation","text":"

Open GraphQL documentation as a full page

"},{"location":"listeners/listen-c2s/","title":"Client to server (C2S): [[listen.c2s]]","text":"

Handles XMPP client-to-server (C2S) connections. The recommended port number for a C2S listener is 5222 as registered in the XMPP protocol.

"},{"location":"listeners/listen-c2s/#configuration-options","title":"Configuration options","text":"

The following options are supported for each C2S listener:

"},{"location":"listeners/listen-c2s/#listenc2saccess","title":"listen.c2s.access","text":"
  • Syntax: string, rule name or \"all\"
  • Default: \"all\"
  • Example: access = \"c2s\"

The rule that determines who is allowed to connect. By default, the rule is \"all\", which means that anyone can connect. The rule referenced here needs to be defined in the access configuration section.

"},{"location":"listeners/listen-c2s/#listenc2sshaper","title":"listen.c2s.shaper","text":"
  • Syntax: string, rule name
  • Default: \"none\" (no shaper)
  • Example: shaper = \"c2s_shaper\"

The rule that determines what traffic shaper is used to limit the incoming XMPP traffic to prevent the server from being flooded with incoming data. The rule referenced here needs to be defined in the access configuration section. The value of the access rule needs to be either the shaper name or the string \"none\", which means no shaper.

"},{"location":"listeners/listen-c2s/#listenc2smax_connections","title":"listen.c2s.max_connections","text":"
  • Syntax: positive integer or the string \"infinity\"
  • Default: \"infinity\"
  • Example: max_connections = 10000

Maximum number of open connections. This is a soft limit according to the Ranch documentation.

"},{"location":"listeners/listen-c2s/#listenc2sc2s_state_timeout","title":"listen.c2s.c2s_state_timeout","text":"
  • Syntax: non-negative integer or the string \"infinity\"
  • Default: 5000
  • Example: c2s_state_timeout = 10_000

Timeout value (in milliseconds) used by the C2S state machine when waiting for the connecting client to respond during stream negotiation and SASL authentication. After the timeout the server responds with the connection-timeout stream error and closes the connection.

"},{"location":"listeners/listen-c2s/#listenc2sreuse_port","title":"listen.c2s.reuse_port","text":"
  • Syntax: boolean
  • Default: false
  • Example: reuse_port = true

Enables Linux support for SO_REUSEPORT, see Stack Overflow for more details.

"},{"location":"listeners/listen-c2s/#listenc2sbackwards_compatible_session","title":"listen.c2s.backwards_compatible_session","text":"
  • Syntax: boolean
  • Default: true
  • Example: backwards_compatible_session = false

Enables backward-compatible session establishment IQs. See https://www.rfc-editor.org/rfc/rfc6121.html#section-1.4:

[RFC3921] specified one additional precondition: formal establishment of an instant messaging and presence session. Implementation and deployment experience has shown that this additional step is unnecessary. However, for backward compatibility an implementation MAY still offer that feature. This enables older software to connect while letting newer software save a round trip.

"},{"location":"listeners/listen-c2s/#listenc2sallowed_auth_methods","title":"listen.c2s.allowed_auth_methods","text":"
  • Syntax: array of strings. Allowed values: \"internal\", \"rdbms\", \"external\", \"anonymous\", \"ldap\", \"jwt\", \"http\", \"pki\", \"dummy\"
  • Default: not set
  • Example: allowed_auth_methods = [\"internal\"]

A subset of enabled methods to log in with for this listener. This option allows you to enable only some backends. It is useful if you want to have several listeners for different types of users (for example, some users use PKI while other users use LDAP auth). Same syntax as for the auth.methods option.

"},{"location":"listeners/listen-c2s/#tls-options-for-c2s","title":"TLS options for C2S","text":"

To enable TLS, a TOML subsection called tls has to be present in the listener options. To disable TLS, make sure that the section is not present, and no TLS options are set. You can set the following options in this section:

"},{"location":"listeners/listen-c2s/#listenc2stlsmode","title":"listen.c2s.tls.mode","text":"
  • Syntax: string, one of \"tls\", \"starttls\", \"starttls_required\"
  • Default: \"starttls\"
  • Example: tls.mode = \"starttls\"

This option determines how clients are supposed to set up the TLS encryption:

  • tls - clients must initiate a TLS session immediately after connecting, before beginning the normal XML stream,
  • starttls - enables StartTLS support; requires certfile,
  • starttls_required - enables and enforces StartTLS usage.
"},{"location":"listeners/listen-c2s/#listenc2stlsmodule","title":"listen.c2s.tls.module","text":"
  • Syntax: string, one of \"just_tls\", \"fast_tls\"
  • Default: \"fast_tls\"
  • Example: tls.module = \"just_tls\"

By default, the TLS library used for C2S connections is fast_tls, which uses OpenSSL-based NIFs. It is possible to change it to just_tls - Erlang TLS implementation provided by OTP. Some TLS-related options described here have different formats for these two libraries.

"},{"location":"listeners/listen-c2s/#listenc2stlsverify_mode","title":"listen.c2s.tls.verify_mode","text":"
  • Syntax: string, one of \"peer\", \"selfsigned_peer\", \"none\"
  • Default: \"peer\"
  • Example: tls.verify_mode = \"none\"

Specifies the way client certificate verification works:

  • peer - makes sure the client certificate is valid and signed by a trusted CA. Requires a valid cacertfile.
  • selfsigned_peer - makes sure the client certificate is valid, but allows self-signed certificates; supported only by just_tls. Requires a valid cacertfile.
  • none - client certificate is not checked.
"},{"location":"listeners/listen-c2s/#listenc2stlscertfile","title":"listen.c2s.tls.certfile","text":"
  • Syntax: string, path in the file system
  • Default: not set
  • Example: tls.certfile = \"server.pem\"

Path to the X509 PEM file with a certificate and a private key (not protected by a password). If the certificate is signed by an intermediate CA, you should specify here the whole CA chain by concatenating all public keys together and appending the private key after that.

Note

For just_tls this file should only contain the certificate and the path to the private key can be provided separately as keyfile.

"},{"location":"listeners/listen-c2s/#listenc2stlscacertfile","title":"listen.c2s.tls.cacertfile","text":"
  • Syntax: string, path in the file system
  • Default: not set
  • Example: tls.cacertfile = \"ca.pem\"

Path to the X509 PEM file with a CA chain that will be used to verify clients. It won't have any effect if verify_mode is \"none\".

"},{"location":"listeners/listen-c2s/#listenc2stlsdhfile","title":"listen.c2s.tls.dhfile","text":"
  • Syntax: string, path in the file system
  • Default: not set
  • Example: tls.dhfile = \"dh.pem\"

Path to the Diffie-Hellman parameter file.

"},{"location":"listeners/listen-c2s/#listenc2stlsciphers","title":"listen.c2s.tls.ciphers","text":"
  • Syntax: string with the OpenSSL cipher suite specification
  • Default: for fast_tls the default is \"TLSv1.2:TLSv1.3\". For just_tls this option is not set by default - all supported suites are accepted.
  • Example: tls.ciphers = \"ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-GCM-SHA384\"

Cipher suites to use with StartTLS or TLS. Please refer to the OpenSSL documentation for the cipher string format. For fast_tls, this string can be used to specify versions as well. For just_tls, see the Erlang/OTP SSL documentation for allowed values.

"},{"location":"listeners/listen-c2s/#listenc2stlsprotocol_options-only-for-fast_tls","title":"listen.c2s.tls.protocol_options - only for fast_tls","text":"
  • Syntax: array of strings
  • Default: [\"no_sslv2\", \"no_sslv3\", \"no_tlsv1\", \"no_tlsv1_1\"]
  • Example: tls.protocol_options = [\"no_tlsv1\", \"no_tlsv1_1\"]

A list of OpenSSL options for FastTLS. You can find the mappings between supported options and actual OpenSSL flags in the fast_tls source code.

"},{"location":"listeners/listen-c2s/#listenc2stlskeyfile-only-for-just_tls","title":"listen.c2s.tls.keyfile - only for just_tls","text":"
  • Syntax: string, path in the file system
  • Default: not set
  • Example: tls.keyfile = \"key.pem\"

Path to the X509 PEM file with the private key.

"},{"location":"listeners/listen-c2s/#listenc2stlspassword-only-for-just_tls","title":"listen.c2s.tls.password - only for just_tls","text":"
  • Syntax: string
  • Default: not set
  • Example: tls.password = \"secret\"

Password to the X509 PEM file with the private key.

"},{"location":"listeners/listen-c2s/#listenc2stlsdisconnect_on_failure-only-for-just_tls","title":"listen.c2s.tls.disconnect_on_failure - only for just_tls","text":"
  • Syntax: boolean
  • Default: true
  • Example: tls.disconnect_on_failure = false
"},{"location":"listeners/listen-c2s/#listenc2stlsversions-only-for-just_tls","title":"listen.c2s.tls.versions - only for just_tls","text":"
  • Syntax: array of strings
  • Default: not set, all supported versions are accepted
  • Example: tls.versions = [\"tlsv1.2\", \"tlsv1.3\"]

TLS versions to use with StartTLS or TLS. For allowed values, see the Erlang/OTP SSL documentation

"},{"location":"listeners/listen-c2s/#listenc2stlscrl_files-only-for-just_tls","title":"listen.c2s.tls.crl_files - only for just_tls","text":"
  • Syntax: array of strings, paths in the file system
  • Default: not set
  • Example: tls.crl_files = [\"certs.crl\"]

Specifies the paths to Certificate Revocation Lists.

"},{"location":"listeners/listen-c2s/#c2s-listener-configuration-example","title":"C2S listener configuration example","text":"

The following section configures two C2S listeners.

[[listen.c2s]]\n  port = 5222\n  access = \"c2s\"\n  shaper = \"c2s_shaper\"\n  max_stanza_size = 65536\n  tls.certfile = \"server.pem\"\n  tls.dhfile = \"dh_server.pem\"\n\n[[listen.c2s]]\n  port = 5223\n  access = \"c2s\"\n  shaper = \"c2s_shaper\"\n  max_stanza_size = 65536\n
  • One at port 5222, which accepts a plain TCP connection and allows to use StartTLS for upgrading it to an encrypted one. The files containing the certificate and the DH parameter are also provided.
  • One at port 5223, which accepts only encrypted TLS connections - this is the legacy method as StartTLS is preferred.

Both listeners use c2s and c2s_shaper rules for access management and traffic shaping, respectively.

"},{"location":"listeners/listen-components/","title":"XMPP components: [[listen.service]]","text":"

Interface for external services acting as XMPP components (XEP-0114: Jabber Component Protocol), enabling communication between MongooseIM and external services over the XMPP network. The recommended port number for a component listener is 8888.

According to XEP-0114: Jabber Component Protocol the component's hostname should be given in the stream element.

Warning

This interface does not support dynamic domains. Do not use them both at the same time.

"},{"location":"listeners/listen-components/#configuration-options","title":"Configuration options","text":"

The following options are supported for each component listener under listen.service subsection:

"},{"location":"listeners/listen-components/#listenserviceaccess","title":"listen.service.access","text":"
  • Syntax: string, rule name or \"all\"
  • Default: \"all\"
  • Example: access = \"component\"

Determines who is allowed to send data to external components. By default, the rule is all, which means that anyone can communicate with the components.

"},{"location":"listeners/listen-components/#listenservicepassword","title":"listen.service.password","text":"
  • Syntax: string
  • Default: no default, this option is mandatory
  • Example: password = \"secret\"

The external component needs to authenticate with this password to connect.

"},{"location":"listeners/listen-components/#listenserviceshaper_rule","title":"listen.service.shaper_rule","text":"
  • Syntax: string, name of the shaper
  • Default: \"none\"
  • Example: shaper_rule = \"component_shaper\"

The traffic shaper used to limit the XMPP traffic to prevent the server from being flooded with incoming data. Contrary to the C2S and S2S shapers, here the shaper name directly references the shaper that needs to be defined in the shaper section.

"},{"location":"listeners/listen-components/#listenservicecheck_from","title":"listen.service.check_from","text":"
  • Syntax: boolean
  • Default: true
  • Example: check_from = false

Specifies whether the server should verify the \"from\" field in stanzas from the component.

"},{"location":"listeners/listen-components/#listenservicehidden_components","title":"listen.service.hidden_components","text":"
  • Syntax: boolean
  • Default: false
  • Example: hidden_components = true

All components connected to an endpoint with this option enabled will be considered \"hidden\".

Hidden components have a special flag enabled in the internal component table. Alone, it doesn't change the server behaviour in any way, but it may be used by other modules and extensions to execute special logic. An example would be mod_disco, which may be configured to filter out hidden components from disco results, so they won't be discoverable by clients. A reason to do so could be reduced traffic - systems with many components could return very long disco responses. Also, some deployments would like to avoid revealing some services; not because it is a security threat (this method does not prevent clients from communicating with hidden components), but rather because they are not meant to interact with clients directly (e.g. helper components for other components).

"},{"location":"listeners/listen-components/#listenserviceconflict_behaviour","title":"listen.service.conflict_behaviour","text":"
  • Syntax: string, one of: \"disconnect\", \"kick_old\"
  • Default: \"disconnect\"
  • Example: conflict_behaviour = \"kick_old\"

By default, when a component tries to connect and a registration conflict occurs, the connection is dropped with the following error:

<stream:error>\n  <conflict xmlns='urn:ietf:params:xml:ns:xmpp-streams'/>\n</stream:error>\n</stream:stream>\n

It makes implementing the reconnection logic difficult, because the old connection would not allow any other connections. By setting this option to kick_old, we drop any old connections registered at the same host before accepting new ones.

"},{"location":"listeners/listen-components/#listenservicemax_fsm_queue","title":"listen.service.max_fsm_queue","text":"
  • Syntax: positive integer
  • Default: not set - no limit
  • Example: max_fsm_queue = 1000

Message queue limit to prevent resource exhaustion; overrides the value set in the general section.

"},{"location":"listeners/listen-components/#custom-extension-to-the-protocol","title":"Custom extension to the protocol","text":"

In order to register a component for all virtual hosts served by the server (see hosts in the general section), the component must add the attribute is_subdomain=\"true\" to the opening stream element. This may be helpful if someone wants to have a single instance of a component serving multiple virtual hosts. The is_subdomain attribute is optional and the default behaviour is as described in XEP-0114: Jabber Component Protocol.

"},{"location":"listeners/listen-components/#service-listener-configuration-example","title":"Service listener configuration example","text":"

The following section configures a service listener, accepting connections from external components. The IP address is limited to loopback to prevent connections from different hosts. All components are allowed to connect, but they need to provide the password. The shaper named fast needs to be defined in the shaper section.

[[listen.service]]\n  port = 8888\n  access = \"all\"\n  shaper_rule = \"fast\"\n  ip_address = \"127.0.0.1\"\n  password = \"secret\"\n
"},{"location":"listeners/listen-http/","title":"HTTP-based services: [[listen.http]]","text":"

Manages all HTTP-based services, such as BOSH (HTTP long-polling), WebSocket, GraphQL and REST. It uses the Cowboy web server. Recommended port number: 5280 for BOSH/WS.

"},{"location":"listeners/listen-http/#configuration-options","title":"Configuration options","text":"

The following configuration option is used to set up an HTTP handler:

"},{"location":"listeners/listen-http/#listenhttphandlers","title":"listen.http.handlers","text":"
  • Syntax: each handler is specified in a subsection starting with [[listen.http.handlers.type]] where type is one of the allowed handler types, handling different connection types:

    • mod_bosh - for BOSH connections,
    • mod_websockets - for WebSocket connections,
    • mongoose_graphql_handler - for GraphQL API,
    • mongoose_admin_api, mongoose_client_api - for REST API.

    These types are described below in more detail. The double-bracket syntax is used because there can be multiple handlers of a given type, so for each type there is a TOML array of one or more tables (subsections).

  • Default: [] - no handlers enabled, all of them need to be specified explicitly.

  • Example: two handlers, one for BOSH and one for WebSockets
      [[listen.http.handlers.mod_bosh]]\n    host = \"_\"\n    path = \"/http-bind\"\n\n  [[listen.http.handlers.mod_websockets]]\n    host = \"_\"\n    path = \"/ws-xmpp\"\n
"},{"location":"listeners/listen-http/#common-handler-options","title":"Common handler options","text":""},{"location":"listeners/listen-http/#listenhttphandlershost","title":"listen.http.handlers.*.host","text":"
  • Syntax: string
  • Default: no default, mandatory option
  • Example: host = \"localhost\"

Host name for this handler or \"_\" for any host.

"},{"location":"listeners/listen-http/#listenhttphandlerspath","title":"listen.http.handlers.*.path","text":"
  • Syntax: string
  • Default: no default, mandatory option
  • Example: path = \"/ws-xmpp\"

Path for this handler.

"},{"location":"listeners/listen-http/#handler-types-bosh-mod_bosh","title":"Handler types: BOSH - mod_bosh","text":"

The recommended configuration is shown in Example 1 below. To handle incoming BOSH traffic you need to configure the mod_bosh module in the modules section as well.

"},{"location":"listeners/listen-http/#handler-types-websockets-mod_websockets","title":"Handler types: WebSockets - mod_websockets","text":"

The recommended configuration is shown in Example 1 below. Websocket connections as defined in RFC 7395. You can pass the following optional parameters:

"},{"location":"listeners/listen-http/#listenhttphandlersmod_websocketstimeout","title":"listen.http.handlers.mod_websockets.timeout","text":"
  • Syntax: non-negative integer or the string \"infinity\"
  • Default: \"infinity\"
  • Example: timeout = 60_000

The time (in milliseconds) after which an inactive user is disconnected.

"},{"location":"listeners/listen-http/#listenhttphandlersmod_websocketsping_rate","title":"listen.http.handlers.mod_websockets.ping_rate","text":"
  • Syntax: positive integer
  • Default: not set - pings disabled
  • Example: ping_rate = 10_000

The time (in milliseconds) between pings sent by server. By setting this option you enable server-side pinging.

"},{"location":"listeners/listen-http/#listenhttphandlersmod_websocketsmax_stanza_size","title":"listen.http.handlers.mod_websockets.max_stanza_size","text":"
  • Syntax: positive integer or the string \"infinity\"
  • Default: \"infinity\"
  • Example: max_stanza_size = 10_000

Maximum allowed incoming stanza size in bytes.

Warning

This limit is checked after the input data parsing, so it does not apply to the input data size itself.

"},{"location":"listeners/listen-http/#listenhttphandlersmod_websocketsc2s_state_timeout","title":"listen.http.handlers.mod_websockets.c2s_state_timeout","text":"

Same as the C2S option

"},{"location":"listeners/listen-http/#listenhttphandlersmod_websocketsbackwards_compatible_session","title":"listen.http.handlers.mod_websockets.backwards_compatible_session","text":"

Same as the C2S option

"},{"location":"listeners/listen-http/#handler-types-graphql-api-mongoose_graphql_handler","title":"Handler types: GraphQL API - mongoose_graphql_handler","text":"

For more information about the API, see the Admin interface and User interface documentation. The following options are supported for this handler:

"},{"location":"listeners/listen-http/#listenhttphandlersmongoose_graphql_handlerschema_endpoint","title":"listen.http.handlers.mongoose_graphql_handler.schema_endpoint","text":"
  • Syntax: string, one of \"admin\", \"domain_admin\", \"user\"
  • Default: no default, this option is mandatory
  • Example: schema_endpoint = \"admin\"

Specifies the schema endpoint:

  • admin - Endpoint with the admin commands. A global admin has permission to execute all commands. See the recommended configuration - Example 2.
  • domain_admin - Endpoint with the admin commands. A domain admin has permission to execute only commands with the owned domain. See the recommended configuration - Example 3.
  • user - Endpoint with the user commands. Used to manage the authorized user. See the recommended configuration - Example 4.
"},{"location":"listeners/listen-http/#listenhttphandlersmongoose_graphql_handlerusername-only-for-admin","title":"listen.http.handlers.mongoose_graphql_handler.username - only for admin","text":"
  • Syntax: string
  • Default: not set
  • Example: username = \"admin\"

When set, enables authentication for the admin API, otherwise it is disabled. Requires setting password.

"},{"location":"listeners/listen-http/#listenhttphandlersmongoose_graphql_handlerpassword-only-for-admin","title":"listen.http.handlers.mongoose_graphql_handler.password - only for admin","text":"
  • Syntax: string
  • Default: not set
  • Example: password = \"secret\"
"},{"location":"listeners/listen-http/#listenhttphandlersmongoose_graphql_handlerallowed_categories","title":"listen.http.handlers.mongoose_graphql_handler.allowed_categories","text":"
  • Syntax: non-empty array of strings. Allowed values: \"checkAuth\", \"account\", \"domain\", \"last\", \"muc\", \"muc_light\", \"session\", \"stanza\", \"roster\", \"vcard\", \"private\", \"metric\", \"stat\", \"gdpr\", \"mnesia\", \"server\", \"inbox\", \"http_upload\", \"offline\", \"token\"
  • Default: all GraphQL categories enabled
  • Example: allowed_categories = [\"domain\", \"last\"]

By default, when the option is not included, all GraphQL categories are enabled, so you don't need to add this option. When this option is added, only listed GraphQL categories will be processed. For others, the error \"category disabled\" will be returned.

"},{"location":"listeners/listen-http/#listenhttphandlersmongoose_graphql_handlersse_idle_timeout","title":"listen.http.handlers.mongoose_graphql_handler.sse_idle_timeout","text":"
  • Syntax: positive integer or the string \"infinity\"
  • Default: 3600000
  • Example: sse_idle_timeout = 3_600_000

This option specifies the time in milliseconds after which the SSE connection is closed when idle. The default value is 1 hour.

"},{"location":"listeners/listen-http/#handler-types-rest-api-admin-mongoose_admin_api","title":"Handler types: REST API - Admin - mongoose_admin_api","text":"

The recommended configuration is shown in Example 5 below. For more information about the API, see the REST interface documentation. The following options are supported for this handler:

"},{"location":"listeners/listen-http/#listenhttphandlersmongoose_admin_apiusername","title":"listen.http.handlers.mongoose_admin_api.username","text":"
  • Syntax: string
  • Default: not set
  • Example: username = \"admin\"

When set, enables authentication for the admin API, otherwise it is disabled. Requires setting password.

"},{"location":"listeners/listen-http/#listenhttphandlersmongoose_admin_apipassword","title":"listen.http.handlers.mongoose_admin_api.password","text":"
  • Syntax: string
  • Default: not set
  • Example: password = \"secret\"

Required to enable authentication for the admin API.

"},{"location":"listeners/listen-http/#listenhttphandlersmongoose_admin_apihandlers","title":"listen.http.handlers.mongoose_admin_api.handlers","text":"
  • Syntax: array of strings. Allowed values: \"contacts\", \"users\", \"sessions\", \"messages\", \"stanzas\", \"muc_light\", \"muc\", \"inbox\", \"domain\", \"metrics\".
  • Default: all API handler modules enabled
  • Example: handlers = [\"domain\"]

The admin API consists of several handler modules, each of them implementing a subset of the functionality. By default, all modules are enabled, so you don't need to change this option.

"},{"location":"listeners/listen-http/#handler-types-rest-api-client-mongoose_client_api","title":"Handler types: REST API - Client - mongoose_client_api","text":"

The recommended configuration is shown in Example 6 below. Please refer to REST interface documentation for more information. The following options are supported for this handler:

"},{"location":"listeners/listen-http/#listenhttphandlersmongoose_client_apihandlers","title":"listen.http.handlers.mongoose_client_api.handlers","text":"
  • Syntax: array of strings. Allowed values: \"sse\", \"messages\", \"contacts\", \"rooms\", \"rooms_config\", \"rooms_users\", \"rooms_messages\".
  • Default: all API handler modules enabled
  • Example: handlers = [\"messages\", \"sse\"]

The client API consists of several handler modules, each of them implementing a subset of the functionality. By default, all modules are enabled, so you don't need to change this option.

"},{"location":"listeners/listen-http/#listenhttphandlersmongoose_client_apidocs","title":"listen.http.handlers.mongoose_client_api.docs","text":"
  • Syntax: boolean
  • Default: true
  • Example: docs = false

The Swagger documentation of the client API is hosted at the /api-docs path. You can disable the hosted documentation by setting this option to false.

"},{"location":"listeners/listen-http/#transport-options","title":"Transport options","text":"

The options listed below are used to modify the HTTP transport settings.

"},{"location":"listeners/listen-http/#listenhttptransportnum_acceptors","title":"listen.http.transport.num_acceptors","text":"
  • Syntax: positive integer
  • Default: 100
  • Example: transport.num_acceptors = 10

Number of HTTP connection acceptors.

"},{"location":"listeners/listen-http/#listenhttptransportmax_connections","title":"listen.http.transport.max_connections","text":"
  • Syntax: positive integer or the string \"infinity\"
  • Default: 1024
  • Example: transport.max_connections = \"infinity\"

Maximum number of open connections. The default value of 1024 is set by the Ranch library.

"},{"location":"listeners/listen-http/#tls-https-options","title":"TLS (HTTPS) options","text":"

By default, the HTTP listener does not use TLS. To use TLS (HTTPS), you need to add a TOML table (subsection) called tls to the config file with the certfile and keyfile options that specify the location of the certificate and private key files, respectively. If the keyfile is password-protected, password is required as well. If the certificate is signed by an intermediate CA, one will probably want to specify the CA chain with the cacertfile option. The library used for HTTP is the Erlang TLS implementation provided by OTP - see ranch_ssl for details.

The options accepted here are: verify_mode, certfile, cacertfile, ciphers, keyfile, password, versions, dhfile. They have the same semantics as the corresponding c2s options for just_tls.

"},{"location":"listeners/listen-http/#protocol-options","title":"Protocol options","text":"

These are some additional options of the HTTP protocol.

"},{"location":"listeners/listen-http/#listenhttpprotocolcompress","title":"listen.http.protocol.compress","text":"
  • Syntax: boolean
  • Default: false
  • Example: protocol.compress = true

Compresses response bodies automatically when the client supports it.

"},{"location":"listeners/listen-http/#http-listener-configuration-examples","title":"HTTP listener configuration examples","text":"

The examples shown below are included in the provided default configuration file.

"},{"location":"listeners/listen-http/#example-1-bosh-and-ws","title":"Example 1. BOSH and WS","text":"

The following listener accepts BOSH and WebSocket connections and has TLS configured.

[[listen.http]]\n  port = 5285\n  tls.certfile = \"mycert.pem\"\n  tls.keyfile = \"mykey.pem\"\n  tls.password =  \"secret\"\n\n  [[listen.http.handlers.mod_bosh]]\n    host = \"_\"\n    path = \"/http-bind\"\n\n  [[listen.http.handlers.mod_websockets]]\n    host = \"_\"\n    path = \"/ws-xmpp\"\n
"},{"location":"listeners/listen-http/#example-2-admin-graphql-api","title":"Example 2. Admin GraphQL API","text":"

GraphQL API for administration, the listener is bound to 127.0.0.1 for increased security. The number of acceptors and connections is specified (reduced).

[[listen.http]]\n  ip_address = \"127.0.0.1\"\n  port = 5551\n  transport.num_acceptors = 5\n  transport.max_connections = 10\n\n  [[listen.http.handlers.mongoose_graphql_handler]]\n    host = \"localhost\"\n    path = \"/api/graphql\"\n    schema_endpoint = \"admin\"\n    username = \"admin\"\n    password = \"secret\"\n    allowed_categories = [\"server\", \"last\", \"vcard\"]\n
"},{"location":"listeners/listen-http/#example-3-domain-admin-graphql-api","title":"Example 3. Domain Admin GraphQL API","text":"

GraphQL API for the domain admin.

[[listen.http]]\n  ip_address = \"0.0.0.0\"\n  port = 5541\n  transport.num_acceptors = 10\n  transport.max_connections = 1024\n\n  [[listen.http.handlers.mongoose_graphql_handler]]\n    host = \"_\"\n    path = \"/api/graphql\"\n    schema_endpoint = \"domain_admin\"\n
"},{"location":"listeners/listen-http/#example-4-user-graphql-api","title":"Example 4. User GraphQL API","text":"

GraphQL API for the user.

[[listen.http]]\n  ip_address = \"0.0.0.0\"\n  port = 5561\n  transport.num_acceptors = 10\n  transport.max_connections = 1024\n\n  [[listen.http.handlers.mongoose_graphql_handler]]\n    host = \"_\"\n    path = \"/api/graphql\"\n    schema_endpoint = \"user\"\n
"},{"location":"listeners/listen-http/#example-5-admin-rest-api","title":"Example 5. Admin REST API","text":"

REST API for administration, the listener is bound to 127.0.0.1 for increased security. The number of acceptors and connections is specified (reduced). Basic HTTP authentication is used as well.

[[listen.http]]\n  ip_address = \"127.0.0.1\"\n  port = 8088\n  transport.num_acceptors = 5\n  transport.max_connections = 10\n\n  [[listen.http.handlers.mongoose_admin_api]]\n    host = \"localhost\"\n    path = \"/api\"\n    username = \"admin\"\n    password = \"secret\"\n
"},{"location":"listeners/listen-http/#example-6-client-rest-api","title":"Example 6. Client REST API","text":"

REST API for clients.

[[listen.http]]\n  port = 8089\n  transport.max_connections = 1024\n  protocol.compress = true\n\n  [[listen.http.handlers.mongoose_client_api]]\n    host = \"_\"\n    path = \"/api\"\n
"},{"location":"listeners/listen-s2s/","title":"Server to server (S2S): [[listen.s2s]]","text":"

Handles incoming server-to-server (S2S) connections (federation). The recommended port number for an S2S listener is 5269 as registered in the XMPP protocol.

Note

Many S2S options are configured in the s2s section of the configuration file, and they apply to both incoming and outgoing connections.

"},{"location":"listeners/listen-s2s/#configuration-options","title":"Configuration options","text":""},{"location":"listeners/listen-s2s/#listens2sshaper","title":"listen.s2s.shaper","text":"
  • Syntax: string, name of the shaper rule or \"none\"
  • Default: \"none\" - no shaper
  • Example: shaper = \"s2s_shaper\"

Name of the rule that determines what traffic shaper is used to limit the incoming XMPP traffic to prevent the server from being flooded with incoming data. The rule referenced here needs to be defined in the access config section, and it should return the shaper name or the value \"none\".

"},{"location":"listeners/listen-s2s/#tls-options-for-s2s","title":"TLS options for S2S","text":"

S2S connections do not use TLS encryption unless enabled with the use_starttls option in the s2s section. You can specify additional options of the TLS encryption in the tls subsection of the listener configuration. Accepted options are: verify_mode, certfile, cacertfile, dhfile, ciphers and protocol_options. They have the same semantics as the corresponding c2s options for fast_tls.

"},{"location":"listeners/listen-s2s/#s2s-listener-configuration-example","title":"S2S listener configuration example","text":"

The following section configures an S2S listener with some basic settings set up. The s2s_shaper access rule is used, which requires a definition in the access section.

[[listen.s2s]]\n  port = 5269\n  shaper = \"s2s_shaper\"\n  max_stanza_size = 131072\n  tls.dhfile = \"dh_server.pem\"\n
"},{"location":"migrations/3.1.1_3.2.0/","title":"3.1.1 to 3.2.0","text":""},{"location":"migrations/3.1.1_3.2.0/#odbc-renamed-to-rdbms-in-module-names-and-options","title":"odbc renamed to rdbms in module names and options","text":"
  • For MongooseIM users: simply replace all instances of odbc in your config files with rdbms. E.g. {auth_method, odbc}. would now be {auth_method, rdbms}.. It's also important to note that all metrics that previously contained odbc in their names have also been renamed to contain rdbms instead.

Please note that odbc_server has been completely replaced with new outgoing_pools (see one of the next sections of this document) config element.

  • For developers calling MongooseIM modules: most modules, functions and atoms had odbc in their names replaced with rdbms. The only exceptions to this rule were names actually pertaining to the ODBC driver, e.g. mongoose_rdbms_odbc.
"},{"location":"migrations/3.1.1_3.2.0/#ejabberdcfg-renamed-to-mongooseimcfg","title":"ejabberd.cfg renamed to mongooseim.cfg","text":"

Rename the existing config file of MongooseIM from ejabberd.cfg to mongooseim.cfg.

"},{"location":"migrations/3.1.1_3.2.0/#pools-configuration","title":"Pools configuration","text":"

Configuring pools to external services has changed, please see Outgoing Connection doc for more details.

Note

Keep in mind that outgoing_pools is a list of pools, it may turn out that you will have more than one entry in the list when more than a single outgoing pool is needed.

"},{"location":"migrations/3.1.1_3.2.0/#example-old-format","title":"Example - Old format","text":"
{elasticsearch_server, [{host, \"elastic.host.com\"}, {port, 9042}]}.\n{riak_server, [{pool_size, 20}, {address, \"127.0.0.1\"}, {port, 8087}, {riak_pb_socket_opts, []}]}.\n{http_connections, [{conn1, [{server, \"http://server:8080\"}, {pool_size, 50}]} ]}.\n{cassandra_servers, [\n  {default, 100,\n   [\n    {servers,\n     [\n      {\"cassandra_server1.example.com\", 9042},\n      {\"cassandra_server2.example.com\", 9042},\n      {\"cassandra_server3.example.com\", 9042},\n      {\"cassandra_server4.example.com\", 9042}\n     ]\n    },\n    {keyspace, \"big_mongooseim\"}\n   ]\n  }\n ]\n}.\n
"},{"location":"migrations/3.1.1_3.2.0/#example-new-format","title":"Example - New format","text":"

This section provides direct \"translation\" of configuration from \"Old format\" section.

{outgoing_pools, [\n  {elastic, global, default, [], [{host, \"elastic.host.com\"}, {port, 9042}]},\n  {riak, global, default, [{workers, 20}], [{address, \"127.0.0.1\"}, {port, 8087}]},\n  {http, global, conn1, [{workers, 50}], [{server, \"http://server:8080\"}]},\n  {cassandra, global, default, [{workers, 100}], [\n        {servers, [\n          {\"cassandra_server1.example.com\", 9042},\n          {\"cassandra_server2.example.com\", 9042},\n          {\"cassandra_server3.example.com\", 9042},\n          {\"cassandra_server4.example.com\", 9042}\n        ]},\n        {keyspace, \"big_mongooseim\"}\n    ]}\n]}.\n
"},{"location":"migrations/3.1.1_3.2.0/#rdbms-configuration-migration","title":"RDBMS configuration migration","text":"

RDBMS pools are no longer configured by a {pool, odbc, _} tuple, instead using the generic outgoing pools mechanism. The connection configuration is now passed via server option of the pool instead of being configured via a top-level {odbc_server, _} tuple. Similarly, the number of workers is no longer configured by odbc_pool_size, and the default pool no longer set by odbc_pool. A top-level odbc_keepalive_interval is now also specified as an option for a specific pool. For example:

{odbc_pool_size, 10}.\n{pool, odbc, default}.\n{odbc_server_type, mssql}.\n{odbc_server, \"DSN=mongoose-mssql;UID=sa;PWD=mongooseim_secret+ESL123\"}.\n{odbc_keepalive_interval, 10}.\n

will now become:

{rdbms_server_type, mssql}.\n{outgoing_pools, [\n {rdbms, global, default, [{workers, 10}],\n  [{server, \"DSN=mongoose-mssql;UID=sa;PWD=mongooseim_secret+ESL123\"}, {keepalive_interval, 10}]}\n]}.\n

Note that odbc_server_type was only renamed to rdbms_server_type and still remains a top-level configuration value.

"},{"location":"migrations/3.1.1_3.2.0/#sm_backend","title":"sm_backend","text":"

If you had the sm_backend set to redis like below:

{sm_backend, {redis, [{pool_size, 3}, {worker_config, [{host, \"localhost\"}, {port, 6379}]}]}}.\n

The pool needs to be defined inside outgoing_pools like this:

{outgoing_pools, [\n {redis, global, default, [{workers, 3}],\n  [{host, \"localhost\"},\n   {port, 6379}]}\n]}.\n

and the sm_backend configuration needs to be changed to just:

{sm_backend, {redis, []}}.\n
"},{"location":"migrations/3.1.1_3.2.0/#mod_global_distrib","title":"mod_global_distrib","text":"

If you had mod_global_distrib configured in the following way:

{mod_global_distrib, [\n        (...)\n        {redis, [\n              {pool_size, 24},\n              {server, \"172.16.0.3\"}\n             ]}\n       ]}\n

The redis pool needs to be defined inside outgoing_pools:

{outgoing_pools, [\n {redis, global, global_distrib, [{workers, 24}], [{host, \"172.16.0.3\"}]}\n]}.\n
"},{"location":"migrations/3.3.0_3.4.0/","title":"3.3.0 to 3.4.0","text":""},{"location":"migrations/3.3.0_3.4.0/#new-field-in-message-archive-management-muc-entries-sender-id","title":"New field in Message Archive Management MUC entries: Sender ID","text":"

As a part of ensuring GDPR compliance, it is essential to be able to efficiently query MAM MUC data via sender ID (to retrieve user's personal data). Originally, the sender JID could be found only as a part of an encoded XML message element, so finding all items sent by a certain user would be extremely inefficient (or rather: anti-efficient). MongooseIM 3.4.0 uses a modified schema for MAM MUC backends which enables a more efficient extraction.

Below you may find migration instructions specific to your MAM backend.

"},{"location":"migrations/3.3.0_3.4.0/#rdbms","title":"RDBMS","text":""},{"location":"migrations/3.3.0_3.4.0/#step-1","title":"Step 1","text":"

Please execute the following SQL statements on your MIM database:

MySQL

ALTER TABLE mam_muc_message ADD COLUMN sender_id INT UNSIGNED;\nCREATE INDEX i_mam_muc_message_sender_id USING BTREE ON mam_muc_message(sender_id);\n

PostgreSQL

ALTER TABLE mam_muc_message ADD COLUMN sender_id INT;\nCREATE INDEX i_mam_muc_message_sender_id ON mam_muc_message USING BTREE (sender_id);\n

MSSQL

ALTER TABLE [dbo].[mam_muc_message] ADD sender_id bigint;\nCREATE INDEX i_mam_muc_message_sender_id ON mam_muc_message(sender_id);\n
"},{"location":"migrations/3.3.0_3.4.0/#step-2","title":"Step 2","text":"

Now you have a schema that is compatible with MIM 3.4.0 but isn't GDPR-compliant yet because the new column has no meaningful data.

Please pick your favourite scripting/programming language and populate the new column with the help of a dedicated script. You'll need to iterate over the whole mam_muc_message table with the following algorithm:

  1. Provide message column content to the script.
  2. The script returns sender's JID as username@server string. You need to split it to get a separate username and server.
  3. Select ID from mam_server_user by the username and server. If it doesn't exist, insert a new one (id column is automatically incremented).
  4. Update the sender_id column in mam_muc_message with the retrieved ID.
"},{"location":"migrations/3.3.0_3.4.0/#cassandra","title":"Cassandra","text":""},{"location":"migrations/3.3.0_3.4.0/#step-1_1","title":"Step 1","text":"

Please execute the following CQL statements on your MIM database:

USE mongooseim;\nALTER TABLE mam_muc_message ADD from_jid varchar;\nCREATE INDEX ON mam_muc_message (from_jid);\nDESC mam_muc_message;\n
"},{"location":"migrations/3.3.0_3.4.0/#step-2_1","title":"Step 2","text":"

Now you have a schema that is compatible with MIM 3.4.0 but isn't GDPR-compliant yet because the new column has no meaningful data.

Please pick your favourite scripting/programming language and populate the new column with the help of a dedicated script. You'll need to iterate over the whole mam_muc_message table with the following algorithm:

  1. Extract the whole mam_muc_message table. Please make sure to use the paging feature of your Cassandra client, as the MAM tables tend to be very large.
    SELECT * FROM mam_muc_message;\n
  2. To make data extraction faster, MongooseIM stores 2 copies of the message in the table:
    cqlsh:mongooseim> SELECT * FROM mam_muc_message WHERE id = 399582233150625537 ALLOW FILTERING;\n\n room_jid                      | with_nick | id                 | from_jid | message                        | nick_name\n-------------------------------+-----------+--------------------+----------+--------------------------------+-----------\n room-ad1d999b9e@muc.localhost |           | 399582233150625537 |     null | 0x8350000001...998de2fa8426837 |       Sid\n room-ad1d999b9e@muc.localhost |       Sid | 399582233150625537 |     null | 0x8350000001...998de2fa8426837 |       Sid\n
  3. The copy with an empty with_nick column must be updated.
  4. Extract the sender's JID from the message column in the same way as described in the RDBMS migration section. By default cassandra backend uses the eterm format.
  5. Update the from_jid column with the value of the extracted sender's JID:
    cqlsh:mongooseim> UPDATE mam_muc_message SET from_jid = 'username@server' WHERE id = 399582233150625537  AND with_nick = '' AND room_jid = 'room-ad1d999b9e@muc.localhost';\ncqlsh:mongooseim> SELECT * FROM mam_muc_message WHERE id = 399582233150625537 ALLOW FILTERING;\n\n room_jid                      | with_nick | id                 | from_jid        | message                        | nick_name\n-------------------------------+-----------+--------------------+-----------------+--------------------------------+-----------\n room-ad1d999b9e@muc.localhost |           | 399582233150625537 | username@server | 0x8350000001...998de2fa8426837 |       Sid\n room-ad1d999b9e@muc.localhost |       Sid | 399582233150625537 |            null | 0x8350000001...998de2fa8426837 |       Sid\n
"},{"location":"migrations/3.3.0_3.4.0/#riak","title":"Riak","text":"

Changes to Riak schema are backward compatible with the current MongooseIM release. This means that skipping the migration will cause only some of the new features (namely GDPR data retrieval) to not work correctly.

"},{"location":"migrations/3.3.0_3.4.0/#step-1_2","title":"Step 1","text":"

Please update the Riak schema:

# Set the RIAK_HOST to your Riak HTTP endpoint\n# Set the RIAK_MAM_SCHEMA_PATH to point to new schema path, which\n# by default is: RIAK_MAM_SCHEMA_PATH=tools/mam_search_schema.xml\ncurl -v -XPUT $RIAK_HOST/search/schema/mam \\\n    -H 'Content-Type:application/xml' \\\n    --data-binary @${RIAK_MAM_SCHEMA_PATH}\n

After that we need to either reload all Riak nodes (restart them) or manually reload the schema on live nodes. Reloading the schema on live nodes requires access to Erlang Shell of one of the Riak nodes (any of them). The instruction on how to get to Riak's Erlang shell is beyond this guide, but if you manage to get to it, just call:

yz_index:reload(<<\"mam\">>).\n
"},{"location":"migrations/3.3.0_3.4.0/#step-2_2","title":"Step 2","text":"

After the schema is posted and reloaded, all "new" objects will be indexed properly as long as they contain 2 new fields: msg_owner_jid and mam_type. The new MongooseIM code will insert both of them for all new MAM entries, but all existing ones need to have the fields added. In order to do that, we need to create a migration script (just pick your favourite scripting/programming language) that will do the following for each object in each bucket of type mam_yz (the object will be referred to as obj):

  • Use this dedicated script to convert the obj.packet_register field value into a so called $SENDER_JID.
  • If the script returns $SENDER_JID correctly:
  • set obj.mam_type = 'muc'
  • set obj.msg_owner_jid = $SENDER_JID
  • If the script returns error code -2
  • set obj.mam_type = 'pm'
  • based on obj_yz_rk formatted as $LOCAL_JID/$REMOTE_JID/$MSG_ID, set obj.msg_owner_jid = $LOCAL_JID
  • Save the modified obj
"},{"location":"migrations/3.3.0_3.4.0/#elasticsearch","title":"ElasticSearch","text":""},{"location":"migrations/3.3.0_3.4.0/#step-1_3","title":"Step 1","text":"

Please update the mapping for muc_messages:

PUT muc_messages/_mapping/muc\n{\n  \"properties\": {\n    \"mam_id\": {\n      \"type\": \"long\"\n    },\n    \"room\": {\n      \"type\": \"keyword\"\n    },\n    \"from_jid\" : {\n      \"type\": \"keyword\"\n    },\n    \"source_jid\": {\n      \"type\": \"keyword\"\n    },\n    \"message\": {\n      \"type\": \"text\",\n      \"index\": false\n    },\n    \"body\": {\n      \"type\": \"text\",\n      \"analyzer\": \"english\"\n    }\n  }\n}\n
"},{"location":"migrations/3.3.0_3.4.0/#step-2_3","title":"Step 2","text":"

Now you have a schema that is compatible with MIM 3.4.0 but isn't GDPR-compliant yet because the new field has no meaningful data.

Please pick your favourite scripting/programming language and populate the new column with the help of a dedicated script. You'll need to iterate over all the muc_messages documents with the following algorithm:

  1. Extract some documents (notice the size parameter) for conversion:
    GET muc_messages/_search/?size=100&q=!_exists_:from_jid\n
  2. Extract the sender's JID from the message field in the same way as described in the RDBMS migration section. Elasticsearch backend uses exclusively the xml format.
  3. Update the from_jid column with the value of the extracted sender's JID:
    POST localhost:9200/muc_messages/muc/%_id%/_update\n{\n  \"doc\": {\n    \"from_jid\" : \"%sender's jid%\"\n  }\n}\n
  4. Repeat all the actions until the full conversion of the database is done.
"},{"location":"migrations/3.5.0_3.6.0/","title":"3.5.0 to 3.6.0","text":""},{"location":"migrations/3.5.0_3.6.0/#push-notifications","title":"Push notifications","text":"

In this version, push notifications work with MongoosePush 2.0.0 and its API v3 by default.

"},{"location":"migrations/3.5.0_3.6.0/#push-notifications-are-send-from-the-servers-jid","title":"Push notifications are sent from the server's JID","text":"

Since this version, MongooseIM sends the PubSub publish request to push notifications node from the server's JID. Previously the publish request was sent from the user's JID. If the push PubSub node was created with pubsub#access_mode set to whitelist and pubsub#publish_model set to publishers, now the server's JID needs to be added to the push node in order to send the push notifications successfully.

It can be done by sending the following request from the push node's owner:

<iq to='pubsub.mypubsub'\n    type='set'\n    id='wy6Hibg='\n    from='alice@wonderland.com/resource'>\n    <pubsub xmlns='http://jabber.org/protocol/pubsub#owner'>\n        <affiliations node='punsub_node_for_my_private_iphone'>\n            <affiliation jid='mychat.com' affiliation='publish-only'/>\n        </affiliations>\n    </pubsub>\n</iq>\n
"},{"location":"migrations/3.5.0_3.6.0/#mod_push-module-is-no-longer-available","title":"mod_push module is no longer available","text":"

mod_push has been deprecated since MongooseIM 2.1.1 and it is no longer present in this release. Please use the push backend for mod_event_pusher, which is the direct equivalent of mod_push.

"},{"location":"migrations/3.5.0_3.6.0/#different-muc-light-room-schema-definition","title":"Different MUC Light room schema definition","text":"

We have introduced a change that enforces defining fields with default values. The previous setup led to problems with the RDBMS backend as separating MUC Light options for the schema from the default values was unintuitive. In a specific case when the default config was a subset of the schema and the client failed to provide these values when a room was created, MUC Light stored the incomplete config in the table. Then the missing config fields could not be supplied by the clients. If you've experienced this issue, a way to fix it is described in the Known issues page.

The current method makes it impossible to make the same mistake, as it disallows field definition without any default value.

"},{"location":"migrations/3.5.0_3.6.0/#what-has-changed-for-administrators","title":"What has changed? - for administrators","text":"
  • It's no longer possible to declare a room config field only with its name.
  • There is no default_config option anymore.
  • Declaring a field name and type without an atom key is no longer supported.
"},{"location":"migrations/3.5.0_3.6.0/#example-1","title":"Example 1","text":"

Old config:

{config_schema, [\n                 \"roomname\",\n                 \"subject\",\n                 \"background\",\n                 \"notification_sound\"\n                ]},\n{default_config, [\n                  {\"roomname\", \"The room\"},\n                  {\"subject\", \"Chit-chat\"}\n                 ]}\n

New config:

{config_schema, [\n                 {\"roomname\", \"The room\"},\n                 {\"subject\", \"Chit-chat\"},\n                 {\"background\", \"\"},\n                 {\"notification_sound\", \"\"}\n                ]}\n
"},{"location":"migrations/3.5.0_3.6.0/#example-2","title":"Example 2","text":"

Old config:

{config_schema, [\n                 \"roomname\",\n                 {\"subject\", binary},\n                 {\"priority\", priority, integer},\n                 {\"owners-height\", owners_height, float}\n                ]},\n{default_config, [\n                  {\"roomname\", \"The room\"},\n                  {\"subject\", \"Chit-chat\"},\n                  {\"priority\", 10}]}\n

New config:

{config_schema, [\n                 {\"roomname\", \"The room\"},\n                 {\"subject\", \"Chit-chat\"},\n                 {\"priority\", 10, priority, integer},\n                 {\"owners-height\", 180.0, owners_height, float}\n                ]}\n
"},{"location":"migrations/3.5.0_3.6.0/#what-has-changed-for-developers","title":"What has changed? - for developers","text":"

The room config schema is currently stored in a completely different data structure, so if you have any custom modules that use it, you'll need to adjust them. Additionally, all definitions and the room config API have been extracted from mod_muc_light.hrl and mod_muc_light_utils.erl into mod_muc_light_room_config.erl module.

For more information, please check the specs for types and functions in the aforementioned file.

"},{"location":"migrations/3.5.0_3.6.0/#what-hasnt-changed","title":"What hasn't changed?","text":"
  • The default room config is still the same, i.e. roomname (default: \"Untitled\") and subject (empty string).
  • The room config representation in databases (both Mnesia and RDBMS) is the same; no need for migration.
"},{"location":"migrations/3.5.0_3.6.0/#offline-storage","title":"Offline storage","text":"

In this version the offline storage entries contain one additional piece of information for internal use. Riak and mnesia backends don't require any changes when upgrading to this version. In the case of the RDBMS backends, a new column needs to be added. Below are MySQL, PgSQL and MSSQL queries which can be used to add the new column.

MySQL

ALTER TABLE offline_message ADD COLUMN permanent_fields mediumblob;\n

PostgreSQL

ALTER TABLE offline_message ADD COLUMN permanent_fields bytea;\n

MSSQL

ALTER TABLE [dbo].[offline_message] ADD permanent_fields varbinary(max);\n
"},{"location":"migrations/3.5.0_3.6.0/#persistent-cluster-id","title":"Persistent Cluster ID","text":"

In this version, a new cluster ID has been created, to correctly identify the lifetime of a cluster, across restarts and nodes joining and leaving. This is used for example by System Metrics. This cluster ID is persisted in RDBMS, when an RDBMS database is available, but a new table is required:

MySQL

CREATE TABLE mongoose_cluster_id (k varchar(50) PRIMARY KEY, v text);\n

PostgreSQL

CREATE TABLE mongoose_cluster_id (k varchar(50) PRIMARY KEY, v text);\n

MSSQL

CREATE TABLE mongoose_cluster_id (k varchar(50) NOT NULL PRIMARY KEY, v text);\n

"},{"location":"migrations/3.6.0_3.7.0/","title":"3.6.0 to 3.7.0","text":""},{"location":"migrations/3.6.0_3.7.0/#extended-scram-sha-support","title":"Extended SCRAM-SHA Support","text":"

Since this version, SCRAM authentication mechanisms were extended to support additional hashing algorithms. So far only SHA-1 was available for hashing and now SHA-224, SHA-256, SHA-384 and SHA-512 are also supported. This includes the authentication mechanisms and the password format that is stored. Please note that enabling and using this functionality might require adjusting the server setup.

"},{"location":"migrations/3.6.0_3.7.0/#sasl-mechanisms","title":"SASL mechanisms","text":"

The possible list of allowed SASL mechanisms was changed. We've added new and more secure methods that can be used during stream negotiation.

Please note that if you were using the following in the configurations file

{sasl_mechanisms, [cyrsasl_scram]}

using cyrsasl_scram as sasl_mechanism is now incorrect. You can achieve the same result of allowing the usage of SHA-1 with SCRAM authentication mechanism with:

{sasl_mechanisms, [cyrsasl_scram_sha1]}

You can also specify a list of all supported SCRAM-SHA mechanisms with:

{sasl_mechanisms, [cyrsasl_scram_sha1, cyrsasl_scram_sha224, cyrsasl_scram_sha256, cyrsasl_scram_sha384, cyrsasl_scram_sha512, cyrsasl_scram_sha1_plus, cyrsasl_scram_sha224_plus, cyrsasl_scram_sha256_plus, cyrsasl_scram_sha384_plus, cyrsasl_scram_sha512_plus]}

Before setting up this configuration, please make sure that the client application is capable of authenticating with a selected set of authentication mechanisms. For more details please refer to the authentication section.

"},{"location":"migrations/3.6.0_3.7.0/#scram-password-format","title":"SCRAM password format","text":"

To complement the extensions of the authentication mechanisms, the SCRAM password format was also updated. Please note that SCRAM is now the default password format. While it is still possible to configure the password storage in plaintext format, we highly discourage doing so for security reasons. Changing the default of this option can lead to unexpected behaviours, so if after the upgrade you encounter issues with authenticating the users, please check the config file. If you are missing any of the following configuration lines:

{password_format, scram} or {password_format, plain}

it means that you were using the default plaintext format.

Since the default of the password format has changed, your MongooseIM server thinks that the plaintext passwords are stored as SCRAM hashes. This can lead to users failing to authenticate.

If you are still using the plaintext password format, please consider migrating your password storage to store scram hashes instead. Using the plaintext password format is still possible to support legacy installations and to ease the debugging while developing new features. Should you want to continue using the plaintext password format please add the following in the auth_opts:

{password_format, plain}

Legacy plaintext and SCRAM formats are still supported. Nonetheless, please note that if you were using SCRAM as a password format, this meant that SHA-1 was used as the hashing algorithm. This allowed authenticating with PLAINTEXT and SCRAM-SHA-1.

In the new setup the user will still authenticate with those mechanisms given the possible slight syntax change explained above.

However, mixing of the old password format with the new authentication mechanisms can lead to conflicting situations where:

  1. A user wants to authenticate with e.g. SCRAM-SHA-256.
  2. Their old password format only stores a SHA-1 password hash.
  3. The authentication fails as it is not possible to derive SHA-256 hash from SHA-1.

If you want to use the new password format with a full set of supported SHA hashes, a password change is required to calculate all the new SHA hashes. Otherwise, please make sure that you provide the right sasl_mechanism configuration, where the mechanism you authenticate with is compatible with the password format you store.

For more details related to the new password format, please refer to authentication and SCRAM serialization sections.

"},{"location":"migrations/3.6.0_3.7.0/#message-retraction","title":"Message retraction","text":"

If you are using MAM with RDBMS, please update your database schema with the following queries. This change is necessary as the support for XEP-0424: Message Retraction requires a new column for the origin_id attribute of MAM messages, which allows MAM to identify the messages to retract. Indexes for this column are required for efficient queries. Only the messages stored after this change can be retracted.

MySQL

ALTER TABLE mam_message ADD COLUMN origin_id varchar(250) CHARACTER SET binary;\nCREATE INDEX i_mam_message_username_jid_origin_id USING BTREE ON mam_message (user_id, remote_bare_jid, origin_id);\n\nALTER TABLE mam_muc_message ADD COLUMN origin_id varchar(250) CHARACTER SET binary;\nCREATE INDEX i_mam_muc_message_room_id_sender_id_origin_id USING BTREE ON mam_muc_message (room_id, sender_id, origin_id);\n

PostgreSQL

ALTER TABLE mam_message ADD COLUMN origin_id varchar;\nCREATE INDEX i_mam_message_username_jid_origin_id ON mam_message USING BTREE (user_id, remote_bare_jid, origin_id);\n\nALTER TABLE mam_muc_message ADD COLUMN origin_id varchar;\nCREATE INDEX i_mam_muc_message_room_id_sender_id_origin_id ON mam_muc_message USING BTREE (room_id, sender_id, origin_id);\n

MSSQL

Note

i_mam_message_username_jid_id was missing from the schema, this is now fixed. It is not required by message retraction, but this change is recommended.

ALTER TABLE mam_message ADD origin_id nvarchar(250) NULL;\nCREATE INDEX i_mam_message_username_jid_id ON mam_message (user_id, remote_bare_jid, id);\nCREATE INDEX i_mam_message_username_jid_origin_id ON mam_message (user_id, remote_bare_jid, origin_id);\n\nALTER TABLE mam_muc_message ADD origin_id nvarchar(250) NULL;\nCREATE INDEX i_mam_muc_message_room_id_sender_id_origin_id ON mam_muc_message (room_id, sender_id, origin_id);\n
"},{"location":"migrations/3.6.0_3.7.0/#rdbms-backend-for-multi-user-chats-muc","title":"RDBMS backend for Multi-User Chats (MUC)","text":"

If you're planning to use the new RDBMS backend for MUC, note that the following tables need to be added to the schema:

MySQL

CREATE TABLE muc_rooms(\n    id SERIAL,\n    muc_host VARCHAR(250)   NOT NULL,\n    room_name VARCHAR(250)       NOT NULL,\n    options JSON            NOT NULL,\n    PRIMARY KEY (muc_host, room_name)\n);\n\nCREATE TABLE muc_room_aff(\n    room_id BIGINT          NOT NULL REFERENCES muc_rooms(id),\n    luser VARCHAR(250)      NOT NULL,\n    lserver VARCHAR(250)    NOT NULL,\n    resource VARCHAR(250)   NOT NULL,\n    aff SMALLINT            NOT NULL\n);\n\nCREATE INDEX i_muc_room_aff_id ON muc_room_aff (room_id);\n\nCREATE TABLE muc_registered(\n    muc_host VARCHAR(250)   NOT NULL,\n    luser VARCHAR(250)      NOT NULL,\n    lserver VARCHAR(250)    NOT NULL,\n    nick VARCHAR(250)       NOT NULL,\n    PRIMARY KEY (muc_host, luser, lserver)\n);\n

PostgreSQL

CREATE TABLE muc_rooms(\n    id BIGSERIAL            NOT NULL UNIQUE,\n    muc_host VARCHAR(250)   NOT NULL,\n    room_name VARCHAR(250)       NOT NULL,\n    options JSON            NOT NULL,\n    PRIMARY KEY (muc_host, room_name)\n);\n\nCREATE TABLE muc_room_aff(\n    room_id BIGINT          NOT NULL REFERENCES muc_rooms(id),\n    luser VARCHAR(250)      NOT NULL,\n    lserver VARCHAR(250)    NOT NULL,\n    resource VARCHAR(250)   NOT NULL,\n    aff SMALLINT            NOT NULL\n);\n\nCREATE INDEX i_muc_room_aff_id ON muc_room_aff (room_id);\n\nCREATE TABLE muc_registered(\n    muc_host VARCHAR(250)   NOT NULL,\n    luser VARCHAR(250)      NOT NULL,\n    lserver VARCHAR(250)    NOT NULL,\n    nick VARCHAR(250)       NOT NULL,\n    PRIMARY KEY (muc_host, luser, lserver)\n);\n

MSSQL

CREATE TABLE muc_rooms(\n    id BIGINT IDENTITY(1,1) NOT NULL UNIQUE,\n    muc_host VARCHAR(250)   NOT NULL,\n    room_name VARCHAR(250)       NOT NULL,\n    options VARCHAR(MAX)    NOT NULL,\n    PRIMARY KEY (muc_host, room_name)\n);\n\nCREATE TABLE muc_room_aff(\n    room_id BIGINT          NOT NULL REFERENCES muc_rooms(id),\n    luser VARCHAR(250)      NOT NULL,\n    lserver VARCHAR(250)    NOT NULL,\n    resource VARCHAR(250)   NOT NULL,\n    aff SMALLINT            NOT NULL\n);\n\nCREATE INDEX i_muc_room_aff_id ON muc_room_aff (room_id);\n\nCREATE TABLE muc_registered(\n    muc_host VARCHAR(250)   NOT NULL,\n    luser VARCHAR(250)      NOT NULL,\n    lserver VARCHAR(250)    NOT NULL,\n    nick VARCHAR(250)       NOT NULL,\n    PRIMARY KEY (muc_host, luser, lserver)\n);\n

"},{"location":"migrations/3.7.0_4.0.0/","title":"3.7.0 to 4.0.0","text":""},{"location":"migrations/3.7.0_4.0.0/#toml-configuration-file","title":"TOML configuration file","text":"

Note that a minor 4.0.1 version has been released with small but important changes to take into account if you're migrating to MongooseIM 4.0.

There is a new TOML configuration file: mongooseim.toml. The legacy mongooseim.cfg file is still supported as an alternative, but deprecated.

You are advised to rewrite your configuration file in the TOML format. Until then, you can still make MongooseIM use the old format by setting the MONGOOSEIM_CONFIG_FORMAT environment variable to cfg:

MONGOOSEIM_CONFIG_FORMAT=cfg mongooseimctl start

"},{"location":"migrations/3.7.0_4.0.0/#changes-in-hooks","title":"Changes in hooks","text":"

If you modified the code, e.g. by adding a custom extension module, you might want to update your handlers to the following hooks. You can find them in the mongoose_hooks module.

  • We refactored the MAM (XEP-0313) implementation, replacing the long lists of arguments accepted by the mam_archive_message and mam_muc_archive_message hooks with a more readable key-value structure (a map).
  • The argument list of the failed_to_store_message hook has been changed as a result of code refactoring.
"},{"location":"migrations/3.7.0_4.0.0/#otp-logger-as-the-logging-framework","title":"OTP Logger as the logging framework","text":"

We've transitioned from lager to Logger as our logging framework. No internal changes were introduced, and the default handlers still implement the same behaviour, but the configuration is different, though still done in the same place. To know more, please refer to each framework's documentation.

As an example, for our previous default lager configuration:

 {lager, [\n    %% Make logging more async\n    %% If some very heavy loaded process want to log something, it's better to not block the process.\n    {async_threshold, 2000},\n    {async_threshold_window, 500},\n    %% Kill sink if it has more than 10k messages\n    {killer_hwm, 10000},\n    {killer_reinstall_after, 5000},\n    {log_root, \"log\"},\n    {crash_log, \"crash.log\"},\n    {handlers, [\n        {lager_console_backend, [{level, info}]},\n        {lager_file_backend, [{file, \"ejabberd.log\"}, {level, info}, {size, 2097152}, {date, \"$D0\"}, {count, 5}]}\n    ]}\n  ]}\n

The equivalent Logger configuration is

 {kernel, [\n  {logger_level, warning},\n  {logger, [\n    %% Console logger\n    {handler, default, logger_std_h, #{}},\n    %% Disk logger for errors\n    {handler, disk_log, logger_disk_log_h,\n       #{config => #{\n           file => \"log/mongooseim.log\",\n           type => wrap,\n           max_no_files => 5,\n           max_no_bytes => 2097152,\n           sync_mode_qlen => 2000, % If sync_mode_qlen is set to the same value as drop_mode_qlen,\n           drop_mode_qlen => 2000, % synchronous mode is disabled. That is, the handler always runs\n           flush_qlen => 5000,     % in asynchronous mode, unless dropping or flushing is invoked.\n           overload_kill_enable => true\n         },\n         formatter => {logger_formatter, #{\n           depth => 12,\n           chars_limit => 4096\n         }}\n        }\n    }\n  ]}]}\n

"},{"location":"migrations/4.0.0_4.0.1/","title":"4.0.0 to 4.0.1","text":""},{"location":"migrations/4.0.0_4.0.1/#toml-configuration-file","title":"TOML configuration file","text":"

After the latest MongooseIM 4.0.0 release that announced the new TOML configuration format, we've changed a few configuration formats:

  • Removed the backend option for mod_bosh as \"mnesia\" was the only valid option.
  • Removed the backend option for mod_inbox as \"rdbms\" was the only valid option.
  • Deprecated mod_revproxy, it can now only be configured with the older, .cfg configuration file. Please refer to the older versions of the documentation to see how to do this.
  • For mod_global_distrib:
  • Replaced the bounce option with bounce.enabled for mod_global_distrib. It was a \"boolean with only false being a valid option\" which was very confusing. This was because when someone wanted to have bounce enabled it became a TOML table as opposed to a key. Now there is a switch in the bounce section for this behaviour which keeps the behaviour of having bounce enabled by default.
  • Replaced the tls option with tls.enabled for mod_global_distrib for the same reason. The only issue here is as tls is disabled by default (it needs some options in the tls section to be set), the \"no section - disabled\" approach seems more natural. Just for the consistency, it's changed to be similar to the bounce section in this regard.
"},{"location":"migrations/4.0.0_4.0.1/#mod_http_notification-module-is-no-longer-available","title":"mod_http_notification module is no longer available","text":"

mod_http_notification has been deprecated since MongooseIM 2.1.1 and it is no longer available in this release. Please use the http backend for mod_event_pusher, which is the direct equivalent of mod_http_notification.

"},{"location":"migrations/4.0.0_4.0.1/#metrics","title":"Metrics","text":"

mod_http_notification metric was updated and now is available as mod_event_pusher_http. For more details on how to configure mod_event_pusher with http backend, please see this section.

"},{"location":"migrations/4.0.1_4.1.0/","title":"4.0.1 to 4.1.0","text":""},{"location":"migrations/4.0.1_4.1.0/#http-file-upload","title":"HTTP File Upload","text":"

HTTP File Upload specification older than 0.3.0 is no longer supported, i.e. the one namespaced with urn:xmpp:http:upload. Currently, only the urn:xmpp:http:upload:0 XMLNS is served.

All major, modern client libraries and applications support the 0.3.0+ specification. If you experience any issues with making requests to the HTTP File Upload service, please update your client.

"},{"location":"migrations/4.0.1_4.1.0/#retirement-of-the-old-cfg-format","title":"Retirement of the old *.cfg format","text":"

Since release 4.1.0, we are no longer supporting the *.cfg MongooseIM configuration format. Please use the TOML format instead.

"},{"location":"migrations/4.0.1_4.1.0/#minor-changes-in-the-toml-config-format","title":"Minor changes in the TOML config format","text":"
  • mod_bosh.max_pause instead of maxpause

  • mod_disco.server_info.module: the field is optional, no longer required

  • mod_global_distrib.connections.advertised_endpoints: default not set (false is no longer accepted)

  • mod_global_distrib.connections.tls.enabled: the flag was removed, TLS is enabled by providing the cacertfile and certfile options

  • mod_http_upload.max_file_size: undefined is no longer allowed

  • mod_mam_meta.user_prefs_store: false is no longer allowed

  • mod_muc_light.config_schema: the usage of value and type fields was replaced with one of the following fields: string_value, integer_value or float_value

  • mod_muc_log.css_file: the default value was changed from \"false\" to not set

  • mod_stream_management: minor adjustments of buffer_max and ack_freq options, buffer and ack booleans were added

  • listen.c2s.tls.ciphers, listen.http.tls.ciphers and outgoing_pools.*.*.connection.tls.ciphers: the ciphers should now be formatted as a specification string

  • listen.http.handlers.mod_websockets.ping_rate: none is no longer allowed

"},{"location":"migrations/4.1.0_4.2.0/","title":"4.1.0 to 4.2.0","text":""},{"location":"migrations/4.1.0_4.2.0/#minor-changes-in-the-toml-config-format","title":"Minor changes in the TOML config format","text":"
  • The pgsql_users_number_estimate option was moved to auth.rdbms.users_number_estimate. The new option supports PostgreSQL and MySQL.
"},{"location":"migrations/4.1.0_4.2.0/#db-migrations","title":"DB migrations","text":""},{"location":"migrations/4.1.0_4.2.0/#new-inbox-features","title":"New inbox features","text":"

Inbox now implements new functionality (see inbox), but this required adding new columns to the DB. If you're using inbox, please update the tables as follows:

For Postgres or MySQL:

ALTER TABLE inbox\n  ADD COLUMN archive BOOLEAN DEFAULT false,\n  ADD COLUMN muted_until BIGINT DEFAULT 0;\n
For MSSQL:
ALTER TABLE inbox\n  ADD COLUMN archive TINYINT DEFAULT 0,\n  ADD COLUMN muted_until BIGINT DEFAULT 0;\n

"},{"location":"migrations/4.1.0_4.2.0/#archived-groupchat-messages-in-mod_mam","title":"Archived groupchat messages in mod_mam","text":"

The archive_groupchats option is now set to false by default, as documented. Before the change, the private message (PM) archive stored incoming groupchat messages as well, contrary to the documentation. After the upgrade you can manually remove those messages from the database. For example, when the MUC domain is muc.localhost and rdbms_message_format has the default value internal, one can remove such messages with the following query:

DELETE FROM mam_message\n  WHERE direction = 'I' AND remote_bare_jid LIKE 'muc.localhost:%';\n

This can be a heavy operation and it needs to be done with caution.

"},{"location":"migrations/4.1.0_4.2.0/#using-mod_auth_token-with-mysql-and-ms-sql","title":"Using mod_auth_token with MySQL and MS SQL","text":"

The mod_auth_token module supports MySQL and MS SQL now. To use this functionality, you need to create the auth_token table with the query which you can find in priv/mysql.sql and priv/mssql2012.sql, respectively.

"},{"location":"migrations/4.2.0_5.0.0/","title":"4.2.0 to 5.0.0","text":""},{"location":"migrations/4.2.0_5.0.0/#db-migrations","title":"DB migrations","text":"

The migration scripts for Postgres, MySQL, MSSQL can be found in the priv/migrations directory. Please remember to provide the existing server domain for the server column instead of localhost.

"},{"location":"migrations/4.2.0_5.0.0/#changes-in-xeps","title":"Changes in XEPs:","text":"
  • mod_last
    • Table last - added server column, updated primary key and indexes.
  • mod_privacy
    • Table privacy_default_list - added server column, updated primary key and indexes.
    • Table privacy_list - added server column, updated primary key and indexes.
  • mod_private
    • Table private_storage - added server column, updated primary key and indexes, removed unused columns.
  • mod_roster
    • Table rosterusers - added server column, updated indexes.
    • Table rostergroups - added server column, updated indexes.
    • Table roster_version - added server column, updated primary key and indexes.
  • mod_muc
    • Table i_muc_light_blocking - updated indexes.
  • mod_inbox
    • Table inbox - updated primary key and indexes.
"},{"location":"migrations/4.2.0_5.0.0/#other-changes","title":"Other changes:","text":"
  • RDBMS auth - modified users table.
  • Added domain_settings table.
  • Added domain_events table.
"},{"location":"migrations/4.2.0_5.0.0/#config-migrations","title":"Config migrations","text":"

Most important changes without which a server might not run.

"},{"location":"migrations/4.2.0_5.0.0/#section-general","title":"Section general","text":"
  • New mandatory option default_server_domain that must be set. This domain is used as a default when one cannot be determined, for example when sending XMPP stream errors to unauthenticated clients.
  • Option hosts is no longer mandatory, but if omitted, at least one host type has to be defined.
  • New option host_types. If omitted, at least one host has to be defined. This is the list of names for the types of hosts that will serve dynamic XMPP domains.

Simply use hosts if you want to stay with static domains and use host_types for dynamic domains.

"},{"location":"migrations/4.2.0_5.0.0/#section-host_config","title":"Section host_config","text":"
  • Option host specifies the XMPP domain that this section refers to.
  • New option host_type specifies the host type that this section refers to. Either this option or host is mandatory.

For every configured host, a host_type of the same name is declared automatically. As the host_config section is now used for changing the configuration of a host_type, the host option is no longer needed. However, to stay compatible with the old config format, the host option is kept as well. Now it is just a synonym for host_type.

"},{"location":"migrations/4.2.0_5.0.0/#hook-migrations","title":"Hook migrations","text":"

The support for dynamic domains has resulted in changes in most Hooks. Mostly these changes concern calling hooks for a host_type.

"},{"location":"migrations/4.2.0_5.0.0/#added","title":"Added","text":"
  • adhoc_local_commands/4
  • adhoc_sm_commands/4
  • does_user_exist/3
  • get_room_affiliations/2
  • mam_get_behaviour/4
  • mam_set_prefs/6
  • disco_muc_features/1
  • remove_domain/2
  • node_cleanup/1
  • gen_mod:get_module_opt_by_subhost
"},{"location":"migrations/4.2.0_5.0.0/#removed","title":"Removed","text":"
  • host_config_update/4
  • local_send_to_resource_hook/5
  • muc_room_pid/3
  • amp_check_packet/4
  • amp_error_action_triggered/2
  • amp_notify_action_triggered/2
  • room_send_packet/3
  • caps_update/6
  • caps_add/6
"},{"location":"migrations/4.2.0_5.0.0/#changed","title":"Changed","text":"
  • auth_failed/2 -> auth_failed/3
  • failed_to_store_message/2 -> failed_to_store_message/1
  • filter_local_packet/2 -> filter_local_packet/1
  • get_key/3 -> get_key/2
  • register_command/2 -> register_command/1
  • register_subhost/3 -> register_subhost/2
  • resend_offline_messages_hook/3 -> resend_offline_messages_hook/2
  • rest_user_send_packet/5 -> rest_user_send_packet/4
  • set_vcard/4 -> set_vcard/3
  • unregister_command/2 -> unregister_command/1
  • unregister_subhost/2 -> unregister_subhost/1
  • user_ping_timeout/3 -> user_ping_timeout/2
  • user_send_packet/5 -> user_send_packet/4
  • xmpp_stanza_dropped/5 -> xmpp_stanza_dropped/4
  • c2s_broadcast_recipients/6 -> c2s_broadcast_recipients/4
  • c2s_filter_packet/6 -> c2s_filter_packet/4
  • c2s_presence_in/5 -> c2s_presence_in/4
  • check_bl_c2s/2 -> check_bl_c2s/1
  • session_opening_allowed_for_user/3 -> session_opening_allowed_for_user/2
  • privacy_check_packet/6 -> privacy_check_packet/5
  • privacy_get_user_list/3 -> privacy_get_user_list/2
  • privacy_updated_list/4 -> privacy_updated_list/3
  • offline_groupchat_message_hook/5 -> offline_groupchat_message_hook/4
  • offline_message_hook/5 -> offline_message_hook/4
  • set_presence_hook/5 -> set_presence_hook/3
  • sm_broadcast/6 -> sm_broadcast/5
  • sm_filter_offline_message/5 -> sm_filter_offline_message/4
  • sm_remove_connection_hook/6 -> sm_remove_connection_hook/5
  • unset_presence_hook/5 -> unset_presence_hook/3
  • xmpp_bounce_message/2 -> xmpp_bounce_message/1
  • roster_get/3 -> roster_get/2
  • roster_get_jid_info/4 -> roster_get_jid_info/3
  • roster_get_versioning_feature/2 -> roster_get_versioning_feature/1
  • roster_groups/2 -> roster_groups/1
  • roster_in_subscription/6 -> roster_in_subscription/5
  • roster_out_subscription/5 -> roster_out_subscription/4
  • roster_process_item/2 -> roster_process_item/3
  • roster_push/4 -> roster_push/3
  • roster_set/5 -> roster_set/4
  • is_muc_room_owner/4 -> is_muc_room_owner/3
  • can_access_identity/4 -> can_access_identity/3
  • can_access_room/4 -> can_access_room/3
  • mam_archive_id/3 -> mam_archive_id/2
  • mam_archive_size/4 -> mam_archive_size/3
  • mam_get_behaviour/5 -> mam_get_behaviour/4
  • mam_set_prefs/7 -> mam_set_prefs/6
  • mam_remove_archive/4 -> mam_remove_archive/3
  • mam_lookup_messages/3 -> mam_lookup_messages/2
  • mam_archive_message/3 -> mam_archive_message/2
  • mam_muc_archive_id/3 -> mam_muc_archive_id/2
  • mam_muc_archive_size/4 -> mam_muc_archive_size/3
  • mam_muc_get_behaviour/5 -> mam_muc_get_behaviour/4
  • mam_muc_set_prefs/7 -> mam_muc_set_prefs/6
  • mam_muc_remove_archive/4 -> mam_muc_remove_archive/3
  • mam_muc_lookup_messages/3 -> mam_muc_lookup_messages/2
  • mam_muc_archive_message/3 -> mam_muc_archive_message/2
  • mam_muc_flush_messages/3 -> mam_muc_flush_messages/2
  • get_mam_pm_gdpr_data/3 -> get_mam_pm_gdpr_data/2
  • get_mam_muc_gdpr_data/3 -> get_mam_muc_gdpr_data/2
  • get_personal_data/3 -> get_personal_data/2
  • find_s2s_bridge/3 -> find_s2s_bridge/2
  • s2s_allow_host/3 -> s2s_allow_host/2
  • s2s_connect_hook/3 -> s2s_connect_hook/2
  • s2s_receive_packet/2 -> s2s_receive_packet/1
  • disco_local_identity/6 -> disco_local_identity/1
  • disco_sm_identity/6 -> disco_sm_identity/1
  • disco_local_items/6 -> disco_local_items/1
  • disco_sm_items/6 -> disco_sm_items/1
  • disco_local_features/6 -> disco_local_features/1
  • disco_sm_features/6 -> disco_sm_features/1
  • disco_info/5 -> disco_info/1
  • amp_check_condition/4 -> amp_check_condition/3
  • amp_determine_strategy/6 -> amp_determine_strategy/5
  • amp_verify_support/3 -> amp_verify_support/2
  • forget_room/4 -> forget_room/3
  • invitation_sent/7 -> invitation_sent/6
  • join_room/6 -> join_room/5
  • leave_room/6 -> leave_room/5
  • room_packet/6 -> room_packet/5
  • caps_recognised/5 -> caps_recognised/4
  • pubsub_create_node/6 -> pubsub_create_node/5
  • pubsub_delete_node/5 -> pubsub_delete_node/4
  • pubsub_publish_item/7 -> pubsub_publish_item/6
  • mod_global_distrib_known_recipient/5 -> mod_global_distrib_known_recipient/4
"},{"location":"migrations/4.2.0_5.0.0/#metrics-rest-api-obsolete","title":"Metrics REST API (obsolete)","text":"

The API is still considered obsolete so if you are using it, please consider using WombatOAM or metrics reporters as described in Logging and monitoring.

In each endpoint, host has been changed to host_type. This is because the metrics are now collected per host type rather than host.

"},{"location":"migrations/4.2.0_5.0.0/#users-cache","title":"Users cache","text":"

MongooseIM used to feature a cache to check whether a user exists, that was unavoidably enabled, and had no eviction policy, that is, the cache could continue growing forever. Now, MIM features a module called mod_cache_users that implements a configurable cache policy, that can be enabled, disabled, and parametrised, per host_type. This might not be enabled by default in your configuration, so we recommend you verify your configuration and enable it if needed.

"},{"location":"migrations/5.0.0_5.1.0/","title":"5.0.0 to 5.1.0","text":""},{"location":"migrations/5.0.0_5.1.0/#configuration-changes","title":"Configuration changes","text":"

The configuration format has slightly changed and you might need to amend mongooseim.toml.

"},{"location":"migrations/5.0.0_5.1.0/#section-listen","title":"Section listen","text":"

The options tls.verify_peer and tls.verify_mode are replaced with just tls.verify_mode. You need to amend the affected options for each listener:

  • verify_peer = false (the old default for C2S and HTTP listeners) should be replaced with verify_mode = \"none\".
  • verify_peer = true should be replaced with verify_mode = \"peer\" or just removed, as this is the new default.

There is a new, simplified configuration format for mongoose_client_api. You need to change the listen section unless you have disabled the client API in your configuration file. Consult the option description and the example configuration for details.

"},{"location":"migrations/5.0.0_5.1.0/#section-acl","title":"Section acl","text":"

The implicit check for user's domain in patterns is now configurable and the default behaviour (previously undocumented) is more consistent - the check is always performed unless disabled with match = \"all\". See the description of current_domain for more details.

"},{"location":"migrations/5.0.0_5.1.0/#section-auth","title":"Section auth","text":"
  • Each authentication method needs a TOML section, e.g. if you have the rdbms method enabled, you need to have the [auth.rdbms] section in the configuration file, even if it is empty. The methods option is not required anymore and especially if you are using only one method, you can remove it.
  • The auth.scram_iterations option was moved to auth.password.scram_iterations.

See the auth configuration for details.

"},{"location":"migrations/5.0.0_5.1.0/#section-outgoing_pools","title":"Section outgoing_pools","text":"

The option tls.verify_peer is replaced with tls.verify_mode. You need to change this option for each outgoing connection pool:

  • verify_peer = false (the old default for all pools except Riak) should be replaced with verify_mode = \"none\".
  • verify_peer = true should be replaced with verify_mode = \"peer\" or just removed, as this is the new default.

A few options of the outgoing connection pools were changed for consistency:

  • Cassandra servers: ip_address was renamed to host,
  • RabbitMQ: the amqp_ option prefix was removed,
  • LDAP: rootdn was renamed to root_dn; encrypt was removed (the tls option should be used instead).
"},{"location":"migrations/5.0.0_5.1.0/#section-s2s","title":"Section s2s","text":"
  • All options can be set globally or inside host_config.
  • The host_config.s2s section overrides the whole global section now. Previously only the specified options were overridden.
  • The domain_certfile option has been moved to the general section because it affects c2s connections as well.
"},{"location":"migrations/5.0.0_5.1.0/#section-host_config","title":"Section host_config","text":"

The rules for overriding global options in the host_config section have been simplified. The auth section and the s2s.address and s2s.host_policy options now completely override the corresponding general settings instead of being merged with them.

"},{"location":"migrations/5.0.0_5.1.0/#extension-modules","title":"Extension modules","text":"
  • mod_auth_token has a new configuration format - if you are using this module, amend the validity_period option.
  • mod_event_pusher has an updated configuration format - the backend subsection is removed and the http backend has a new handlers option. Adjust your configuration according to mod_event_pusher documentation.
  • mod_mam_meta does not have the rdbms_message_format and simple options anymore. Use db_jid_format and db_message_format instead.
  • mod_shared_roster_ldap all options have their ldap_ prefix dropped.
  • mod_vcard LDAP options are moved into an LDAP subsection.
"},{"location":"migrations/5.0.0_5.1.0/#async-workers","title":"Async workers","text":"

The async_writer flag of MAM is now a section on its own, that absorbs previous flags related to it: flush_interval, max_batch_size and pool_size now become subelements of the async_writer section, with one more parameter, enabled. Below an example:

[modules.mod_mam_meta]\n  flush_interval = 1000\n  max_batch_size = 100\n  muc.async_writer = false\n
now becomes
[modules.mod_mam_meta]\n  async_writer.flush_interval = 1000\n  async_writer.batch_size = 100\n  muc.async_writer.enabled = false\n

"},{"location":"migrations/5.0.0_5.1.0/#smart-markers","title":"Smart markers","text":"

There's an experimental and undocumented module called mod_smart_markers, that had a default table in the RDBMS schema, which you probably never used (or shouldn't have, as it was undocumented). If you rely on this table, the column from_jid has been split in from_luser and lserver, in order to support the remove_domain callback for the dynamic domains functionality. You might need to migrate it, or simply drop the previously defined table and recreate the new one.

"},{"location":"migrations/5.0.0_5.1.0/#inbox","title":"Inbox","text":"

The archive functionality recently introduced has been extended to support many more boxes. IQ queries can remain as they were, but, a new flag called box is now introduced, and if provided, it takes preference over the old archive flag. The database requires a migration, as the archive is now a column storing the proper name of the box, see the migrations for Postgres, MySQL and MSSQL in the priv/migrations directory.

"},{"location":"migrations/5.0.0_5.1.0/#removal-of-deprecated-modules","title":"Removal of deprecated modules","text":"
  • mod_revproxy - removed from the code base as it was unsupported since 4.2.0.
  • mod_aws_sns - its functionality is fully covered by mod_event_pusher.
"},{"location":"migrations/5.0.0_5.1.0/#internal-module-configuration-rework","title":"Internal module configuration rework","text":"

If you are using your own extension modules (or services), you need to update the code. The most important change is that configuration options were stored in proplists before, and now they are stored in maps, so e.g. the start/2 function of your module should expect a map as the second argument.

"},{"location":"migrations/5.1.0_6.0.0/","title":"5.1.0 to 6.0.0","text":""},{"location":"migrations/5.1.0_6.0.0/#module-configuration","title":"Module configuration","text":"
  • The mod_mam_meta module is now named mod_mam for simplicity, so if you are using this module, you need to update the module name in mongooseim.toml.
  • mod_commands, mod_inbox_commands, mod_muc_commands and mod_muc_light_commands are removed. Their functionality is now fully covered by mongoose_admin_api. You need to delete these modules from mongooseim.toml.
"},{"location":"migrations/5.1.0_6.0.0/#metrics","title":"Metrics","text":"

The mod_mam backend module is now named mod_mam_pm for consistency with mod_mam_muc. As a result, the backend metrics have updated names, i.e. each [backends, mod_mam, Metric] name is changed to [backends, mod_mam_pm, Metric], where Metric can be lookup or archive.

"},{"location":"migrations/5.1.0_6.0.0/#rest-api","title":"REST API","text":"

The whole REST API has been unified and simplified. There are now only two REST API handlers that you can configure in the listen section of mongooseim.toml:

  • mongoose_admin_api handles the administrative API,
  • mongoose_client_api handles the client-facing API.

You need to remove the references to the obsolete handlers (mongoose_api_client, mongoose_api_admin, mongoose_api, mongoose_domain_handler) from your configuration file.

Additionally, all the backend administration endpoints for mod_muc_light require now XMPPMUCHost (MUC subdomain) instead of XMPPHost (domain) and roomID instead of roomName.

For some endpoints, the response messages may be slightly different because of the unification with other APIs.

"},{"location":"migrations/5.1.0_6.0.0/#command-line-interface","title":"Command Line Interface","text":"

For some commands, the response messages may be slightly different because of the unification with other APIs.

"},{"location":"migrations/5.1.0_6.0.0/#dynamic-domains","title":"Dynamic domains","text":"

Removing a domain was a potentially troublesome operation: if the removal failed midway through the process, retries would not be accepted. This is now fixed: the domain is first disabled and marked for removal, then all the handlers are run, and only on full success is the domain removed. So if any failure is reported, the whole operation can be retried.

The database requires a migration, as the status of a domain takes now more than the two values a boolean allows, moreover the table for data of the domain admin has been added. See the migrations for Postgres, MySQL and MSSQL in the priv/migrations directory.

"},{"location":"migrations/5.1.0_6.0.0/#hooks","title":"Hooks","text":"

Support for ejabberd_hooks has been removed. Now handlers should be compliant with gen_hook. If you have some custom modules (e.g. that implement some XMPP extensions) and use hooks mechanism, please refactor your handlers to be compliant with it. For more details refer to Hooks and Handlers chapter.

"},{"location":"migrations/6.0.0_6.1.0/","title":"6.0.0 to 6.1.0","text":""},{"location":"migrations/6.0.0_6.1.0/#listener-configuration","title":"Listener configuration","text":"

With the new implementation of the client-to-server (C2S) state machine, mongoose_c2s, there are some changes in the listener options:

  • The zlib option for supporting stream compression, which was present in the default configuration file, is removed, and you need to delete it from your listener configuration unless you have already done so. The extension is obsolete due to a security issue — the CRIME vulnerability was discovered a long time ago.
  • Support for [listen.http.handlers.mod_websockets.service] has been removed; the component connection over WebSockets did not correspond to any XEP/RFC, nor was it properly described anywhere in the MIM documentation. It was present in the default configuration file, and you need to delete it from your listener configuration unless you have already done so.
  • The max_fsm_queue option is no longer supported for C2S listeners. It is incompatible with the new gen_statem state machine, and if you need to limit incoming traffic, you should use traffic shapers instead. You need to remove this option from your C2S configuration if you are using it.
  • The default value of the backlog option for all XMPP listeners has been increased from 100 to 1024 for performance reasons.
  • You might be interested in the new C2S listener options: max_connections, c2s_state_timeout, reuse_port and backwards_compatible_session. The first two options can be set for websockets as well.
"},{"location":"migrations/6.0.0_6.1.0/#module-configuration","title":"Module configuration","text":"

The mongoose_c2s module, which provides the core XMPP features, is now separated from modules which used to have their parts hardcoded into the old C2S implementation:

  • Presence handling has been exported to a separate module mod_presence, which is enabled in the default configuration file. Enable mod_presence in your configuration file unless you are sure that you don't need server-side presence handling, in which case you could gain some performance by not using this module.
  • Stream management is now handled completely by mod_stream_management, and if you don't need it, you can now gain more performance than before by disabling it.
  • Client state indication is now handled completely by mod_csi, and if you don't need it, you can now gain more performance than before by disabling it.
"},{"location":"migrations/6.0.0_6.1.0/#database-migration","title":"Database migration","text":"

There is no database migration required when upgrading from version 6.0.0 to 6.1.0.

"},{"location":"migrations/6.0.0_6.1.0/#metrics","title":"Metrics","text":"

The c2s_unauthenticated_iq metric has been removed.

Since we don't know whether a compressed/encrypted packet contains a single stanza or a batch, calculation of histogram metrics for compressed and encrypted streams is inaccurate. The following histogram metrics have been removed: * global.data.xmpp.received.compressed_size - obsolete, stream compression support is removed. * global.data.xmpp.sent.compressed_size - obsolete, stream compression support is removed. * global.data.xmpp.received.encrypted_size - impractical, has no value but consumes calculation resources. * global.data.xmpp.sent.encrypted_size - impractical, has no value but consumes calculation resources.

A set of global.data.xmpp.received.** and global.data.xmpp.sent.** spiral data metrics has been introduced instead.

"},{"location":"migrations/6.0.0_6.1.0/#hooks","title":"Hooks","text":"

Multiple hooks have been added, removed or changed because of the introduction of mongoose_c2s - the most important change is the increased granularity of the user_send_* and user_receive_* hooks. If you have some custom modules (e.g. that implement some XMPP extensions) using the hooks mechanism, please refactor your handlers to be compliant with the new hooks. Refer to Hooks Description and Message routing for more details.

"},{"location":"migrations/6.0.0_6.1.0/#upgrade-procedure","title":"Upgrade procedure","text":"

As long as you are not using Mnesia for persistent storage (it is not recommended to do so), the safest option would be to prepare a new cluster with version 6.1.0, and switch the traffic to it on a load balancer. The only downside is that clients connected to different clusters would see each other as offline. If you are limited to one cluster, it is recommended to do a split-cluster rolling upgrade by removing each node from the cluster before stopping and upgrading it, and gradually forming a new separate cluster from the upgraded nodes. This means that for each newly started node except the first one, you should join one of the previously started nodes.

"},{"location":"migrations/6.0.0_6.1.0/#rolling-upgrade-issues","title":"Rolling upgrade issues","text":"

If you want to perform a typical rolling upgrade instead, there are a few potential issues caused by the introduction of mongoose_c2s. When a node is stopped, upgraded and started again, it reconnects to the cluster. When a stanza is routed between users connected to different nodes of the cluster, an internal message is sent between the nodes. In version 6.1.0 that message has a different format, and routing a stanza between nodes with versions 6.0.0 and 6.1.0 would fail, resulting in a warning message for each stanza. This means that after upgrading the first node you might get a huge number of warning messages on all nodes, causing a performance drop. What is more, the sender's node would still assume that the recipient is online, and some actions (e.g. responding with the service-unavailable error) would be omitted.

Changing the log level

You can set the log level to error during the upgrade to silence the excess warnings. Before the upgrade, set the log level on all nodes:

mongooseimctl server setLoglevel --level ERROR\n
Before starting the upgraded node, set the loglevel option to error in the configuration file. After the whole upgrade procedure, use mongooseimctl to change the log level back to the previous value (warning by default). Change the values in the configuration files as well to make the setting permanent.

"},{"location":"migrations/6.1.0_6.2.0/","title":"6.1.0 to 6.2.0","text":""},{"location":"migrations/6.1.0_6.2.0/#internal-databases","title":"Internal databases","text":"

So far MongooseIM has been using the internal Mnesia database to replicate the in-memory data between cluster nodes. Now there is an option to use CETS instead. Mnesia is still used by default, so you don't need to change your configuration file. If you want to switch to CETS, see the tutorial and the reference for internal_databases.

"},{"location":"migrations/6.1.0_6.2.0/#database-migration","title":"Database migration","text":"

There is a new table discovery_nodes in the database, which is used by CETS for dynamic discovery of cluster nodes. See the migrations for Postgres, MySQL and MSSQL in the priv/migrations directory. Although the new table is only needed by CETS, we recommend applying the migration anyway to keep the database in sync with the latest schema.

"},{"location":"migrations/6.1.0_6.2.0/#validation-of-tls-options","title":"Validation of TLS options","text":"

Erlang/OTP 26 has more strict checking of the TLS options, as described in release highlights. MongooseIM follows the same rules now, preventing runtime crashes if TLS is misconfigured.

By default verify_mode is set to \"peer\" for each tls section in the configuration, and this requires cacertfile - otherwise the server will refuse to start. This was already documented, but not enforced. The option \"selfsigned_peer\" also requires cacertfile now.

This change affects the following configuration sections:

  • Listeners. Currently, it only affects http and c2s with tls.module set to \"just_tls\", but we recommend fixing it for all listeners already, because in future releases all listeners would have this validation.
  • Outgoing connections.

For each of the affected sections, if there is any tls option present, make sure that either tls.cacertfile is provided or tls.verify_mode is set to \"none\".

"},{"location":"migrations/6.1.0_6.2.0/#transition-to-new-cli-commands","title":"Transition to New CLI Commands","text":"

Legacy CLI commands previously marked as deprecated have now been removed. The users are encouraged to explore the new GraphQL-based CLI. It is recommended to transition to the new CLI commands prior to the next system upgrade. The configuration options general.mongooseimctl_access_commands and services.service_admin_extra related to the legacy CLI were also removed. You need to remove them from your configuration file unless you have already done so.

"},{"location":"migrations/6.1.0_6.2.0/#removed-support-for-riak","title":"Removed support for Riak","text":"

The deprecated and obsolete Riak database is not supported anymore, and you cannot configure it in the outgoing_pools section.

"},{"location":"migrations/6.2.0_6.2.1/","title":"6.2.0 to 6.2.1","text":""},{"location":"migrations/6.2.0_6.2.1/#database-migration","title":"Database migration","text":"

The migration scripts for PostgreSQL, MySQL and MS SQL are in the priv/migrations directory. They are required due to the following changes:

"},{"location":"migrations/6.2.0_6.2.1/#mam-message-improvements","title":"MAM message improvements","text":"

There is a new column in the mam_message table, which is used to support including or excluding groupchat results in a user archive (mod_mam). Please be aware, that the filtering process will only be effective for new messages and will not apply to those messages that have already been stored in the database.

"},{"location":"migrations/6.2.0_6.2.1/#roster","title":"Roster","text":"

mod_roster was internally refactored to modernise and improve the performance of the code, but as a side-effect, some database migrations need to be carried out.

"},{"location":"migrations/6.2.0_6.2.1/#cets-node-discovery","title":"CETS node discovery","text":"

The discovery_nodes table used by the CETS internal database has been updated - now each node name can appear only once, while in the past a node could be a part of multiple clusters. Manual intervention might be needed if there are nodes that belong to more than one cluster.

"},{"location":"migrations/6.2.0_6.2.1/#entity-capabilities","title":"Entity capabilities","text":"

mod_caps has a new RDBMS backend, making it possible to use it with CETS. As a result, a new table caps is added to the DB schema.

"},{"location":"migrations/6.2.0_6.2.1/#configuration-changes-outgoing-pools","title":"Configuration changes: outgoing pools","text":"

The outgoing connections option host is now named host_type, see outgoing pools for more information.

The option single_host for the scope has been deprecated, in favour of configuring the specified pools within the host_config section.

"},{"location":"migrations/6.2.0_6.2.1/#functional-changes-presences","title":"Functional changes: presences","text":"

mod_presence was internally refactored to modernise and improve the performance of the code, but as a side-effect, code for XEP-0018 was removed. Note that this XEP was not advertised and as a matter of fact was deprecated already in 2003, so if your client was depending on it, it is high time to update.

"},{"location":"migrations/6.2.0_6.2.1/#upgrade-procedure","title":"Upgrade procedure","text":"

The standard migration procedure is to stop the cluster, apply the DB migrations, and start the new version of the cluster.

Should you require no downtime, you could apply the DB migration first, and then perform the rolling upgrade procedure - either manually or using helm or kubectl. If you are using CETS, the restarted nodes will stay disconnected from the ones still running the previous version, causing transient connectivity issues between the end users connected to different parts of the cluster. This is due to changes in the internal CETS API.

"},{"location":"migrations/6.2.1_x.x.x/","title":"6.2.1 to x.x.x","text":""},{"location":"migrations/6.2.1_x.x.x/#hooks","title":"Hooks","text":"

Hook names have been unified by removing the _hook prefix from the few hooks which used it, e.g. offline_message_hook is now called offline_message. This change affects the hook metric names as well.

"},{"location":"migrations/jid-from-mam-muc-script/","title":"MAM MUC migration helper","text":""},{"location":"migrations/jid-from-mam-muc-script/#the-purpose-of-sender-jid-from-mam-messageescript","title":"The purpose of sender-jid-from-mam-message.escript","text":"

This script may be used as a part of migration from MongooseIM 3.3.0 (or older). It is able to extract a JID of a groupchat message sender from an XML payload. This piece of information is essential for GDPR commands (retrieve data and remove user) to work properly, as without it the operations on MAM MUC data in DB would be extremely inefficient.

Please consult \"3.3.0 to...\" migration guide for details. DB-specific sections describe where the payloads are stored and what you should do with the extracted JID.

"},{"location":"migrations/jid-from-mam-muc-script/#requirements","title":"Requirements","text":"

This script may be executed in every *nix environment which has OTP 19.0 (or newer) installed and the escript executable available in PATH.

It doesn't depend on any MongooseIM code or library, so it may be used as a standalone file.

"},{"location":"migrations/jid-from-mam-muc-script/#how-to-use","title":"How to use?","text":"

sender-jid-from-mam-message.escript (eterm | xml)

The only parameter required by the script is the input format.

You should use eterm if (in MongooseIM config file):

  • You haven't set db_message_format option for MAM at all.
  • db_message_format is set to mam_message_compressed_eterm or mam_message_eterm

You should use the xml option if:

  • db_message_format is set to mam_message_xml.

Once started, the script will run in an infinite loop (until killed or interrupted), expecting a stream of inputs. For every provided payload, a JID will be returned immediately. All communication with the script is done via stdio.

"},{"location":"migrations/jid-from-mam-muc-script/#input-format","title":"Input format","text":"

For both eterm and xml mode, the script expects an input in a very similar format. The high-level overview is:

LENGTH\\nPAYLOAD\n
  • LENGTH is the PAYLOAD length in bytes; if the data retrieved from a DBMS is a Unicode string, LENGTH is equal to the number of bytes used to encode this string
  • PAYLOAD is a sequence of bytes; if a DBMS returns binary data encoded as hex, then it has to be decoded to raw bytes
  • LENGTH and PAYLOAD are separated with a newline character (ASCII code 10 / 0x0a)
"},{"location":"migrations/jid-from-mam-muc-script/#output-format","title":"Output format","text":"

The script output format is very similar to the input:

LENGTH\\nJID\n
  • LENGTH is the number of bytes in a JID
  • JID is a sequence of bytes, which encodes a Unicode string
  • LENGTH and JID are separated with a newline character (ASCII code 10 / 0x0a)

In case of an error (that is not a critical error, like I/O failure), script will print -N\\n (where N is an error code) and will continue to work. Technically it's -N for LENGTH, followed by a newline character and no PAYLOAD part (or 0-length PAYLOAD if you like). The following error codes are supported: * -1\\n - Unknown error. Something went wrong with the JID extraction (most likely malformed input). * -2\\n - Invalid message type. The message / stanza has been decoded successfully, but it's not a groupchat message.

"},{"location":"migrations/jid-from-mam-muc-script/#examples","title":"Examples","text":"

tools/migration folder contains two files: sender-jid-from-mam-message.example.eterm and sender-jid-from-mam-message.example.xml. They are input samples for the script and may be used as a reference for the script usage.

You can test them by running:

  • tools/migration/sender-jid-from-mam-message.escript eterm < sender-jid-from-mam-message.example.eterm > out
  • tools/migration/sender-jid-from-mam-message.escript xml < sender-jid-from-mam-message.example.xml > out

In both cases the out file should have the following content:

37\ng\u017ceg\u017c\u00f3\u0142ka@brz\u0119czyszczykiewicz.pl\n
"},{"location":"migrations/jid-from-mam-muc-script/#debug","title":"Debug","text":"

If an environment variable DEBUG is set to 1, the script will store error messages in a /tmp/script-debug file.

"},{"location":"modules/mod_adhoc/","title":"mod_adhoc","text":""},{"location":"modules/mod_adhoc/#module-description","title":"Module Description","text":"

This module implements XEP-0050: Ad-Hoc Commands. It allows XMPP entities to remotely execute various commands using forms.

"},{"location":"modules/mod_adhoc/#options","title":"Options","text":""},{"location":"modules/mod_adhoc/#modulesmod_adhociqdisctype","title":"modules.mod_adhoc.iqdisc.type","text":"
  • Syntax: string, one of \"one_queue\", \"no_queue\", \"queues\", \"parallel\"
  • Default: \"one_queue\"

Strategy to handle incoming stanzas. For details, please refer to IQ processing policies.

"},{"location":"modules/mod_adhoc/#modulesmod_adhocreport_commands_node","title":"modules.mod_adhoc.report_commands_node","text":"
  • Syntax: boolean
  • Default: false
  • Example: report_commands_node = true

Determines whether the Ad-Hoc Commands should be announced upon Service Discovery.

"},{"location":"modules/mod_adhoc/#example-configuration","title":"Example configuration","text":"
[modules.mod_adhoc]\n  report_commands_node = true\n
"},{"location":"modules/mod_amp/","title":"mod_amp","text":""},{"location":"modules/mod_amp/#module-description","title":"Module Description","text":"

This module enables support for a subset of the functionality described under XEP-0079: Advanced Message Processing. It currently does not provide features related to timed delivery, i.e. the expire-at condition.

The error and notify actions are supported, while alert and drop are not. See more below, under XEP Support.

"},{"location":"modules/mod_amp/#options","title":"Options","text":"

None.

"},{"location":"modules/mod_amp/#example-configuration","title":"Example Configuration","text":"
[modules.mod_amp]\n
"},{"location":"modules/mod_amp/#xep-support","title":"XEP Support","text":"

What follows is a short description of which parts of the XEP-0079 specification mod_amp supports.

2.1.1 Service Discovery

  • Both the service discovery information response (Ex.1, 2) and the request/response for individual actions and conditions (Ex.3, 4) are supported.

2.1.2 Specifying Semantics

  • \"Per-hop\" rule semantics are not supported, i.e. ignored.

2.2 Server Processing

  • 2.2.1 Validating Semantics: Performed as in the XEP. The first message to fail validation determines the error message.
  • 2.2.2 supported to spec.
  • 2.2.3 supported to spec.
  • 2.2.4 supported for actions: error and notify.
  • 2.2.5 supported for events: error and notify.

3.3 Defined Conditions

  • 3.3.1 deliver: supported for values: direct, stored, and none. The stored condition works with mod_mam and mod_offline.

    Note

    If both mod_mam and mod_offline are enabled, some delivery conditions may not work correctly.

  • 3.3.2 expire-at: not supported

  • 3.3.3 match-resource: supported

3.4 Defined Actions

  • 3.4.1 alert: not supported
  • 3.4.2 drop: not supported
  • 3.4.3 error: supported
  • 3.4.4 notify: supported. Notifications for the stored and direct conditions are sent as soon as the message has been stored or sent to the recipient.

6. Error Handling

  • 6.2.1 Unsupported Action: supported
  • 6.2.2 Unsupported Condition: supported
  • 6.2.3 Not Acceptable: supported
  • 6.2.4 Service Unavailable is not supported, as it pertains to \"per-hop\" rule processing
  • 6.2.5 Undefined Condition: supported

8. Stream Feature

  • supported

9. Security Considerations

  • Currently, the security measures described in this section have not been implemented. It follows that mod_amp, in its current state, should only be enabled for servers/domains where user presence leaks are not a threat, i.e services where all users can see each other's presence by default.
"},{"location":"modules/mod_amp/#modifications","title":"Modifications","text":"

The following behaviour differs from or extends the guidelines provided in the XEP.

  • The action for the deliver condition with value stored is deferred until the message is stored by mod_mam or mod_offline.
  • The action for the deliver condition with value direct is deferred until the message is sent to the recipient's socket.
"},{"location":"modules/mod_amp/#server-processing-details","title":"Server Processing Details","text":"

When a message with AMP rules is being processed by the server, several system events may occur. For a given event, the rules are processed and each of them can get the matched or undecided status or, if the conditions are not met, it gets no status. If any rules get the matched status, the action for the first of them is performed. After that, the rule list is filtered so that only the undecided ones are left in the message, as they may be matched later.

The following system events are defined:

  • initial check - always occurs first, when the message enters the system.
  • mod_mam failed - mod_mam is enabled but fails to store the message.
  • mod_offline failed - the recipient is offline and mod_offline is enabled but fails to store the message.
  • archived - either mod_mam or mod_offline has successfully stored the message.
  • delivery failed - the message was about to be delivered, but it could not be sent.
  • delivered - the message has been sent to the recipient. Mutually exclusive with delivery failed.

Rule status is determined for each system event in the following way:

  • initial check

    • If the recipient is online, rules for the direct and none values of the deliver condition become undecided, except rules for the direct value with action error or drop, which become matched. If mod_mam is enabled, rules for the stored value of the deliver condition become undecided.
      • If the recipient has a session for the target resource, rules for the exact and any values of the match-resource condition become matched.
      • Otherwise, rules for the other and any values of the match-resource condition become matched.
    • If the recipient is offline:
      • If mod_mam or mod_offline is enabled, rules for the stored and none values of the deliver conditions become undecided, except rules for the stored value with action error or drop, which become matched.
      • If both mod_mam and mod_offline are disabled, rules for the none delivery condition become matched.
  • mod_mam failed

    • If the recipient is online, rules for direct and none values of the deliver condition become undecided.
    • If the recipient is offline, rules for the none value of the deliver condition become matched.
  • mod_offline failed

    • Rules for the none value of the deliver condition become matched.
  • archived

    • If the recipient is online, rules for direct and stored values of the deliver condition become undecided.
    • If the recipient is offline, rules for the stored value of the deliver condition become matched.
  • delivery failed

    • Rules for the none and stored value of the deliver condition become matched.
  • delivered

    • Rules for the direct value of the deliver condition become matched.
"},{"location":"modules/mod_auth_token/","title":"mod_auth_token","text":""},{"location":"modules/mod_auth_token/#module-description","title":"Module Description","text":"

This module implements handling of tokens in an OAuth-like authentication scheme. It provides services necessary to:

  • deserialize/serialize binary tokens received and issued by the server,
  • validate incoming binary tokens, i.e.:
    • check integrity using Message Authentication Codes (MAC) with server-side stored user keys,
    • check validity against the configured validity duration times,
    • check revocation status,
  • handle token requests from logged in users.

The module itself does not implement protocol related details - these are implemented in cyrsasl.erl. Generation of keys necessary to sign binary tokens is delegated to module mod_keystore.erl.

"},{"location":"modules/mod_auth_token/#options","title":"Options","text":""},{"location":"modules/mod_auth_token/#modulesmod_auth_tokenbackend","title":"modules.mod_auth_token.backend","text":"
  • Syntax: non-empty string
  • Default: \"rdbms\"
  • Example: backend = \"rdbms\"

Token storage backend. Currently only \"rdbms\" is supported.

"},{"location":"modules/mod_auth_token/#modulesmod_auth_tokeniqdisctype","title":"modules.mod_auth_token.iqdisc.type","text":"
  • Syntax: string, one of \"one_queue\", \"no_queue\", \"queues\", \"parallel\"
  • Default: \"no_queue\"

Strategy to handle incoming IQ stanzas. For details, please refer to IQ processing policies.

"},{"location":"modules/mod_auth_token/#modulesmod_auth_tokenvalidity_period","title":"modules.mod_auth_token.validity_period","text":"
  • Syntax: TOML table. Each key is either access or refresh. Each value is a nested TOML table with the following mandatory keys: value (non-negative integer) and unit (\"days\", \"hours\", \"minutes\" or \"seconds\").
  • Default: {access = {value = 1, unit = \"hours\"}, refresh = {value = 25, unit = \"days\"}}
  • Example: validity_period.access = {value = 30, unit = \"minutes\"}

Validity periods of access and refresh tokens can be defined independently - specifying one of them does not change the default value for the other one. Validity period configuration for provision tokens happens outside the module since the server does not generate provision tokens - it only validates them.

"},{"location":"modules/mod_auth_token/#required-keys","title":"Required keys","text":"

To read more about the keys MongooseIM makes use of, please refer to mod_keystore documentation, where you can find an example configuration when using mod_auth_token.

"},{"location":"modules/mod_auth_token/#token-types","title":"Token types","text":"

Three token types are supported:

  • access tokens: These are short-lived tokens whose grants aren't tracked by the server (i.e. there's no need to store anything in a database). Access tokens can be used as a payload for the X-OAUTH authentication mechanism and grant access to the system. Access tokens can't be revoked. An access token is valid only until its expiry date is reached. In mod_keystore, the keyname for this token type is token_secret.

  • refresh tokens: These are longer lived tokens which are tracked by the server and therefore require persistent storage in a relational database. Refresh tokens can be used as a payload for the X-OAUTH authentication mechanism and to grant access to the system. Also they can result in a new set of tokens being returned upon successful authentication. They can be revoked - if a refresh token hasn't been revoked, it is valid until it has expired. On revocation, it immediately becomes invalid. As the server stores information about granted tokens, it can also persistently mark them as revoked. In mod_keystore, the keyname for this token type is token_secret.

  • provision tokens: These tokens are generated by a service external to the server. They grant the owner a permission to create an account. A provision token may contain information which the server can use to provision the VCard for the newly created account. Using a provision token to create an account (and inject VCard data) is done similarly to other token types, i.e. by passing it as payload for the X-OAUTH mechanism. The XMPP server has no way of tracking and revoking provision tokens, as they come from an outside source. In mod_keystore, the keyname for this token type is provision_pre_shared. The usage of this token type is optional.

"},{"location":"modules/mod_auth_token/#token-serialization-format","title":"Token serialization format","text":"

All tokens (access, refresh, provision) are to be exchanged as Base64 encoded binary data. Serialization format of the token before encoding with Base64 is dependent on its type:

'access' \\0 <BARE_JID> \\0 <EXPIRES_AT> \\0 <MAC>\n\n'refresh' \\0 <BARE_JID> \\0 <EXPIRES_AT> \\0 <SEQUENCE_NO> \\0 <MAC>\n\n'provision' \\0 <BARE_JID> \\0 <EXPIRES_AT> \\0 <VCARD> \\0 <MAC>\n

For example (these tokens are randomly generated, hence field values don't make much sense - line breaks are inserted only for the sake of formatting, <vCard/> inner XML is snipped):

'access' \\0 Q8@localhost \\0 64875466454\n    \\0 0acd0a66d06934791d046060cf9f1ad3c2abb3274cc7e7d7b2bc7e2ac4453ed774b6c6813b40ebec2bbc3774d59d4087\n\n'refresh' \\0 qp@localhost \\0 64875466457 \\0 6\n    \\0 8f57cb019cd6dc6e7779be165b9558611baf71ee4a40d03e77b78b069f482f96c9d23b1ac1ef69f64c1a1db3d36a96ad\n\n'provision' \\0 Xmi4@localhost \\0 64875466458 \\0 <vCard>...</vCard>\n    \\0 86cd344c98b345390c1961e12cd4005659b4b0b3c7ec475bde9acc9d47eec27e8ddc67003696af582747fb52e578a715\n
"},{"location":"modules/mod_auth_token/#requesting-access-or-refresh-tokens-when-logged-in","title":"Requesting access or refresh tokens when logged in","text":"
<iq type='get' to='john@localhost' id='123'>\n    <query xmlns='erlang-solutions.com:xmpp:token-auth:0'/>\n</iq>\n

To request access and refresh tokens for the first time a client should send an IQ stanza after they have successfully authenticated for the first time using some other method.

"},{"location":"modules/mod_auth_token/#token-response-format","title":"Token response format","text":"

Requested tokens are being returned by the server wrapped in IQ stanza with the following fields:

  • id: value taken from the request IQ stanza
  • type: result
  • from: bare user JID
  • to: full user JID

Example response (encoded tokens have been truncated in this example):

<iq  id='123' type='result' from='john@localhost' to='john@localhost/res1'>\n    <items xmlns='erlang-solutions.com:xmpp:token-auth:0'>\n        <access_token>cmVmcmVzaAGQ1Mzk1MmZlYzhkYjhlOTQzM2UxMw==</access_token>\n        <refresh_token>cmVmcmVzaAGQ1Mzk1MmZlYzhkYjhlOTQzM2UxMw==</refresh_token>\n    </items>\n</iq>\n

Once a client has obtained a token, they may start authenticating using the X-OAUTH SASL mechanism when reaching the authentication phase of an XMPP connection initiation.

"},{"location":"modules/mod_auth_token/#login-with-access-or-refresh-token","title":"Login with access or refresh token","text":"

In order to log into the XMPP server using a previously requested token, a client should send the following stanza:

<auth xmlns=\"urn:ietf:params:xml:ns:xmpp-sasl\" mechanism=\"X-OAUTH\">\ncmVmcmVzaAGQ1Mzk1MmZlYzhkYjhlOTQzM2UxMw== \n</auth>\n

The Base64 encoded content is a token obtained prior to authentication. Authentication will succeed unless the used tokens are expired, revoked, or the keys required for MAC verification could not be found by the server.

When using a refresh token to authenticate with the server, the server will respond with a new access token:

<success xmlns=\"urn:ietf:params:xml:ns:xmpp-sasl\">\ncmVmcmVzaAGQ1Mzk1MmZlYzhkYjhlOTQzM2UxMw==\n</success>\n

The above response is to be expected unless the refresh token used is expired or there were some problems processing the key on the server side.

"},{"location":"modules/mod_auth_token/#token-revocation-using-command-line-tool","title":"Token revocation using command line tool","text":"

Refresh tokens issued by the server can be used to:

  • log in a user: as an authentication token,
  • request a new access token with refreshed expiry date.

An administrator may revoke a refresh token:

mongooseimctl token revokeToken --user owner@xmpphost\n

A client can no longer use a revoked token either for authentication or requesting new access tokens. After a client's token has been revoked, in order to obtain a new refresh token a client has to log in using some other method.

Caveat: as of now, the user's session is not terminated automatically on token revocation. Therefore, the user might request a new set of tokens for as long as the session is active, even though their previous token was just revoked (possibly due to a breach / token leak). Moreover, an access token still kept on a compromised device can be used to establish a new session for as long as it's valid - access tokens can't be revoked. To alleviate rerequesting tokens by the user, an operator can use mod_admin extension allowing to terminate the user's connection. Access token validity can't be sidestepped right now.

"},{"location":"modules/mod_auth_token/#example-configuration","title":"Example configuration","text":"
[modules.mod_auth_token]\n  validity_period.access = {value = 13, unit = \"minutes\"}\n  validity_period.refresh = {value = 13, unit = \"days\"}\n
"},{"location":"modules/mod_bind2/","title":"mod_bind2","text":""},{"location":"modules/mod_bind2/#module-description","title":"Module Description","text":"

Implements XEP-0386: Bind 2.

"},{"location":"modules/mod_blocking/","title":"mod_blocking","text":""},{"location":"modules/mod_blocking/#module-description","title":"Module Description","text":"

This module implements XEP-0191: Blocking command. The extension allows blocking the whole communication with a user (or a group of users) with a single command. The protocol is much simpler than privacy lists.

"},{"location":"modules/mod_blocking/#options","title":"Options","text":"

None.

"},{"location":"modules/mod_blocking/#example-configuration","title":"Example Configuration","text":"
[modules.mod_blocking]\n

The module is not configurable because internally it is an interface to privacy lists, so settings like storage backend apply to it automatically.

Issuing a blocking command creates a privacy list named \"blocking\" (if it didn't exist), adds to it items being blocked and sets this list as the default. Unblocking contacts removes them from \"blocking\" privacy list.

If the user has other online resources which use privacy lists it may result in a different behaviour per resource; this is normal, and provided for in XEP.

Similar to privacy lists, a blocked contact sees the user as offline no matter what their real status is.

If the contact being blocked is subscribed to the user's presence, they receive an \"unavailable\" presence; when unblocked, they receive the current status of the user.

"},{"location":"modules/mod_bosh/","title":"mod_bosh","text":""},{"location":"modules/mod_bosh/#module-description","title":"Module Description","text":"

This module implements XEP-0206: XMPP Over BOSH (using XEP-0124: Bidirectional-streams Over Synchronous HTTP (BOSH)), allowing clients to connect to MongooseIM over regular HTTP long-lived connections.

If you want to use BOSH, you must enable it both in the listen section of mongooseim.toml (Listener Modules) and as a module.

"},{"location":"modules/mod_bosh/#options","title":"Options","text":""},{"location":"modules/mod_bosh/#modulesmod_boshbackend","title":"modules.mod_bosh.backend","text":"
  • Syntax: string: \"mnesia\" or \"cets\"
  • Default: \"mnesia\"
  • Example: backend = \"mnesia\"

Backend to use for storing BOSH connections.

Warning

The corresponding internal database has to be enabled.

"},{"location":"modules/mod_bosh/#modulesmod_boshinactivity","title":"modules.mod_bosh.inactivity","text":"
  • Syntax: positive integer or the string \"infinity\"
  • Default: 30
  • Example: inactivity = 30

Maximum allowed inactivity time (in seconds) for a BOSH connection. Please note that a long-polling request is not considered to be an inactivity.

"},{"location":"modules/mod_bosh/#modulesmod_boshmax_wait","title":"modules.mod_bosh.max_wait","text":"
  • Syntax: positive integer or the string \"infinity\"
  • Default: \"infinity\"
  • Example: max_wait = 30

This is the longest time (in seconds) that the connection manager will wait before responding to any request during the session.

"},{"location":"modules/mod_bosh/#modulesmod_boshserver_acks","title":"modules.mod_bosh.server_acks","text":"
  • Syntax: boolean
  • Default: false
  • Example: server_acks = true

Enables/disables acks sent by server.

"},{"location":"modules/mod_bosh/#modulesmod_boshmax_pause","title":"modules.mod_bosh.max_pause","text":"
  • Syntax: positive integer
  • Default: 120
  • Example: max_pause = 30

Maximum allowed pause in seconds (e.g. to switch between pages and then resume connection) to request by client-side.

"},{"location":"modules/mod_bosh/#example-configuration","title":"Example Configuration","text":"

In the listener section:

[[listen.http]]\n  port = 5280\n  transport.num_acceptors = 10\n  transport.max_connections = 1024\n\n  [[listen.http.handlers.mod_bosh]]\n    host = \"_\"\n    path = \"/http-bind\"\n

In the module section:

[modules.mod_bosh]\n  inactivity = 20\n  max_wait = \"infinity\"\n  server_acks = true\n  max_pause = 120\n
"},{"location":"modules/mod_cache_users/","title":"mod_cache_users","text":""},{"location":"modules/mod_cache_users/#module-description","title":"Module Description","text":"

mod_cache_users is a module that caches whether a user exists, and possibly stores metadata assigned to them. This is useful for example to decide if a message should be stored in MAM or Inbox \u2014 for example, the receiver might not exist, so no message should be stored in their archive or inbox.

This cache uses segmented cache under the hood, for more details, read the library documentation.

"},{"location":"modules/mod_cache_users/#options","title":"Options","text":""},{"location":"modules/mod_cache_users/#modulesmod_cache_usersstrategy","title":"modules.mod_cache_users.strategy","text":"
  • Syntax: string, one of fifo or lru
  • Default: fifo
  • Example: strategy = \"lru\"

Eviction strategy for the cache. FIFO is simply a queue, that ensures records will eventually be evicted and require reloading; LRU ensures queried records keep moving to the front of the queue, possibly keeping them alive forever.

"},{"location":"modules/mod_cache_users/#modulesmod_cache_userstime_to_live","title":"modules.mod_cache_users.time_to_live","text":"
  • Syntax: integer, in minutes, or the string \"infinity\"
  • Default: 8 * 60 (8h)
  • Example: time_to_live = 480

Time between rotations, that is, the time a single table will live. A record that is inserted in the first table will live as long as this ttl multiplied by the number of tables.

"},{"location":"modules/mod_cache_users/#modulesmod_cache_usersnumber_of_segments","title":"modules.mod_cache_users.number_of_segments","text":"
  • Syntax: integer
  • Default: 3
  • Example: number_of_segments = 3

Number of segments the cache has. The more segments there are, the more fine-grained the cache can be, but the slower queries will be: querying the cache checks the tables in order until a match is found.

"},{"location":"modules/mod_cache_users/#example-configuration","title":"Example configuration","text":"
[modules.mod_cache_users]\n  strategy = \"lru\"\n  time_to_live = 60\n  number_of_segments = 1\n
"},{"location":"modules/mod_caps/","title":"mod_caps","text":""},{"location":"modules/mod_caps/#module-description","title":"Module description","text":"

This module provides a presence-based mechanism for exchanging information about entity capabilities as defined in XEP-0115: Entity Capabilities. Additionally, it filters out PEP messages that the recipient declared (in announced caps) being not capable of handling. It is not this module's responsibility to intercept and answer disco requests routed between clients.

"},{"location":"modules/mod_caps/#options","title":"Options","text":"

This module expects two optional arguments that apply to cache tab:

"},{"location":"modules/mod_caps/#modulesmod_capscache_size","title":"modules.mod_caps.cache_size","text":"
  • Syntax: positive integer
  • Default: 1000
  • Example: cache_size = 2000

The size of a cache_tab (the amount of entries) holding the information about capabilities of each user.

"},{"location":"modules/mod_caps/#modulesmod_capscache_life_time","title":"modules.mod_caps.cache_life_time","text":"
  • Syntax: positive integer
  • Default: 86_400 (24 hours)
  • Example: cache_life_time = 10_000

Time (in seconds) after which entries will be removed.

"},{"location":"modules/mod_caps/#modulesmod_capsbackend","title":"modules.mod_caps.backend","text":"
  • Syntax: string, one of \"mnesia\", \"rdbms\".
  • Default: \"mnesia\"
  • Example: backend = \"mnesia\"
"},{"location":"modules/mod_caps/#example-configuration","title":"Example Configuration","text":"
[modules.mod_caps]\n  cache_size = 2000\n  cache_life_time = 10_000\n
"},{"location":"modules/mod_carboncopy/","title":"mod_carboncopy","text":""},{"location":"modules/mod_carboncopy/#module-description","title":"Module Description","text":""},{"location":"modules/mod_carboncopy/#discovering-support","title":"Discovering Support","text":"

The server uses a disco query to inform if carbons are enabled.

"},{"location":"modules/mod_carboncopy/#enabling-and-disabling-carbons-from-the-client","title":"Enabling and disabling Carbons from the client","text":"

Carbons are not enabled by default. Every client app has to enable carbons to get messages sent to other clients of the user. Carbons are enabled and disabled with an iq stanza with a child element - <enable xmlns='urn:xmpp:carbons:2'/> or <disable xmlns='urn:xmpp:carbons:2'/>.

"},{"location":"modules/mod_carboncopy/#receiving-messages-to-a-bare-jid","title":"Receiving messages to a bare JID","text":"

Each message to a bare JID is forked and sent to all carbon enabled resources of the recipient, and not just to the highest priority resource. Sending multiple copies to the same resource is avoided.

"},{"location":"modules/mod_carboncopy/#receiving-messages-to-full-jid","title":"Receiving messages to full JID","text":"

Each directed message to a full JID is also forwarded to all carbon enabled resources of the recipient. The message is wrapped in the <forwarded xmlns='urn:xmpp:forward:0'></forwarded> tag and directed towards each carbon enabled resource.

"},{"location":"modules/mod_carboncopy/#sending-messages","title":"Sending Messages","text":"

Just as when receiving messages to a full JID, each sent message is forwarded to all carbon enabled resources of recipient. The message is wrapped in the <forwarded xmlns='urn:xmpp:forward:0'></forwarded> tag and is directed towards each carbon enabled resource.

"},{"location":"modules/mod_carboncopy/#private-messages","title":"Private Messages","text":"

Private messages are tagged <private/> and are not forwarded to any carbon enabled resource of the sender and recipient if the to attribute contains a full JID. However, if the message is sent to a bare JID, it is forked to all highest priority resources. This is not done through mod_carboncopy but is an expected outcome.

"},{"location":"modules/mod_carboncopy/#multiple-enabledisable-requests","title":"Multiple enable/disable requests","text":"

Multiple enable/disable requests are not treated as an error even if they come from the same resource.

"},{"location":"modules/mod_carboncopy/#behavior-with-other-modules","title":"Behavior with other modules","text":"
  • mod_offline: Offline messages are delivered as they are. Since only one resource can connect at a time and there will be a finite time delay between logins from two resources, mod_carboncopy has no role to play and only one resource can receive offline messages. Other resources can retrieve old messages from the archive.
  • mod_mam: mod_mam covers only direct messages from one user to another. All the forked messages for a message sent with a bare JID are ignored by mod_mam. Similarly, all the carbon messages are also ignored by mod_mam.
"},{"location":"modules/mod_carboncopy/#retrieving-archive-from-multiple-resources","title":"Retrieving archive from multiple resources","text":"

A resource can retrieve archives of messages sent to a specific resource of a friend which will not contain any carbon messages. It will only contain messages directed towards that resource or messages sent with a bare JID when that resource was at the highest priority. A request to mod_mam with a bare JID of the chosen user will retrieve all messages to them from any resource. There are no instances of copies of the same messages being sent by mod_mam. This is because mod_mam does not archive carbon messages.

"},{"location":"modules/mod_carboncopy/#testing-with-a-client","title":"Testing with a client","text":"

The module and its behavior have been tested with mod_offline and mod_mam using a desktop client made in Java using the Smack library. The standard Smack library for carbons is able to unpack and read the carbon messages. Also, the standard library supports checking for carbon support by the server using disco and sending enable and disable requests for carbon messages. A client needs to synchronize with mod_offline and mod_mam. Once a client is online and enables carbons, it will not receive all the messages. mod_mam does not capture any carbon messages so it does not send any duplicates during any archive request. Only the simple chat messages are archived and they can be accessed by using the bare JID of the user for whom the archive is requested. For an Erlang-based test suite, please see this.

"},{"location":"modules/mod_carboncopy/#options","title":"Options","text":""},{"location":"modules/mod_carboncopy/#modulesmod_carboncopyiqdisctype","title":"modules.mod_carboncopy.iqdisc.type","text":"
  • Syntax: string, one of \"one_queue\", \"no_queue\", \"queues\", \"parallel\"
  • Default: no_queue

Strategy to handle incoming stanzas. For details, please refer to IQ processing policies.

"},{"location":"modules/mod_carboncopy/#example-configuration","title":"Example Configuration","text":"
[modules.mod_carboncopy]\n  iqdisc.type = \"no_queue\"\n
"},{"location":"modules/mod_csi/","title":"mod_csi","text":""},{"location":"modules/mod_csi/#module-description","title":"Module Description","text":"

Enables XEP-0352: Client State Indication functionality.

The XEP doesn't require any specific server behaviour in response to CSI stanzas, there are only some suggestions. The implementation in MongooseIM will simply buffer all packets (up to a configured limit) when the session is \"inactive\" and will flush the buffer when it becomes \"active\" again.

"},{"location":"modules/mod_csi/#options","title":"Options","text":""},{"location":"modules/mod_csi/#modulesmod_csibuffer_max","title":"modules.mod_csi.buffer_max","text":"
  • Syntax: non-negative integer or the string \"infinity\"
  • Default: 20
  • Example: buffer_max = 40

Buffer size for messages queued when the session was inactive.

"},{"location":"modules/mod_csi/#example-configuration","title":"Example Configuration","text":"
[modules.mod_csi]\n  buffer_max = 40\n
"},{"location":"modules/mod_csi/#metrics","title":"Metrics","text":"

If you'd like to learn more about metrics in MongooseIM, please visit MongooseIM metrics page.

Name Type Description (when it gets incremented) [Host, modCSIInactive] spiral A client becomes inactive. [Host, modCSIActive] spiral A client becomes active."},{"location":"modules/mod_disco/","title":"mod_disco","text":""},{"location":"modules/mod_disco/#module-description","title":"Module Description","text":"

Implements XEP-0030: Service Discovery. The module itself provides only the essential disco interface, the actual capabilities announced by Service Discovery are gathered via executing a fold-type hook.

"},{"location":"modules/mod_disco/#options","title":"Options","text":""},{"location":"modules/mod_disco/#modulesmod_discoiqdisctype","title":"modules.mod_disco.iqdisc.type","text":"
  • Syntax: string, one of \"one_queue\", \"no_queue\", \"queues\", \"parallel\"
  • Default: \"one_queue\"

Strategy to handle incoming stanzas. For details, please refer to IQ processing policies.

"},{"location":"modules/mod_disco/#modulesmod_discoextra_domains","title":"modules.mod_disco.extra_domains","text":"
  • Syntax: array of strings, valid domain names
  • Default: no extra domains
  • Example: extra_domains = [\"custom_domain\"]

Adds domains that are not registered with other means to a local item announcement (response to http://jabber.org/protocol/disco#items IQ get). Please note that mod_disco doesn't verify these domains, so if no handlers are registered later for them, a client will receive a service-unavailable error for every stanza sent to one of these hosts.

"},{"location":"modules/mod_disco/#modulesmod_discoserver_info","title":"modules.mod_disco.server_info","text":"
  • Syntax: array of tables described below
  • Default: no additional server info
  • Example:
    server_info = [\n                {name = \"abuse-address\", urls = [\"admin@example.com\"]}\n              ]\n
    Adds extra disco information to all or chosen modules. New fields will be added in a manner compliant with XEP-0157: Contact Addresses for XMPP Services.

Keys and their values for each entry:

  • name - required, a non-empty string with the name of the field
  • urls - required, an array of valid addresses
  • modules - optional, an array of module names for which the additional server information is to be returned. By default the server information is returned for all modules.
"},{"location":"modules/mod_disco/#modulesmod_discousers_can_see_hidden_services","title":"modules.mod_disco.users_can_see_hidden_services","text":"
  • Syntax: boolean
  • Default: true
  • Example: users_can_see_hidden_services = false

MongooseIM node with this option set to false will exclude \"hidden components\" from disco results sent to clients (identified by bare or full JID). Other entities, with empty username part in their JIDs (e.g. component.example.com), will still receive full disco results.

"},{"location":"modules/mod_disco/#example-configuration","title":"Example Configuration","text":"
[modules.mod_disco]\n  iqdisc.type = \"one_queue\"\n  extra_domains = [\"some_domain\", \"another_domain\"]\n  server_info = [\n    {name = \"abuse-address\", urls = [\"admin@example.com\"]},\n    {name = \"friendly-spirits\", urls = [\"spirit1@localhost\", \"spirit2@localhost\"], modules = [\"mod_muc\", \"mod_disco\"]}\n  ]\n  users_can_see_hidden_services = true\n
"},{"location":"modules/mod_domain_isolation/","title":"mod_domain_isolation","text":""},{"location":"modules/mod_domain_isolation/#module-description","title":"Module Description","text":"

This module limits message passing between domains. When it is enabled, users won't be able to contact each other if they are registered in different domains.

"},{"location":"modules/mod_domain_isolation/#options","title":"Options","text":"

This module has no configuration. Putting the following entry in the config file is enough.

"},{"location":"modules/mod_domain_isolation/#example-configuration","title":"Example configuration","text":"
[modules.mod_domain_isolation]\n
"},{"location":"modules/mod_event_pusher/","title":"mod_event_pusher","text":""},{"location":"modules/mod_event_pusher/#module-description","title":"Module Description","text":"

This module is a generic interface for event-pushing backends. It defines a single callback, push_event/2 that forwards the event to all registered backends. Each backend decides how and if to handle the event in its push_event/2 implementation.

Currently supported backends include http, push, rabbit and sns. Refer to their specific documentation to learn more about their functions and configuration options.

"},{"location":"modules/mod_event_pusher/#how-it-works","title":"How it works","text":"

The events are standardized as records that can be found in the mod_event_pusher_events.hrl file. Common events like user presence changes (offline and online), chat and groupchat messages (incoming and outgoing) are already handled in the mod_event_pusher_hook_translator module, which is a proxy between various hooks and the push_event/2 handler.

Warning

This module does not support dynamic domains.

"},{"location":"modules/mod_event_pusher/#configuration","title":"Configuration","text":"

Each backend is configured in a corresponding subsection. The example below enables all backends. The [modules.mod_event_pusher] section itself is omitted - this is allowed in TOML, because the presence of a subsection implies that the corresponding parent section is also present.

Note

Some backends require configuring connection pools in the outgoing_pools section. See the detailed documentation for each backend.

[modules.mod_event_pusher.sns]\n  presence_updates_topic = \"user_presence_updated\"\n  pm_messages_topic = \"user_message_sent\"\n  muc_messages_topic = \"user_messagegroup_sent\"\n  sns_host = \"eu-west-1.amazonaws.com\"\n  region = \"eu-west-1\"\n  access_key_id = \"AKIAIOSFODNN7EXAMPLE\"\n  secret_access_key = \"wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY\"\n  account_id = \"123456789012\"\n\n[modules.mod_event_pusher.push]\n  wpool.workers = 200\n\n[modules.mod_event_pusher.http]\n  handlers = [{path = \"/notifications\"}]\n\n[modules.mod_event_pusher.rabbit]\n
"},{"location":"modules/mod_event_pusher_http/","title":"HTTP backend","text":""},{"location":"modules/mod_event_pusher_http/#module-description","title":"Module description","text":"

This module is a backend of mod_event_pusher that enables forwarding certain events (messages, presence, etc.) via HTTP to external services such as push (by mobile, email or SMS), big data, or analytics services.

"},{"location":"modules/mod_event_pusher_http/#how-it-works","title":"How it works","text":"

The module hooks on all packets sent by connected users. When the hook is triggered, the module:

  • runs a callback module's should_make_req/6 function to see if a notification should be sent
  • runs a callback module's prepare_headers/7 to get http headers to be used
  • runs a callback module's prepare_body/7
  • sends a POST request composed of {Host::binary(), Sender::binary(), Receiver::binary(), Message::binary()} to the http notification server

You can configure multiple handlers e.g. for sending various types of messages to different HTTP servers.

"},{"location":"modules/mod_event_pusher_http/#prerequisites","title":"Prerequisites","text":"

This module uses a connection pool created by mongoose_http_client. It must be defined in the outgoing_pools settings.

"},{"location":"modules/mod_event_pusher_http/#options","title":"Options","text":""},{"location":"modules/mod_event_pusher_http/#modulesmod_event_pusherhttphandlers","title":"modules.mod_event_pusher.http.handlers","text":"
  • Syntax: array of TOML tables with the keys described below
  • Default: empty list

A list of handler definitions. All handlers are applied for each event.

"},{"location":"modules/mod_event_pusher_http/#modulesmod_event_pusherhttphandlerspool_name","title":"modules.mod_event_pusher.http.handlers.pool_name","text":"
  • Syntax: non-empty string
  • Default: \"http_pool\"
  • Example: pool_name = \"http_pool\"

Name of the pool to use to connect to the HTTP server (as defined in outgoing_pools).

"},{"location":"modules/mod_event_pusher_http/#modulesmod_event_pusherhttphandlerspath","title":"modules.mod_event_pusher.http.handlers.path","text":"
  • Syntax: string
  • Default: \"\"
  • Example: path = \"/notifications\"

Path part of an URL to which a request should be sent (will be appended to the pool's prefix path).

"},{"location":"modules/mod_event_pusher_http/#modulesmod_event_pusherhttphandlerscallback_module","title":"modules.mod_event_pusher.http.handlers.callback_module","text":"
  • Syntax: string
  • Default: \"mod_event_pusher_http_defaults\"
  • Example: callback_module = \"mod_event_pusher_http_notifications\"

Name of a module which should be used to check whether a notification should be sent. The default callback module, mod_event_pusher_http_defaults, sends notifications for all non-empty chat messages. You can use this module as a starting point for developing a custom one.

"},{"location":"modules/mod_event_pusher_http/#example-configuration","title":"Example configuration","text":"
[outgoing_pools.http.http_pool]\n  scope = \"global\"\n  workers = 50\n\n  [outgoing_pools.http.http_pool.connection]\n    host = \"http://localhost:8000\"\n    path_prefix = \"/webservice\"\n    request_timeout = 2000\n\n[modules.mod_event_pusher.http]\n  handlers = [{pool_name = \"http_pool\", path = \"/notifications\"}]\n

Notifications will be POSTed to http://localhost:8000/webservice/notifications. TOML also allows specifying the handler in its own subsection.

[[modules.mod_event_pusher.http.handlers]]\n  pool_name = \"http_pool\"\n  path = \"/notifications\"\n

This alternative syntax is useful for specifying multiple handlers with options:

[[modules.mod_event_pusher.http.handlers]]\n  pool_name = \"http_pool\"\n  path = \"/notifications\"\n  callback_module = \"mod_event_pusher_http_notifications\"\n\n[[modules.mod_event_pusher.http.handlers]]\n  pool_name = \"http_pool\"\n  path = \"/alerts\"\n  callback_module = \"mod_event_pusher_http_alerts\"\n

Here, some notifications will be POSTed to http://localhost:8000/webservice/notifications and some to http://localhost:8000/webservice/alerts, depending on implementation of should_make_req/6 in the two callback modules.

"},{"location":"modules/mod_event_pusher_http/#default-payload-format","title":"Default payload format","text":"

The default HTTP event pusher sends a POST request with Content-Type application/x-www-form-urlencoded. The form has the following fields:

  • author: name of the user who authored the message
  • server: name of the server from where the message originates
  • receiver: name of the user who the message is for
  • message: content of <body> element of the message

The contents of the author, server and receiver fields are processed by stringprep. As a result, these values are all lower case.

"},{"location":"modules/mod_event_pusher_http/#example","title":"Example","text":"

Below is an example of what the body of an HTTP POST request can look like:

\"author=alice&server=localhost&receiver=bob&message=Hi, Bob!\"\n

"},{"location":"modules/mod_event_pusher_http/#metrics","title":"Metrics","text":"

If you'd like to learn more about metrics in MongooseIM, please visit MongooseIM metrics page.

Name Type Description (when it gets incremented) [Host, mod_event_pusher_http, sent] spiral An HTTP notification is sent successfully. [Host, mod_event_pusher_http, failed] spiral An HTTP notification failed. [Host, mod_event_pusher_http, response_time] histogram Does not include timings of failed requests."},{"location":"modules/mod_event_pusher_push/","title":"Push backend","text":""},{"location":"modules/mod_event_pusher_push/#module-description","title":"Module Description","text":"

This module is a backend for mod_event_pusher that implements XEP-0357: Push Notifications. It provides push notification data to the service that delivers actual notifications to a client device.

We've prepared a detailed tutorial for a proper push notifications setup on both client and server side.

Please make sure that clients provide all form fields required by the specified PubSub node. Some publish errors may result in disabling push notifications for the specific device until it attempts to enable them again.

This module is very easy to enable, just paste the following to your MongooseIM configuration file:

[modules.mod_event_pusher]\n  push.wpool.workers = 100\n

And that's basically it. You have just enabled the push notification support with 100 asynchronous workers that will handle all push notification related work.

"},{"location":"modules/mod_event_pusher_push/#options","title":"Options","text":""},{"location":"modules/mod_event_pusher_push/#modulesmod_event_pusherpushiqdisctype","title":"modules.mod_event_pusher.push.iqdisc.type","text":"
  • Syntax: string, one of \"one_queue\", \"no_queue\", \"queues\", \"parallel\"
  • Default: \"one_queue\"

Strategy to handle incoming stanzas. For details, please refer to IQ processing policies.

"},{"location":"modules/mod_event_pusher_push/#modulesmod_event_pusherpushbackend","title":"modules.mod_event_pusher.push.backend","text":"
  • Syntax: string, one of \"mnesia\", \"rdbms\"
  • Default: \"mnesia\"
  • Example: backend = \"rdbms\"

Backend to use for storing the registrations.

"},{"location":"modules/mod_event_pusher_push/#modulesmod_event_pusherpushwpool","title":"modules.mod_event_pusher.push.wpool","text":"
  • Syntax: TOML table with worker pool options
  • Default: see description
  • Example: wpool.workers = 200

Pool options that will be passed to the worker_pool library that handles all the requests. The options allowed here are the same as for the outgoing connection pools. The only difference is that the default strategy is \"available_worker\".

"},{"location":"modules/mod_event_pusher_push/#modulesmod_event_pusherpushplugin_module","title":"modules.mod_event_pusher.push.plugin_module","text":"
  • Syntax: non-empty string
  • Default: \"mod_event_pusher_push_plugin_defaults\"
  • Example: plugin_module = \"mod_event_pusher_push_plugin_defaults\"

The module implementing mod_event_pusher_push_plugin behaviour, used for dynamic configuration of push notifications. See the relevant section for more details.

"},{"location":"modules/mod_event_pusher_push/#modulesmod_event_pusherpushvirtual_pubsub_hosts","title":"modules.mod_event_pusher.push.virtual_pubsub_hosts","text":"
  • Syntax: array of strings
  • Default: []
  • Example: virtual_pubsub_hosts = [\"host1\", \"host2\"]

The list of \"simulated\" Publish-Subscribe domains. You may use the @HOST@ pattern in the domain name. It will automatically be replaced by a respective XMPP domain (e.g. localhost). See the relevant section for more details.

"},{"location":"modules/mod_event_pusher_push/#virtual-pubsub-hosts","title":"Virtual PubSub hosts","text":"

If a notification is published to one of the configured domains, the internal push notification hook is executed in MongooseIM instead of the XEP-0357 typical behaviour. If an existing PubSub domain is added to this list, it will be shadowed in the push notifications context. To ensure complete shadowing of all the PubSub subdomains you must use the @HOST@ pattern, otherwise only the subdomain of the user is shadowed. It enables easy migration from PubSub-full deployments to PubSub-less variants.

"},{"location":"modules/mod_event_pusher_push/#migration-from-xep-0357-to-virtual-hosts","title":"Migration from XEP-0357 to virtual hosts","text":"

This is an example of how you can migrate the existing setup to the new model. PubSub service still exists, just for the case of a user attempting to create a node. However, its domain is overridden for the purpose of sending push notifications. Please note the value of virtual_pubsub_hosts option. \"pubsub.@HOST@\" is the default domain for mod_pubsub.

[modules.mod_pubsub]\n  plugins = [\"push\"] # mandatory minimal config\n\n[modules.mod_event_pusher.push]\n  backend = \"mnesia\" # optional\n  wpool.workers = 200 # optional\n  plugin_module = \"mod_event_pusher_push_plugin_defaults\" # optional\n  virtual_pubsub_hosts = [\"pubsub.@HOST@\"]\n
"},{"location":"modules/mod_event_pusher_push/#advantages","title":"Advantages","text":"
  • Versatility: PubSub-less and PubSub-full mechanisms can be configured with different domains and therefore give fine-grained control over the push notification handling
  • Takes advantage of the PubSub-less efficiency when told to do so
  • Fully compliant with XEP-0357: Push Notifications and therefore with most 3rd party client libraries
  • Ideal for migrations to PubSub-less deployments.
"},{"location":"modules/mod_event_pusher_push/#drawbacks","title":"Drawbacks","text":"
  • More complex configuration on the server side
  • Pays the PubSub performance penalty when the PubSub path is taken
"},{"location":"modules/mod_event_pusher_push/#plugin-module","title":"Plugin module","text":"

You can also control the format of the \"sender\" of the push notification (which ultimately becomes the title of push notification) and filter which messages will trigger the notification. In order to achieve that, you need to create a plugin module that implements the mod_event_pusher_push_plugin behaviour and enable this plugin in the plugin_module section as above.

A plugin module handles the dynamic configuration of push notifications. It contains the filtering and custom logic for notifying about messages.

Two plugin implementations are provided. They offer different behaviour considering unacknowledged messages when using XEP-0198: Stream Management:

  • mod_event_pusher_push_plugin_defaults, which implements an older behaviour. It does not notify the user of unacknowledged messages immediately after detecting a lost connection to the user.
  • mod_event_pusher_push_plugin_enhanced, which pushes notifications as soon as the server detects that the client has disconnected and waits for stream resumption (by an unack_msg_event event generated by the unacknowledged_message hook). This immediate notification prevents the unneeded suspension of the client's application, if there are no unacknowledged messages yet. This allows to create more power efficient mobile applications.

In order for the enhanced plugin to work, each device (an entity that may receive push notifications) should be uniquely identified. The only correct way to identify a device from the XMPP standpoint is to use the data provided with the enable stanza. Because of that, each device should (re)enable the push notifications at the beginning of each and every connection.

"},{"location":"modules/mod_event_pusher_push/#custom-plugins","title":"Custom plugins","text":"

A custom module implementing the optional callbacks of mod_event_pusher_push_plugin may be used as a plugin to change the default behaviour. In the case of not implemented callbacks the defaults are used instead.

"},{"location":"modules/mod_event_pusher_rabbit/","title":"RabbitMQ backend","text":""},{"location":"modules/mod_event_pusher_rabbit/#current-status","title":"Current status","text":"

This module is still in an experimental phase.

"},{"location":"modules/mod_event_pusher_rabbit/#module-description","title":"Module Description","text":"

This module is a backend of mod_event_pusher that enables support for the RabbitMQ integration. Currently there are 5 available notifications:

  • user presence changed - Carries the user id (full jid by default) and a boolean field corresponding to the current user online status.
  • private message sent/received - Carries the user ids (both sender and receiver) along with the message body.
  • group message sent/received - Carries the user id and the room id (full jids by default) along with the message body.

All these notifications are sent as JSON strings to RabbitMQ exchanges. Type of exchanges can be chosen as desired. Each type of the notifications is sent to its dedicated exchange. There are three exchanges created on startup of the module, for presences, private messages and group chat messages related events.

Messages are published to a RabbitMQ server with the routing key set to a user's bare JID (user@domain) and a configurable topic, e.g. alice@localhost.private_message_sent.

The module requires a rabbit pool of AMQP connections to be configured in order to make the module work. It's well advised to read through the Advanced configuration/Outgoing connections section before enabling the module.

"},{"location":"modules/mod_event_pusher_rabbit/#presence-exchange-options","title":"Presence exchange options","text":""},{"location":"modules/mod_event_pusher_rabbit/#modulesmod_event_pusherrabbitpresence_exchangename","title":"modules.mod_event_pusher.rabbit.presence_exchange.name","text":"
  • Syntax: non-empty string
  • Default: \"presence\"
  • Example: name = \"custom_presence_name\"

Defines RabbitMQ presence exchange name.

"},{"location":"modules/mod_event_pusher_rabbit/#modulesmod_event_pusherrabbitpresence_exchangetype","title":"modules.mod_event_pusher.rabbit.presence_exchange.type","text":"
  • Syntax: non-empty string
  • Default: \"topic\"
  • Example: type = \"custom_presence_topic\"

Defines RabbitMQ presence exchange type.

"},{"location":"modules/mod_event_pusher_rabbit/#chat-message-options","title":"Chat message options","text":""},{"location":"modules/mod_event_pusher_rabbit/#modulesmod_event_pusherrabbitchat_msg_exchangename","title":"modules.mod_event_pusher.rabbit.chat_msg_exchange.name","text":"
  • Syntax: non-empty string
  • Default: \"chat_msg\"
  • Example: name = \"custom_msg_name\"

Defines RabbitMQ chat message exchange name.

"},{"location":"modules/mod_event_pusher_rabbit/#modulesmod_event_pusherrabbitchat_msg_exchangetype","title":"modules.mod_event_pusher.rabbit.chat_msg_exchange.type","text":"
  • Syntax: non-empty string
  • Default: \"topic\"
  • Example: type = \"custom_msg_topic\"

Defines RabbitMQ chat message exchange type.

"},{"location":"modules/mod_event_pusher_rabbit/#modulesmod_event_pusherrabbitchat_msg_exchangesent_topic","title":"modules.mod_event_pusher.rabbit.chat_msg_exchange.sent_topic","text":"
  • Syntax: non-empty string
  • Default: \"chat_msg_sent\"
  • Example: sent_topic = \"custom_sent_topic\"

Defines RabbitMQ chat message sent topic name.

"},{"location":"modules/mod_event_pusher_rabbit/#modulesmod_event_pusherrabbitchat_msg_exchangerecv_topic","title":"modules.mod_event_pusher.rabbit.chat_msg_exchange.recv_topic","text":"
  • Syntax: non-empty string
  • Default: \"chat_msg_recv\"
  • Example: recv_topic = \"custom_recv_topic\"

Defines RabbitMQ chat message received topic name.

"},{"location":"modules/mod_event_pusher_rabbit/#group-chat-message-options","title":"Group chat message options","text":""},{"location":"modules/mod_event_pusher_rabbit/#modulesmod_event_pusherrabbitgroupchat_msg_exchangename","title":"modules.mod_event_pusher.rabbit.groupchat_msg_exchange.name","text":"
  • Syntax: non-empty string
  • Default: \"groupchat_msg\"
  • Example: name = \"custom_group_msg_name\"

Defines RabbitMQ group chat message exchange name.

"},{"location":"modules/mod_event_pusher_rabbit/#modulesmod_event_pusherrabbitgroupchat_msg_exchangetype","title":"modules.mod_event_pusher.rabbit.groupchat_msg_exchange.type","text":"
  • Syntax: non-empty string
  • Default: \"topic\"
  • Example: type = \"custom_group_msg_topic\"

Defines RabbitMQ group chat message exchange type.

"},{"location":"modules/mod_event_pusher_rabbit/#modulesmod_event_pusherrabbitgroupchat_msg_exchangesent_topic","title":"modules.mod_event_pusher.rabbit.groupchat_msg_exchange.sent_topic","text":"
  • Syntax: non-empty string
  • Default: \"groupchat_msg_sent\"
  • Example: sent_topic = \"custom_group_sent_topic\"

Defines RabbitMQ group chat message sent topic name.

"},{"location":"modules/mod_event_pusher_rabbit/#modulesmod_event_pusherrabbitgroupchat_msg_exchangerecv_topic","title":"modules.mod_event_pusher.rabbit.groupchat_msg_exchange.recv_topic","text":"
  • Syntax: non-empty string
  • Default: \"groupchat_msg_recv\"
  • Example: recv_topic = \"custom_group_recv_topic\"

Defines RabbitMQ group chat message received topic name.

"},{"location":"modules/mod_event_pusher_rabbit/#example-configuration","title":"Example configuration","text":"
[modules.mod_event_pusher.rabbit]\n  presence_exchange.name =\"presence\"\n  presence_exchange.type = \"topic\"\n  chat_msg_exchange.name = \"chat_msg\"\n  chat_msg_exchange.sent_topic = \"chat_msg_sent\"\n  chat_msg_exchange.recv_topic = \"chat_msg_recv\"\n  groupchat_msg_exchange.name = \"groupchat_msg\"\n  groupchat_msg_exchange.sent_topic = \"groupchat_msg_sent\"\n  groupchat_msg_exchange.recv_topic = \"groupchat_msg_recv\"\n
"},{"location":"modules/mod_event_pusher_rabbit/#json-schema-examples","title":"JSON Schema examples","text":"

The different kinds of notifications deliver slightly different messages. The messages are delivered in a JSON format.

"},{"location":"modules/mod_event_pusher_rabbit/#presence-updates","title":"Presence updates","text":"

The JSON format for an online presence update notification is:

{\n    \"user_id\": \"alice@localhost/res1\",\n    \"present\": true\n}\n

For offline presence updates, the present boolean value is set to false:

{\n    \"user_id\": \"alice@localhost/res1\",\n    \"present\": false\n}\n
"},{"location":"modules/mod_event_pusher_rabbit/#sentreceived-messages","title":"Sent/received messages","text":"

The JSON format for a private message notification is:

{\n    \"to_user_id\": \"bob@localhost/res1\",\n    \"message\": \"Hello, Bob\",\n    \"from_user_id\": \"alice@localhost/res1\"\n}\n

The notification is similar for group messages. For example for \"sent\" events:

{\n    \"to_user_id\": \"muc_publish@muc.localhost\",\n    \"message\": \"Hi, Everyone!\",\n    \"from_user_id\": \"bob@localhost/res1\"\n}\n

and for \"received\" events:

{\n    \"to_user_id\": \"bob@localhost/res1\",\n    \"message\": \"Hi, Everyone!\",\n    \"from_user_id\": \"muc_publish@muc.localhost/alice\"\n}\n
"},{"location":"modules/mod_event_pusher_rabbit/#metrics","title":"Metrics","text":"

The module provides some metrics related to RabbitMQ connections and messages as well. Provided metrics:

name type description (when it gets incremented/decremented) [Host, connections_active] spiral A connection to a RabbitMQ server is opened(+1)/closed(-1). [Host, connections_opened] spiral A connection to a RabbitMQ server is opened. [Host, connections_closed] spiral A connection to a RabbitMQ server is closed. [Host, connection_failed ] spiral An attempt to open a connection to a RabbitMQ server failed. [Host, messages_published] spiral A message to a RabbitMQ server is published. [Host, messages_failed] spiral A message to a RabbitMQ server is rejected. [Host, messages_timeout] spiral A message to a RabbitMQ server timed out (wasn't confirmed by the server). [Host, message_publish_time] histogram Amount of time it takes to publish a message to a RabbitMQ server and receive a confirmation. It's measured only for successful messages. [Host, message_payload_size] histogram Size of a message (in bytes) that was published to a RabbitMQ server (including message properties). It's measured only for successful messages.

All the above metrics have a prefix which looks as follows: <xmpp_host>.backends.mod_event_pusher_rabbit.<metric_name>. For example a proper metric name would look like: localhost.backends.mod_event_pusher_rabbit.connections_active

"},{"location":"modules/mod_event_pusher_rabbit/#guarantees","title":"Guarantees","text":"

There are no guarantees. The current implementation uses a \"best effort\" approach, which means that we don't care if a message is delivered to a RabbitMQ server. If publisher confirms are enabled and a message couldn't be delivered to the server for some reason (the server sent a negative acknowledgment/didn't send it at all or there was a channel exception) the module just updates appropriate metrics and prints some log messages. Notice that there might be situations when a message silently gets lost.

"},{"location":"modules/mod_event_pusher_rabbit/#type-of-exchanges","title":"Type of exchanges","text":"

By default all the exchanges used are of type topic. Using topic exchanges gives a lot of flexibility when binding queues to such an exchange by using # and * in binding keys. But flexibility comes at the cost of performance - imagine a scenario where there are thousands of users and AMQP consumers use binding keys for particular users which look like user_N@host.#. In such a case RabbitMQ has to go through all the users in order to find out where a message should be sent to. This operation has proven to be costly. In a load test with 100k users a delay caused by this operation was substantial (about an order of magnitude higher than in a load test with 60k users).

If performance is a top priority, go for direct exchanges. This type of exchange has proven to work efficiently with 100k users. Keep in mind that it trades flexibility for performance.

"},{"location":"modules/mod_event_pusher_rabbit/#publisher-confirms","title":"Publisher confirms","text":"

By default publisher confirmations are disabled. However, one-to-one confirmations can be enabled (see RabbitMQ connection setup section). When a worker sends a message to a RabbitMQ server it waits for a confirmation from the server before it starts to process the next message. This approach allows introducing backpressure on a RabbitMQ server connection, because the server can reject/not confirm messages when it's overloaded. On the other hand it can cause performance degradation.

"},{"location":"modules/mod_event_pusher_rabbit/#worker-selection-strategy","title":"Worker selection strategy","text":"

The module uses mongoose_wpool for managing worker processes, and the best_worker strategy is used by default for choosing a worker. Different strategies imply different behaviors of the system.

"},{"location":"modules/mod_event_pusher_rabbit/#event-messages-queuing","title":"Event messages queuing","text":"

When the available_worker strategy is in use, all the event messages are queued in a single worker pool manager process state. When a different strategy is set, e.g. best_worker, those messages are placed in the worker processes' inboxes. The worker selection strategy can be set in the rabbit pool configuration.

"},{"location":"modules/mod_event_pusher_rabbit/#event-messages-ordering","title":"Event messages ordering","text":"

None of the worker selection strategies ensures that user events will be delivered to a RabbitMQ server properly ordered in time.

"},{"location":"modules/mod_event_pusher_sns/","title":"SNS backend","text":""},{"location":"modules/mod_event_pusher_sns/#module-description","title":"Module Description","text":"

This module is a backend of mod_event_pusher that enables support for the Amazon SNS service. Currently there are 3 available notifications:

  • user presence changed - Carries the user id (bare jid by default) and a boolean field corresponding to the current user online status.
  • private message sent - Carries the user ids (both sender and receiver) along with the message body.
  • group message sent - Carries the user id and the room id (bare jids by default) along with the message body.

All these notifications are sent as a JSON string to Amazon SNS along with custom MessageAttributes (see http://docs.aws.amazon.com/sns/latest/api/API_Publish.html). MessageAttributes can be specified via a plugin module (more details in Options section).

Full topics for notifications (ARN as defined in Amazon Resource Names) are constructed as arn:aws:sns:{region}:{account_id}:{topic} where {region} and {account_id} are substituted with corresponding values from configuration options. {topic} is pulled from configuration option presence_updates_topic, pm_messages_topic or muc_messages_topic based on the notification type.

"},{"location":"modules/mod_event_pusher_sns/#options","title":"Options","text":""},{"location":"modules/mod_event_pusher_sns/#modulesmod_event_pushersnspresence_updates_topic","title":"modules.mod_event_pusher.sns.presence_updates_topic","text":"
  • Syntax: string
  • Default: no default is given
  • Example: presence_updates_topic = \"user_presence_updated\"

Defines Amazon SNS Topic for presence change notifications. Remove this option to disable these notifications.

"},{"location":"modules/mod_event_pusher_sns/#modulesmod_event_pushersnspm_messages_topic","title":"modules.mod_event_pusher.sns.pm_messages_topic","text":"
  • Syntax: string
  • Default: no default is given
  • Example: pm_messages_topic = \"user_message_sent\"

Defines Amazon SNS Topic for private message notifications. Remove this option to disable these notifications.

"},{"location":"modules/mod_event_pusher_sns/#modulesmod_event_pushersnsmuc_messages_topic","title":"modules.mod_event_pusher.sns.muc_messages_topic","text":"
  • Syntax: string
  • Default: no default is given
  • Example: muc_messages_topic = \"user_messagegroup_sent\"

Defines Amazon SNS Topic for group message notifications. Remove this option to disable these notifications.

"},{"location":"modules/mod_event_pusher_sns/#modulesmod_event_pushersnsplugin_module","title":"modules.mod_event_pusher.sns.plugin_module","text":"
  • Syntax: string
  • Default: \"mod_event_pusher_sns_defaults\"
  • Example: plugin_module = \"mod_event_pusher_sns_defaults\"

Sets a callback module used for creating user's GUID used in notifications (from user's JID) and for defining custom attributes attached to a published SNS message.

"},{"location":"modules/mod_event_pusher_sns/#modulesmod_event_pushersnssns_host","title":"modules.mod_event_pusher.sns.sns_host","text":"
  • Syntax: string
  • Default: none, this option is mandatory
  • Example: sns_host = \"sns.eu-west-1.amazonaws.com\"

URL to the Amazon SNS service. The URL may be in virtual host form, and for AWS needs to point at a specific regional endpoint. The scheme, port and path specified in the URL will be used to publish notifications via HTTP POST method.

"},{"location":"modules/mod_event_pusher_sns/#modulesmod_event_pushersnsregion","title":"modules.mod_event_pusher.sns.region","text":"
  • Syntax: string
  • Default: none, this option is mandatory
  • Example: region = \"eu-west-1\"

The AWS region to use for requests.

"},{"location":"modules/mod_event_pusher_sns/#modulesmod_event_pushersnsaccess_key_id","title":"modules.mod_event_pusher.sns.access_key_id","text":"
  • Syntax: string
  • Default: none, this option is mandatory
  • Example: access_key_id = \"AKIAIOSFODNN7EXAMPLE\"

ID of the access key to use for authorization.

"},{"location":"modules/mod_event_pusher_sns/#modulesmod_event_pushersnssecret_access_key","title":"modules.mod_event_pusher.sns.secret_access_key","text":"
  • Syntax: string
  • Default: none, this option is mandatory
  • Example: secret_access_key = \"wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY\"

Secret access key to use for authorization.

"},{"location":"modules/mod_event_pusher_sns/#modulesmod_event_pushersnsaccount_id","title":"modules.mod_event_pusher.sns.account_id","text":"
  • Syntax: string
  • Default: none, this option is mandatory
  • Example: account_id = \"123456789012\"

12 digit number as defined in AWS Account Identifiers to use for creating TopicArn for publishing notifications.

"},{"location":"modules/mod_event_pusher_sns/#modulesmod_event_pushersnspool_size","title":"modules.mod_event_pusher.sns.pool_size","text":"
  • Syntax: positive integer
  • Default: 100
  • Example: pool_size = 100

Worker pool size for publishing notifications.

"},{"location":"modules/mod_event_pusher_sns/#modulesmod_event_pushersnspublish_retry_count","title":"modules.mod_event_pusher.sns.publish_retry_count","text":"
  • Syntax: non-negative integer
  • Default: 2
  • Example: publish_retry_count = 2

Retry count in case of a publish error.

"},{"location":"modules/mod_event_pusher_sns/#modulesmod_event_pushersnspublish_retry_time_ms","title":"modules.mod_event_pusher.sns.publish_retry_time_ms","text":"
  • Syntax: non-negative integer
  • Default: 50
  • Example: publish_retry_time_ms = 50

Base exponential backoff time (in ms) for publish errors.

"},{"location":"modules/mod_event_pusher_sns/#example-configuration","title":"Example configuration","text":"
[modules.mod_event_pusher.sns]\n  presence_updates_topic = \"user_presence_updated\"\n  pm_messages_topic = \"user_message_sent\"\n  muc_messages_topic = \"user_messagegroup_sent\"\n  sns_host = \"sns.eu-west-1.amazonaws.com\"\n  region = \"eu-west-1\"\n  access_key_id = \"AKIAIOSFODNN7EXAMPLE\"\n  secret_access_key = \"wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY\"\n  account_id = \"123456789012\"\n
"},{"location":"modules/mod_event_pusher_sns/#json-schema-examples","title":"JSON Schema examples","text":"

The different kinds of notifications deliver slightly different messages. The messages are delivered in a JSON format.

"},{"location":"modules/mod_event_pusher_sns/#presence-updates","title":"Presence updates","text":"

The JSON format for an online presence update notification is:

{\n    \"user_id\": \"alice@localhost\",\n    \"present\": true\n}\n

For offline presence updates, the present boolean value is set to false:

{\n    \"user_id\": \"alice@localhost\",\n    \"present\": false\n}\n
"},{"location":"modules/mod_event_pusher_sns/#sent-messages","title":"Sent messages","text":"

The JSON format for a private message notification is:

{\n    \"to_user_id\": \"bob@localhost\",\n    \"message\": \"Hello, Bob\",\n    \"from_user_id\": \"alice@localhost\"\n}\n

The notification is similar for group messages except that the to_user_id is the recipient room JID. For example:

{\n    \"to_user_id\": \"muc_publish@muc.localhost\",\n    \"message\": \"Hi, Everyone!\",\n    \"from_user_id\": \"bob@localhost\"\n}\n
"},{"location":"modules/mod_extdisco/","title":"mod_extdisco","text":""},{"location":"modules/mod_extdisco/#module-description","title":"Module Description","text":"

Implements XEP-0215: External Service Discovery for discovering information about services external to the XMPP network. The main use-case is to help discover STUN/TURN servers to allow for negotiating media exchanges.

"},{"location":"modules/mod_extdisco/#options","title":"Options","text":""},{"location":"modules/mod_extdisco/#modulesmod_extdiscoiqdisctype","title":"modules.mod_extdisco.iqdisc.type","text":"
  • Syntax: string, one of \"one_queue\", \"no_queue\", \"queues\", \"parallel\"
  • Default: \"no_queue\"

Strategy to handle incoming IQ stanzas. For details, please refer to IQ processing policies.

"},{"location":"modules/mod_extdisco/#modulesmod_extdiscoservice","title":"modules.mod_extdisco.service","text":"
  • Syntax: TOML array with one table for each advertised service - see below for details.
  • Default: [] - no services advertised
"},{"location":"modules/mod_extdisco/#service-options","title":"Service options","text":"

Each advertised service is specified as a TOML table containing the following options listed below.

"},{"location":"modules/mod_extdisco/#modulesmod_extdiscoservicetype","title":"modules.mod_extdisco.service.type","text":"
  • Syntax: string
  • Default: none, this option is required
  • Example: type = \"stun\"

Service type, common values are \"stun\", \"turn\", \"ftp\".

"},{"location":"modules/mod_extdisco/#modulesmod_extdiscoservicehost","title":"modules.mod_extdisco.service.host","text":"
  • Syntax: string
  • Default: none, this option is required
  • Example: host = \"192.168.0.2\"

Hostname or an IP address where the service is hosted.

"},{"location":"modules/mod_extdisco/#modulesmod_extdiscoserviceport","title":"modules.mod_extdisco.service.port","text":"
  • Syntax: integer, between 0 and 65535, non-inclusive
  • Default: none, this option is recommended
  • Example: port = 3478

The communications port to be used at the host.

"},{"location":"modules/mod_extdisco/#modulesmod_extdiscoservicetransport","title":"modules.mod_extdisco.service.transport","text":"
  • Syntax: string, one of \"udp\", \"tcp\"
  • Default: none, this option is optional
  • Example: transport = \"udp\"

The underlying transport protocol to be used when communicating with the service.

"},{"location":"modules/mod_extdisco/#modulesmod_extdiscoserviceusername","title":"modules.mod_extdisco.service.username","text":"
  • Syntax: string
  • Default: none, this option is optional
  • Example: username = \"username\"

A service-generated username for use at the service.

"},{"location":"modules/mod_extdisco/#modulesmod_extdiscoservicepassword","title":"modules.mod_extdisco.service.password","text":"
  • Syntax: string
  • Default: none, this option is optional
  • Example: password = \"password\"

A service-generated password for use at the service.

"},{"location":"modules/mod_extdisco/#example-configuration","title":"Example Configuration","text":"
[modules.mod_extdisco]\n\n  [[modules.mod_extdisco.service]]\n    type = \"stun\"\n    host = \"127.0.0.1\"\n    port = 3478\n    transport = \"udp\"\n    username = \"username\"\n    password = \"password\"\n\n  [[modules.mod_extdisco.service]]\n    type = \"stun\"\n    host = \"stun.host.com\"\n    port = 3478\n    transport = \"tcp\"\n    username = \"username2\"\n    password = \"password2\"\n\n  [[modules.mod_extdisco.service]]\n    type = \"turn\"\n    host = \"turn.host.com\"\n
"},{"location":"modules/mod_global_distrib/","title":"mod_global_distrib","text":""},{"location":"modules/mod_global_distrib/#module-description","title":"Module Description","text":"

This module enables global distribution of a single XMPP domain. With mod_global_distrib, multiple distinct MongooseIM clusters can share a single domain name and route messages to the specific datacenter where the recipient is available.

Warning

This module does not support dynamic domains.

"},{"location":"modules/mod_global_distrib/#how-it-works","title":"How it works","text":"

There are multiple subsystems that cooperate to enable global distribution:

"},{"location":"modules/mod_global_distrib/#metadata-sharing","title":"Metadata sharing","text":"

Sharing of metadata is done by leveraging a database with cross-datacenter replication. Currently, only Redis is supported, with Dynomite layer for replication. The most important metadata stored in the database is a session/routing table. The table stores mappings between currently logged users' JIDs and datacenters on which they are logged in. Because access to the session table is very frequent, its entries are additionally cached on each node.

To preserve consistency between database instances, all data is stored with a set expiration time and is periodically refreshed. Each node of each cluster is responsible for refreshing its own data. Thus, in an event of a netsplit, datacenters' information about unreachable datacenters' users will expire, as those users are now unreachable; but once the connection is reestablished, the data will be replicated again as datacenters refresh their entries. Additionally, to prevent edge cases where an incoming message is received and replied to before the datacenter learns about the sender's host, an incoming message also carries information about its origin which may be used to temporarily update the local routing table.

"},{"location":"modules/mod_global_distrib/#redis-entries","title":"Redis entries","text":"

Following structures are stored in Redis:

  • JID mappings are stored as normal key-value entries, where user's JID (full and bare) is the key, and the value is the local hostname where the user is logged in. Example: \"user1@example.com/res\" -> \"dc2.example.com\".
  • Domains of components and services registered on the globally distributed host are stored in per-node set structures where the key is <local_host>#<node_name>#{domains}, and the values are the domain names. Example: \"dc1.example.com#mongoose1@dc1.example.com#{domains}\" -> {\"muc1.example.com\", \"muc2.example.com\"}.
  • Domains of non-hidden components and services (see the XMPP Components documentation) are stored in per-node set structures where the key is <local_host>#<node_name>#{public_domains}, and the values are the domain names.
  • Declared endpoints available on a node are similarly stored in a per-node set structure where the key is <local_host>#<node_name>#{endpoints} and the values represent the TCP endpoints of the node. Example: \"dc1.example.com#mongoose1@dc1.example.com#{endpoints}\" -> {\"172.16.2.14#8231\", \"2001:0db8:85a3:0000:0000:8a2e:0370:7334#8882\"}.
  • Nodes that comprise a host are stored in a set structure with key <local_host>#{nodes} and values being the names of the nodes. Example: \"dc2.example.com#{nodes}\" -> {\"node1@dc2.example.com\", \"node3@dc2.example.com\"}.
  • Hosts are stored in a set with key hosts and values being the individual local XMPP domains. Example: \"hosts\" -> {\"dc1.example.com\", \"dc2.example.com\"}.
"},{"location":"modules/mod_global_distrib/#message-routing","title":"Message routing","text":"

mod_global_distrib establishes its own listeners and dedicated TCP/TLS connections for message routing. Each node listens on preconfigured endpoints, where each node in a datacenter can have any number of endpoints, including none. The endpoints are shared between all datacenters. If a node becomes unavailable, its endpoint entries in the database will expire and will be read once the node comes back online.

Connections between nodes in distinct datacenters are opened on the first request and then maintained as long as the destination endpoint is present in Redis. When a node needs to connect to a remote cluster, a specified number of connections are opened to every endpoint reported by that datacenter. Global distribution features an automatic rebalancing mechanism that will \"disable\" connections when their respective endpoints disappear from Redis. A new pool of connections is created each time a new endpoint is recognised. Whenever a node receives a message that is determined (by consulting the session table) to be destined for another datacenter, the routing procedure in the current datacenter is interrupted, the message is transported to the other datacenter via the dedicated connections, and the routing procedure is restarted there by a dedicated (but potentially short lived) worker process bound to the sender's JID (or subdomain if the sender's JID does not belong to the globally distributed domain). The client's process binds itself to a connection to a remote datacenter on first use, and henceforth always uses this connection to route messages directed to this datacenter. This - along with the dedicated worker process on the receiver's side - ensures that simple cross-datacenter messages between two entities are delivered in their sending order.

It may happen that a message is rerouted through multiple datacenters (e.g. if the user has reconnected to a different datacenter while the message was already in flight). Messages are given a TTL parameter by the source datacenter so that they cannot be rerouted indefinitely. The TTL is decreased on each reroute. Note that in the edge case of multi-datacenter routing, the messages may be received out-of-order at the destination datacenter.

"},{"location":"modules/mod_global_distrib/#bounce","title":"Bounce","text":"

Consider the following edge case: user U1 logged into datacenter DC2 and then quickly reconnected to datacenter DC3. Because the session table has not yet been replicated, DC2 does not see U1 in the session table, while a different datacenter DC1 still sees U1 logged into DC2. When U2, logged into DC1, sends a message to U1, it will be rerouted to DC2 even though the user is now available at DC3.

Bounce mechanism solves this and similar edge cases by storing messages for which there is no known routing in the current datacenter. The stored messages are then assigned a bounce-TTL value and periodically - with backoff - are attempted to be routed again. In the example above, the message from U2 would be temporarily stored at DC2 and rerouted successfully once DC2 learns (via replication) that U1 is available at DC3.

Note

Bounce mechanism, similarly to multi-datacenter routing, may result in out-of-order messages being received at the destination datacenter.

"},{"location":"modules/mod_global_distrib/#metrics","title":"Metrics","text":"

Global distribution modules expose several per-datacenter metrics that can be used to monitor health of the system. All metrics begin with global.mod_global_distrib prefix:

  • outgoing.messages.<host>: number of cross-datacenter messages sent by this cluster to a given host.
  • incoming.messages.<host>: number of cross-datacenter messages received by this cluster from a given host.
  • incoming.transfer_time.<host> [us]: time elapsed between sending and receiving the message over the network from a given host. The duration is calculated using wall clock times on sender and receiver node.
  • outgoing.queue_time.<host> [us]: time elapsed while message waits in a queue of a sender's connection to a given host. High value of this metric may be remedied by increasing the number of connections to other hosts.
  • incoming.queue_time [us]: time elapsed while message waits in routing worker's queue. This value is not reported per-host as routing workers are bound to the sender's JID.
  • incoming.established: incremented when a new connection is established from another cluster. At this point the origin domain of the cluster is not known, so this metric is common for all of them.
  • incoming.first_packet.<host>: incremented when a receiver process gets the first packet from a remote cluster and learns its local domain.
  • incoming.closed.<host>: incremented when an incoming connection gets closed.
  • incoming.errored.<host>: incremented when an incoming connection gets closed with an error.
  • outgoing.established.<host>: incremented when an outgoing connection is established.
  • outgoing.closed.<host>: incremented when an outgoing connection gets closed.
  • outgoing.errored.<host>: incremented when an outgoing connection gets closed with an error.
  • mapping_fetch_time [us]: time spent on fetching an entry from the session table, cached or otherwise.
  • mapping_fetches: number of fetches of session table entries, cached or otherwise.
  • mapping_cache_misses: number of fetches of session table entries that hit the database.
  • delivered_with_ttl: A histogram of packets' TTL values recorded when the global routing layer decides to route them locally (but not due to TTL = 0).
  • stop_ttl_zero: A number of packets that weren't processed by global routing due to TTL=0.
  • bounce_queue_size: a number of messages enqueued for rerouting (the value of this metric is individual per MongooseIM node!).
"},{"location":"modules/mod_global_distrib/#notes","title":"Notes","text":"
  • You should only start mod_global_distrib by configuring it under modules option in mongooseim.toml. Do not add it as host-specific module via host_config.
  • Do not use mod_offline on domains given via global_host or local_host options, as it will decrease messaging robustness; the users logged in other datacenters will not be registered as available by mod_offline, and so the messages will not be flushed.
"},{"location":"modules/mod_global_distrib/#options","title":"Options","text":""},{"location":"modules/mod_global_distrib/#modulesmod_global_distribglobal_host","title":"modules.mod_global_distrib.global_host","text":"
  • Syntax: string
  • Default: none, this option is mandatory
  • Example: global_host = \"example.com\"

The XMPP domain that will be shared between datacenters.

Note

This needs to be one of the domains given in general.hosts option in mongooseim.toml.

"},{"location":"modules/mod_global_distrib/#modulesmod_global_distriblocal_host","title":"modules.mod_global_distrib.local_host","text":"
  • Syntax: string
  • Default: none, this option is mandatory
  • Example: local_host = \"datacenter1.example.com\"

XMPP domain that maps uniquely to the local datacenter; it will be used for inter-center routing.

Note

This needs to be one of the domains given in general.hosts option in mongooseim.toml.

"},{"location":"modules/mod_global_distrib/#modulesmod_global_distribmessage_ttl","title":"modules.mod_global_distrib.message_ttl","text":"
  • Syntax: non-negative integer
  • Default: 4
  • Example: message_ttl = 5

Number of times a message can be rerouted between datacenters.

"},{"location":"modules/mod_global_distrib/#modulesmod_global_distribhosts_refresh_interval","title":"modules.mod_global_distrib.hosts_refresh_interval","text":"
  • Syntax: non-negative integer, value given in milliseconds
  • Default: 3000
  • Example: hosts_refresh_interval = 3000

The interval telling how often Redis should be asked if new hosts appeared.

"},{"location":"modules/mod_global_distrib/#connections-options","title":"Connections' options","text":""},{"location":"modules/mod_global_distrib/#modulesmod_global_distribconnectionsendpoints","title":"modules.mod_global_distrib.connections.endpoints","text":"
  • Syntax: Array of TOML tables with the following keys: host and port, and the following values: {host = string, port = non_negative_integer}
  • Default: [{host = \"LocalHost\", port = 5555}]
  • Example: endpoints = [{host = \"172.16.0.2\", port = 5555}]

A list of endpoints on which the server will listen for connections. host can be given as a hostname, in which case it will be resolved to an IP address on module start. The endpoint list will be shared with other datacenters via the replicated backend.

"},{"location":"modules/mod_global_distrib/#modulesmod_global_distribconnectionsadvertised_endpoints","title":"modules.mod_global_distrib.connections.advertised_endpoints","text":"
  • Syntax: Array of TOML tables with the following keys: host and port, and the following values: {host = string, port = non_negative_integer}
  • Default: not set, the value of endpoints is used (without resolution).
  • Example: advertised_endpoints = [{host = \"172.16.0.2\", port = 5555}]

A list of endpoints which will be advertised in Redis and therefore used to establish connection with this node by other nodes. The host may be either IP or domain, just like in case of endpoints. The difference is, the domain name won't be resolved but inserted directly to the mappings backend instead.

"},{"location":"modules/mod_global_distrib/#modulesmod_global_distribconnectionsconnections_per_endpoint","title":"modules.mod_global_distrib.connections.connections_per_endpoint","text":"
  • Syntax: non-negative integer
  • Default: 1
  • Example: connections_per_endpoint = 30

Number of outgoing connections that will be established from the current node to each endpoint assigned to a remote domain.

"},{"location":"modules/mod_global_distrib/#modulesmod_global_distribconnectionsendpoint_refresh_interval","title":"modules.mod_global_distrib.connections.endpoint_refresh_interval","text":"
  • Syntax: positive integer, value given in seconds
  • Default: 60
  • Example: endpoint_refresh_interval = 30

An interval between remote endpoint list refresh (and connection rebalancing). A separate timer is maintained for every remote domain.

"},{"location":"modules/mod_global_distrib/#modulesmod_global_distribconnectionsendpoint_refresh_interval_when_empty","title":"modules.mod_global_distrib.connections.endpoint_refresh_interval_when_empty","text":"
  • Syntax: positive integer, value given in seconds
  • Default: 3
  • Example: endpoint_refresh_interval_when_empty = 3

Endpoint refresh interval, when array of endpoints is empty.

"},{"location":"modules/mod_global_distrib/#modulesmod_global_distribconnectionsdisabled_gc_interval","title":"modules.mod_global_distrib.connections.disabled_gc_interval","text":"
  • Syntax: positive integer, value given in seconds
  • Default: 60
  • Example: disabled_gc_interval = 60

An interval between disabled endpoints \"garbage collection\". It means that disabled endpoints are periodically verified, and if Global Distribution detects that a connection is no longer alive, the connection pool is closed completely.

"},{"location":"modules/mod_global_distrib/#tls-options","title":"TLS options","text":"

Note

By default tls is disabled and all data will be sent via standard TCP connections.

To enable TLS support, the cacertfile and certfile options have to be present. These options will be passed to the fast_tls driver.

"},{"location":"modules/mod_global_distrib/#modulesmod_global_distribconnectionstlscertfile","title":"modules.mod_global_distrib.connections.tls.certfile","text":"
  • Syntax: string, path in the file system
  • Default: none, this option is mandatory to enable TLS support
  • Example: certfile = \"priv/dc1.pem\"
"},{"location":"modules/mod_global_distrib/#modulesmod_global_distribconnectionstlscacertfile","title":"modules.mod_global_distrib.connections.tls.cacertfile","text":"
  • Syntax: string, path in the file system
  • Default: none, this option is mandatory to enable TLS support
  • Example: cacertfile = \"priv/ca.pem\"
"},{"location":"modules/mod_global_distrib/#modulesmod_global_distribconnectionstlsciphers","title":"modules.mod_global_distrib.connections.tls.ciphers","text":"
  • Syntax: string
  • Default: \"TLSv1.2:TLSv1.3\"
  • Example: ciphers = \"ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-GCM-SHA384\"

Cipher suites to use with StartTLS or TLS. Please refer to the OpenSSL documentation for the cipher string format.

"},{"location":"modules/mod_global_distrib/#modulesmod_global_distribconnectionstlsdhfile","title":"modules.mod_global_distrib.connections.tls.dhfile","text":"
  • Syntax: string, path in the file system
  • Default: not set
  • Example: dhfile = \"dh.pem\"
"},{"location":"modules/mod_global_distrib/#redis-session-storage-options","title":"Redis session storage options","text":""},{"location":"modules/mod_global_distrib/#modulesmod_global_distribredispool","title":"modules.mod_global_distrib.redis.pool","text":"
  • Syntax: string
  • Default: \"global_distrib\"
  • Example: pool = \"global_distrib\"

Name of the redis pool defined in outgoing pools.

"},{"location":"modules/mod_global_distrib/#modulesmod_global_distribredisexpire_after","title":"modules.mod_global_distrib.redis.expire_after","text":"
  • Syntax: positive integer
  • Default: 120
  • Example: expire_after = 120

Number of seconds after which a session entry written by this cluster will expire.

"},{"location":"modules/mod_global_distrib/#modulesmod_global_distribredisrefresh_after","title":"modules.mod_global_distrib.redis.refresh_after","text":"
  • Syntax: non-negative integer
  • Default: 60
  • Example: refresh_after = 60

Number of seconds after which session's expiration timer will be refreshed.

"},{"location":"modules/mod_global_distrib/#database-cache-options","title":"Database cache options","text":"

Options for caching database lookups, by default no options are passed.

"},{"location":"modules/mod_global_distrib/#modulesmod_global_distribcachecache_missed","title":"modules.mod_global_distrib.cache.cache_missed","text":"
  • Syntax: boolean
  • Default: true
  • Example: cache_missed = true

Determines whether an internal session cache should cache lookup failures. When false, only successful database lookups will result in the value being cached. Changing this option has a great negative impact on performance.

"},{"location":"modules/mod_global_distrib/#modulesmod_global_distribcachedomain_lifetime_seconds","title":"modules.mod_global_distrib.cache.domain_lifetime_seconds","text":"
  • Syntax: non-negative integer, value given in seconds
  • Default: 600
  • Example: domain_lifetime_seconds = 600

How long should subdomain mappings be cached (e.g. muc.example.com -> datacenter1.test).

"},{"location":"modules/mod_global_distrib/#modulesmod_global_distribcachejid_lifetime_seconds","title":"modules.mod_global_distrib.cache.jid_lifetime_seconds","text":"
  • Syntax: non-negative integer, value given in seconds
  • Default: 5
  • Example: jid_lifetime_seconds = 5

How long should full and bare JID mappings be cached (e.g. user1@example.com/res1 -> datacenter1.test).

"},{"location":"modules/mod_global_distrib/#modulesmod_global_distribcachemax_jids","title":"modules.mod_global_distrib.cache.max_jids","text":"
  • Syntax: non-negative integer
  • Default: 10000
  • Example: max_jids = 10000

The maximum number of JID entries that can be stored in cache at any point in time.

"},{"location":"modules/mod_global_distrib/#message-bouncing-options","title":"Message bouncing options","text":""},{"location":"modules/mod_global_distrib/#modulesmod_global_distribbounceenabled","title":"modules.mod_global_distrib.bounce.enabled","text":"
  • Syntax: boolean
  • Default: true
  • Example: enabled = false

Whether message bouncing should be enabled or not. Setting this option to false makes other bounce options have no effect.

"},{"location":"modules/mod_global_distrib/#modulesmod_global_distribbounceresend_after_ms","title":"modules.mod_global_distrib.bounce.resend_after_ms","text":"
  • Syntax: non-negative integer
  • Default: 200
  • Example: resend_after_ms = 200

Time after which message will be resent in case of delivery error.

"},{"location":"modules/mod_global_distrib/#modulesmod_global_distribbouncemax_retries","title":"modules.mod_global_distrib.bounce.max_retries","text":"
  • Syntax: non-negative integer
  • Default: 4
  • Example: max_retries = 4

Number of times message delivery will be retried in case of errors.

"},{"location":"modules/mod_global_distrib/#global-distribution-and-service-discovery","title":"Global Distribution and Service Discovery","text":"

mod_global_distrib extension relies on mod_disco's option users_can_see_hidden_services, when provided. If it is not configured, the default value is true. mod_disco does not have to be enabled for mod_global_distrib to work, as this parameter is used only for processing Disco requests by Global Distribution.

"},{"location":"modules/mod_global_distrib/#example-configuration","title":"Example configuration","text":""},{"location":"modules/mod_global_distrib/#configuring-mod_global_distrib","title":"Configuring mod_global_distrib","text":"
[modules.mod_global_distrib]\n  global_host = \"example.com\"\n  local_host = \"datacenter1.example.com\"\n  connections.endpoints = [{host = \"172.16.0.2\", port = 5555}]\n  connections.advertised_endpoints = [{host = \"172.16.0.2\", port = 5555}]\n  connections.tls.certfile = \"priv/dc1.pem\"\n  connections.tls.cacertfile = \"priv/ca.pem\"\n  connections.connections_per_endpoint = 30\n  cache.domain_lifetime_seconds = 60\n  bounce.resend_after_ms = 300\n  bounce.max_retries = 3\n  redis.pool = \"global_distrib\"\n
"},{"location":"modules/mod_global_distrib/#configuring-dynomite","title":"Configuring Dynomite","text":"

For more information about Dynomite configuration, consult Dynomite wiki.

dyn_o_mite:\n  datacenter: dc1\n  rack: rack1\n  dyn_listen: 172.16.0.3:8101\n  dyn_seeds:\n  - 124.12.4.4:8101:rack1:dc2:1383429731\n  listen: 172.16.0.3:8102\n  servers:\n  - 172.16.0.4:6379:1\n  tokens: '138342973'\n  secure_server_option: datacenter\n  pem_key_file: dynomite.pem\n  data_store: 0\n  stats_listen: 0.0.0.0:22221\n
dyn_o_mite:\n  datacenter: dc2\n  rack: rack1\n  dyn_listen: 124.12.4.4:8101\n  dyn_seeds:\n  - 172.16.0.3:8101:rack1:dc1:1383429731\n  listen: 124.12.4.4:8102\n  servers:\n  - 124.12.4.5:6379:1\n  tokens: '138342973'\n  secure_server_option: datacenter\n  pem_key_file: dynomite.pem\n  data_store: 0\n  stats_listen: 0.0.0.0:22221\n
"},{"location":"modules/mod_http_upload/","title":"mod_http_upload","text":""},{"location":"modules/mod_http_upload/#module-description","title":"Module Description","text":"

This module implements XEP-0363: HTTP File Upload, version 0.3.0+. It enables a service that on user request creates an upload \"slot\". A slot is a pair of URLs, one of which can be used with a PUT method to upload a user's file, the other with a GET method to retrieve such file.

Currently, the module supports only the S3 backend using AWS Signature Version 4.

"},{"location":"modules/mod_http_upload/#options","title":"Options","text":""},{"location":"modules/mod_http_upload/#modulesmod_http_uploadiqdisctype","title":"modules.mod_http_upload.iqdisc.type","text":"
  • Syntax: string, one of \"one_queue\", \"no_queue\", \"queues\", \"parallel\"
  • Default: \"one_queue\"

Strategy to handle incoming stanzas. For details, please refer to IQ processing policies.

"},{"location":"modules/mod_http_upload/#modulesmod_http_uploadhost","title":"modules.mod_http_upload.host","text":"
  • Syntax: string
  • Default: \"upload.@HOST@\"
  • Example: host = \"upload.@HOST@\"

Subdomain for the upload service to reside under. @HOST@ is replaced with each served domain.

"},{"location":"modules/mod_http_upload/#modulesmod_http_uploadbackend","title":"modules.mod_http_upload.backend","text":"
  • Syntax: non-empty string
  • Default: \"s3\"
  • Example: backend = \"s3\"

Backend to use for generating slots. Currently only \"s3\" can be used.

"},{"location":"modules/mod_http_upload/#modulesmod_http_uploadexpiration_time","title":"modules.mod_http_upload.expiration_time","text":"
  • Syntax: positive integer
  • Default: 60
  • Example: expiration_time = 120

Duration (in seconds) after which the generated PUT URL will become invalid.

"},{"location":"modules/mod_http_upload/#modulesmod_http_uploadtoken_bytes","title":"modules.mod_http_upload.token_bytes","text":"
  • Syntax: positive integer
  • Default: 32
  • Example: token_bytes = 32

Number of random bytes of a token that will be used in a generated URL. The text representation of the token will be twice as long as the number of bytes, e.g. for the default value the token in the URL will be 64 characters long.

"},{"location":"modules/mod_http_upload/#modulesmod_http_uploadmax_file_size","title":"modules.mod_http_upload.max_file_size","text":"
  • Syntax: positive integer
  • Default: not set - no size limit
  • Example: max_file_size = 10485760

Maximum file size (in bytes) accepted by the module.

"},{"location":"modules/mod_http_upload/#modulesmod_http_uploads3","title":"modules.mod_http_upload.s3","text":"
  • Syntax: Array of TOML tables. See description.
  • Default: see description
  • Example: see description

Options specific to S3 backend.

Note

This section is mandatory.

"},{"location":"modules/mod_http_upload/#s3-backend-options","title":"S3 backend options","text":""},{"location":"modules/mod_http_upload/#s3bucket_url","title":"s3.bucket_url","text":"
  • Syntax: non-empty string
  • Default: none, this option is mandatory
  • Example: s3.bucket_url = \"https://s3-eu-west-1.amazonaws.com/mybucket\"

A complete URL pointing at the used bucket. The URL may be in virtual host form, and for AWS it needs to point to a specific regional endpoint for the bucket. The scheme, port and path specified in the URL will be used to create PUT URLs for slots, e.g. specifying a value of \"https://s3-eu-west-1.amazonaws.com/mybucket/custom/prefix\" will result in PUT URLs of form \"https://s3-eu-west-1.amazonaws.com/mybucket/custom/prefix/<RANDOM_TOKEN>/<FILENAME>?<AUTHENTICATION_PARAMETERS>\".

"},{"location":"modules/mod_http_upload/#s3add_acl","title":"s3.add_acl","text":"
  • Syntax: boolean
  • Default: false
  • Example: s3.add_acl = true

If true, adds x-amz-acl: public-read header to the PUT URL. This allows users to read the uploaded files even if the bucket is private. The same header must be added to the PUT request.

"},{"location":"modules/mod_http_upload/#s3region","title":"s3.region","text":"
  • Syntax: string
  • Default: \"\", this option is mandatory
  • Example: s3.region = \"eu-west-1\"

The AWS region to use for requests.

"},{"location":"modules/mod_http_upload/#s3access_key_id","title":"s3.access_key_id","text":"
  • Syntax: string
  • Default: \"\", this option is mandatory
  • Example: s3.access_key_id = \"AKIAIOSFODNN7EXAMPLE\"

ID of the access key to use for authorization.

"},{"location":"modules/mod_http_upload/#s3secret_access_key","title":"s3.secret_access_key","text":"
  • Syntax: string
  • Default: \"\", this option is mandatory
  • Example: s3.secret_access_key = \"wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY\"

Secret access key to use for authorization.

"},{"location":"modules/mod_http_upload/#example-configuration","title":"Example configuration","text":"
[modules.mod_http_upload]\n  host = \"upload.@HOST@\"\n  backend = \"s3\"\n  expiration_time = 120\n  s3.bucket_url = \"https://s3-eu-west-1.amazonaws.com/mybucket\"\n  s3.region = \"eu-west-1\"\n  s3.add_acl = true     \n  s3.access_key_id = \"AKIAIOSFODNN7EXAMPLE\"\n  s3.secret_access_key = \"wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY\"\n
"},{"location":"modules/mod_http_upload/#testing-s3-configuration","title":"Testing S3 configuration","text":"

Since there is no direct connection between MongooseIM and an S3 bucket, it is not possible to verify the provided S3 credentials during startup. However, the testing can be done manually. MongooseIM provides a dedicated mongooseimctl httpUpload getUrl command for the manual URLs generation. It requires the following arguments:

  • domain - XMPP host name.
  • filename - Name of the file.
  • size - Size of the file in bytes (positive integer).
  • contentType - Content-Type.
  • timeout - Duration (in seconds, positive integer) after which the generated PUT URL will become invalid. This argument shadows the expiration_time configuration.

The generated URLs can be used to upload/download a file using the curl utility:

# Create some text file\necho qwerty > tmp.txt\n\n# Get the size of the file\nfilesize=\"$(wc -c tmp.txt | awk '{print $1}')\"\n\n# Set the content type\ncontent_type=\"text/plain\"\n\n# Generate upload/download URLs\nurls=\"$(./mongooseimctl httpUpload getUrl --domain localhost --filename test.txt --size \"$filesize\" --contentType \"$content_type\" --timeout 600)\"\nput_url=\"$(echo \"$urls\" | awk '/PutURL:/ {print $2}')\"\nget_url=\"$(echo \"$urls\" | awk '/GetURL:/ {print $2}')\"\n\n# Try to upload a file. Note that if 'add_acl' option is\n# enabled, then you must also add 'x-amz-acl' header:\n#    -H \"x-amz-acl: public-read\"\ncurl -v -T \"./tmp.txt\" -H \"Content-Type: $content_type\" \"$put_url\"\n\n# Try to download a file\ncurl -i \"$get_url\"\n
"},{"location":"modules/mod_http_upload/#using-s3-backend-with-minio","title":"Using S3 backend with min.io","text":"

min.io doesn't support ObjectACL, so enabling add_acl makes no sense. The bucket policies must be used instead; it is enough to set the bucket policy to download.

Please note that there is no error if you keep add_acl enabled. min.io just ignores the x-amz-acl header. This might be useful to simplify the migration from S3 to min.io.

"},{"location":"modules/mod_http_upload/#metrics","title":"Metrics","text":"

If you'd like to learn more about metrics in MongooseIM, please visit MongooseIM metrics page.

Backend action Description (when it gets incremented) create_slot An upload slot is allocated."},{"location":"modules/mod_inbox/","title":"mod_inbox","text":""},{"location":"modules/mod_inbox/#module-description","title":"Module Description","text":"

Inbox is an experimental feature implemented as a few separate modules. It is described in detail as our Open XMPP Extension. To use it, enable mod_inbox in the config file.

"},{"location":"modules/mod_inbox/#options","title":"Options","text":""},{"location":"modules/mod_inbox/#modulesmod_inboxbackend","title":"modules.mod_inbox.backend","text":"
  • Syntax: string, one of \"rdbms\", \"rdbms_async\"
  • Default: \"rdbms\"
  • Example: backend = \"rdbms_async\"

Only RDBMS storage is supported, but rdbms means flushes to DB are synchronous with each message, while rdbms_async is instead asynchronous.

Regular rdbms has worse performance characteristics, but it has better consistency properties, as events aren't lost nor reordered. rdbms_async processes events asynchronously, potentially offloading a lot of aggregation from the DB. Like in the case of the asynchronous workers for MAM, it is the preferred method, with the risk of messages being lost on an ungraceful shutdown.

"},{"location":"modules/mod_inbox/#modulesmod_inboxasync_writerpool_size","title":"modules.mod_inbox.async_writer.pool_size","text":"
  • Syntax: non-negative integer
  • Default: 2 * erlang:system_info(schedulers_online)
  • Example: modules.mod_inbox.async_writer.pool_size = 32

Number of workers in the pool. More than the number of available schedulers is recommended, to minimise lock contention on the message queues, and more than the number of DB workers, to fully utilise the DB capacity. How much more than these two parameters is then a good fine-tuning for specific deployments.

"},{"location":"modules/mod_inbox/#modulesmod_inboxboxes","title":"modules.mod_inbox.boxes","text":"
  • Syntax: array of strings.
  • Default: []
  • Example: [\"classified\", \"spam\"]

A list of supported inbox boxes by the server. This can be used by clients to classify their inbox entries in any way that fits the end-user. The strings provided here will be used verbatim in the IQ query as described in Inbox \u2013 Filtering and Ordering.

Note

inbox, archive, and bin are reserved box names and are always enabled, therefore they don't need to \u2013and must not\u2013 be specified in this section. all has a special meaning in the box query and therefore is also not allowed as a box name.

If the asynchronous backend is configured, automatic removals become moves to the bin box, also called \"Trash bin\". This is to ensure eventual consistency. Then the bin can be emptied, either on a user request, with the mongooseimctl inbox command, through the GraphQL API, or through the REST API.

"},{"location":"modules/mod_inbox/#modulesmod_inboxbin_ttl","title":"modules.mod_inbox.bin_ttl","text":"
  • Syntax: non-negative integer, expressed in days.
  • Default: 30
  • Example: modules.mod_inbox.bin_ttl = 7

How old entries in the bin can be before the automatic bin cleaner collects them. A value of 7 would mean that entries that have been in the bin for more than 7 days will be cleaned on the next bin collection.

"},{"location":"modules/mod_inbox/#modulesmod_inboxbin_clean_after","title":"modules.mod_inbox.bin_clean_after","text":"
  • Syntax: non-negative integer, expressed in hours
  • Default: 1
  • Example: modules.mod_inbox.bin_clean_after = 24

How often the automatic garbage collection runs over the bin.

"},{"location":"modules/mod_inbox/#modulesmod_inboxdelete_domain_limit","title":"modules.mod_inbox.delete_domain_limit","text":"
  • Syntax: non-negative integer or the string \"infinity\"
  • Default: \"infinity\"
  • Example: modules.mod_inbox.delete_domain_limit = 10000

Domain deletion can be an expensive operation, as it requires to delete potentially many thousands of records from the DB. By default, the delete operation deletes everything in a transaction, but it might be desired, to handle timeouts and table locks more gracefully, to delete the records in batches. This limit establishes the size of the batch.

Note

Not supported by MSSQL.

"},{"location":"modules/mod_inbox/#modulesmod_inboxreset_markers","title":"modules.mod_inbox.reset_markers","text":"
  • Syntax: array of strings, out of \"displayed\", \"received\", \"acknowledged\"
  • Default: [\"displayed\"]
  • Example: reset_markers = [\"received\"]

List of chat markers that when sent, will reset the unread message counter for a conversation. This works when Chat Markers are enabled on the client side. Setting as empty list (not recommended) means that no chat marker can decrease the counter value.

"},{"location":"modules/mod_inbox/#modulesmod_inboxgroupchat","title":"modules.mod_inbox.groupchat","text":"
  • Syntax: array of strings
  • Default: [\"muclight\"]
  • Example: groupchat = [\"muclight\"]

The list indicating which groupchats will be included in inbox. Possible values are muclight Multi-User Chat Light or muc Multi-User Chat.

"},{"location":"modules/mod_inbox/#modulesmod_inboxaff_changes","title":"modules.mod_inbox.aff_changes","text":"
  • Syntax: boolean
  • Default: true
  • Example: aff_changes = true

Use this option when muclight is enabled. Indicates if MUC Light affiliation change messages should be included in the conversation inbox. Only changes that affect the user directly will be stored in their inbox.

"},{"location":"modules/mod_inbox/#modulesmod_inboxremove_on_kicked","title":"modules.mod_inbox.remove_on_kicked","text":"
  • Syntax: boolean
  • Default: true
  • Example: remove_on_kicked = true

Use this option when muclight is enabled. If true, the inbox conversation is removed for a user when they are removed from the groupchat.

"},{"location":"modules/mod_inbox/#modulesmod_inboxiqdisctype","title":"modules.mod_inbox.iqdisc.type","text":"
  • Syntax: string, one of \"one_queue\", \"no_queue\", \"queues\", \"parallel\"
  • Default: \"no_queue\"

Strategy to handle incoming stanzas. For details, please refer to IQ processing policies.

"},{"location":"modules/mod_inbox/#modulesmod_inboxmax_result_limit","title":"modules.mod_inbox.max_result_limit","text":"
  • Syntax: the string \"infinity\" or a positive integer
  • Default: \"infinity\"
  • Example: modules.mod_inbox.max_result_limit = 100

This option sets the maximum size of returned results when querying inbox. It works in the same manner as setting a limit in an iq stanza. The special value infinity means no limit.

"},{"location":"modules/mod_inbox/#note-about-supported-rdbms","title":"Note about supported RDBMS","text":"

mod_inbox executes upsert queries, which have different syntax in every supported RDBMS. Inbox currently supports the following DBs:

  • MySQL via native driver
  • PgSQL via native driver
  • MSSQL via ODBC driver
"},{"location":"modules/mod_inbox/#legacy-muc-support","title":"Legacy MUC support","text":"

Inbox comes with support for the legacy MUC as well. It stores all groupchat messages sent to a room in each sender's and recipient's inboxes, as well as private messages. Currently it is not possible to configure it to store system messages like subject or affiliation change.

"},{"location":"modules/mod_inbox/#example-configuration","title":"Example configuration","text":"
[modules.mod_inbox]\n  backend = \"rdbms_async\"\n  reset_markers = [\"displayed\"]\n  aff_changes = true\n  remove_on_kicked = true\n  groupchat = [\"muclight\"]\n
"},{"location":"modules/mod_jingle_sip/","title":"mod_jingle_sip","text":""},{"location":"modules/mod_jingle_sip/#module-description","title":"Module Description","text":"

This module enables Jingle to SIP and SIP to Jingle translation. When this module is enabled, MongooseIM will intercept any Jingle IQ set stanza with action:

  • session-initiate
  • session-terminate
  • session-accept
  • transport-info

and translate it to SIP messages with appropriate SDP content based on the details in the Jingle stanza.

The translation back from SIP to Jingle is done for the following SIP requests:

  • INVITE
  • re-INVITE - INVITE message sent for an accepted session
  • CANCEL
  • BYE
  • INFO

and following responses to the INVITE request:

  • 200 when the call invite was accepted
  • 180 and 183 to indicate that the invitation was sent to the device
  • 486 when the call's recipient rejects it
  • from 400 to 600 - other error codes indicating session termination
"},{"location":"modules/mod_jingle_sip/#jingle-to-sip-translation","title":"Jingle to SIP translation","text":"

The table below summarises the bilateral translation for standard Jingle and SIP messages:

Jingle action SIP message comment session-initiate INVITE request session-accept 200 OK response session-terminate with reason success BYE request Only for accepted session session-terminate with reason decline CANCEL request When sent by call's initiator session-terminate with reason decline 486 Busy Here response When sent by the invited user transport-info INFO request"},{"location":"modules/mod_jingle_sip/#ringing-notification","title":"Ringing notification","text":"

Both Jingle and SIP have the ringing notification. It's generated as a response code 180 Ringing by a SIP entity when the INVITE is sent to the device. In the SIP world a 183 Session Progress response code is also generated in some cases. Both 180 and 183 codes are translated as a session-info Jingle stanza with a ringing sub element. If the recipient is online, MongooseIM generates only the 180 Ringing response code to the INVITE request.

"},{"location":"modules/mod_jingle_sip/#recipient-unavailable","title":"Recipient unavailable","text":"

When MongooseIM receives a SIP INVITE request addressed to an offline user, it replies with a 480 Temporarily Unavailable code. The same code is expected from the SIP Proxy when MongooseIM sends the INVITE request.

"},{"location":"modules/mod_jingle_sip/#other-error-codes","title":"Other error codes","text":"

When an error response to the INVITE request is from the range 400 to 699 but not 486, MongooseIM will send a Jingle session-terminate stanza to the call's initiator. The stanza has reason general-error with the SIP error code in the sip-error sub element.

"},{"location":"modules/mod_jingle_sip/#non-standard-jingle-stanzas-used-by-jinglejs","title":"Non-standard Jingle stanzas used by jingle.js","text":"

The following non-standard Jingle stanzas were integrated with Jingle.js:

  • source-remove
  • source-add
  • source-update

When MongooseIM observes the above Jingle stanzas, it will translate them to a SIP in-dialog INVITE request. In the SDP content of the request, there will be a custom attribute a=jingle-action. The value of the custom attribute is one of the three presented above.

Similarly, when MongooseIM gets a SIP in-dialog INVITE request, it will check if there is a custom attribute and use it as the action attribute of the Jingle stanza sent to the user. If there is no such attribute, the action will be set to regular Jingle transport-info.

"},{"location":"modules/mod_jingle_sip/#non-standard-jingle-existing-session-initiate-stanza","title":"Non-standard Jingle existing-session-initiate stanza","text":"

MongooseIM allows a user to ask for an unanswered session-initiate request. This may be useful in web applications when there is a need to handle the call in a new browser window.

In order to get the session-initiate, which was not answered yet, the user can send a get Jingle stanza to self with action set to existing-session-initiate. As a result, MongooseIM will resend the original session-initiate request to the device which sent the query.

"},{"location":"modules/mod_jingle_sip/#prerequisites","title":"Prerequisites","text":"

By default, MongooseIM is built without SIP support. In order to build the server with SIP support, please use tools/configure script before the release generation. You may either pick only certain drivers (with SIP included) or simply use with-all option. Examples:

tools/configure with-mysql with-jingle-sip\ntools/configure with-all without-odbc\ntools/configure with-all\n

MongooseIM packages are built with Jingle/SIP support.

Warning

This module does not support dynamic domains.

"},{"location":"modules/mod_jingle_sip/#options","title":"Options","text":""},{"location":"modules/mod_jingle_sip/#modulesmod_jingle_sipbackend","title":"modules.mod_jingle_sip.backend","text":"
  • Syntax: string: \"mnesia\" or \"cets\"
  • Default: \"mnesia\"
  • Example: backend = \"cets\"

Backend for in-memory data for this module.

Warning

The corresponding internal database has to be enabled.

"},{"location":"modules/mod_jingle_sip/#modulesmod_jingle_sipproxy_host","title":"modules.mod_jingle_sip.proxy_host","text":"
  • Syntax: string
  • Default: \"localhost\"
  • Example: proxy_host = \"localhost\"

The name or IP address of the SIP Proxy to which MongooseIM will send SIP messages.

"},{"location":"modules/mod_jingle_sip/#modulesmod_jingle_sipproxy_port","title":"modules.mod_jingle_sip.proxy_port","text":"
  • Syntax: non-negative integer
  • Default: 5060
  • Example: proxy_port = 5060

The port of the SIP Proxy.

"},{"location":"modules/mod_jingle_sip/#modulesmod_jingle_siplisten_port","title":"modules.mod_jingle_sip.listen_port","text":"
  • Syntax: non-negative integer
  • Default: 5600
  • Example: listen_port = 5600

The port on which MongooseIM will listen for incoming SIP messages.

"},{"location":"modules/mod_jingle_sip/#modulesmod_jingle_siplocal_host","title":"modules.mod_jingle_sip.local_host","text":"
  • Syntax: string
  • Default: \"localhost\"
  • Example: local_host = \"localhost\"

The value used to create SIP URIs (including VIA headers).

"},{"location":"modules/mod_jingle_sip/#modulesmod_jingle_sipsdp_origin","title":"modules.mod_jingle_sip.sdp_origin","text":"
  • Syntax: string
  • Default: \"127.0.0.1\"
  • Example: sdp_origin = \"127.0.0.1\"

The value of the c= SDP attribute.

"},{"location":"modules/mod_jingle_sip/#modulesmod_jingle_siptransport","title":"modules.mod_jingle_sip.transport","text":"
  • Syntax: string
  • Default: \"udp\"
  • Example: transport = \"tcp\"

The SIP transport parameter used when calling the proxy.

"},{"location":"modules/mod_jingle_sip/#modulesmod_jingle_sipusername_to_phone","title":"modules.mod_jingle_sip.username_to_phone","text":"
  • Syntax: Array of TOML tables with the following keys: username and phone, and string values
  • Default: []
  • Example: username_to_phone = [{username = \"2000006168\", phone = \"+919177074440\"}]

Allows mapping JIDs to phone numbers and vice versa.

The simplest configuration is the following:

[modules.mod_jingle_sip]\n

With this configuration MongooseIM will try sending SIP messages to a SIP proxy listening on localhost and port 5600.

"},{"location":"modules/mod_jingle_sip/#use-cases-covered-by-tests","title":"Use cases covered by tests","text":"

Currently to test the functionality we use a SIP Proxy mock written in Erlang. The following scenarios are covered by our tests in big_tests/tests/jingle_SUITE.erl

All the sequence diagrams were generated with textart.io/sequence. The source code is embedded in the markdown file below every diagram inside a comment <!--- --->

"},{"location":"modules/mod_jingle_sip/#1-establishing-a-session-with-another-xmpp-user","title":"1. Establishing a session with another XMPP user","text":"

With the mod_jingle_sip enabled, all Jingle IQ set stanzas listed above are intercepted, translated to SIP packets and sent to a SIP Proxy. This means that the current implementation will also translate stanzas addressed to a user in the same domain. This allows the SIP entity to control how the call between XMPP users is established. Below there are sequence diagrams showing the communication between XMPP users, MongooseIM and SIP Proxy as in our tests. It's possible that the SIP Proxy or other SIP entity decides that the call needs to be forked and delivered to the user's phone number instead of generating a corresponding call back to MongooseIM.

"},{"location":"modules/mod_jingle_sip/#11-signaling-session-initiate-to-other-xmpp-user-via-sip-proxy","title":"1.1 Signaling session-initiate to other XMPP user via SIP Proxy","text":"
+-------+                       +-------------+       +-----------+                   +-------+\n| UserA |                       | MongooseIM  |       | SIPProxy  |                   | UserB |\n+-------+                       +-------------+       +-----------+                   +-------+\n    |                                  |                    |                             |\n    | session-initiate to UserB        |                    |                             |\n    |--------------------------------->|                    |                             |\n    | -------------------------\\       |                    |                             |\n    |-| Jingle stanza          |       |                    |                             |\n    | | action:session-initate |       |                    |                             |\n    | | sid: 123               |       |                    |                             |\n    | |------------------------|       | SIP INVITE         |                             |\n    |                                  |------------------->|                             |\n    |                                  | -------------\\     |                             |\n    |                                  |-| from:UserA |     |                             |\n    |                                  | | to:UserB   |     |                             |\n    |                                  | | sid: 123   |     |                             |\n    |                                  | |------------|     | create new call             |\n    |                                  |                    |----------------             |\n    |                                  |                    |               |             |\n    |                                  |                    |<---------------             |\n    |                                  |                    | ------------------------\\   |\n    |                           
       |                    |-| SDP content can be    |   |\n    |                                  |                    | | changed for instance  |   |\n    |                                  |                    | | to inject a transport |   |\n    |                                  |         SIP INVITE | | canidate              |   |\n    |                                  |<-------------------| |-----------------------|   |\n    |                                  |     -------------\\ |                             |\n    |                                  |     | from:UserA |-|                             |\n    |                                  |     | to:UserB   | |                             |\n    |            --------------------\\ |     | sid:456    | |                             |\n    |            | yes, new SID: 456 |-|     |------------| |                             |\n    |            |-------------------| |                    |                             |\n    |                                  |                    |                             |\n    |                                  | session-initiate to UserB                        |\n    |                                  |------------------------------------------------->|\n    |                                  |                    |                             |\n
"},{"location":"modules/mod_jingle_sip/#12-signaling-session-accept-to-other-xmpp-user-via-sip-proxy","title":"1.2 Signaling session-accept to other XMPP user via SIP Proxy","text":"

When the other user accepts the call invite sent by the first, the following sequence is executed. This is a continuation of the previous example.

+-------+                       +-------------+        +-----------+                   +-------+\n| UserA |                       | MongooseIM  |        | SIPProxy  |                   | UserB |\n+-------+                       +-------------+        +-----------+                   +-------+\n    |                                  |                     |                             |\n    |                                  |                     |     session-accpet to UserA |\n    |                                  |<--------------------------------------------------|\n    |                                  |                     |   ------------------------\\ |\n    |                                  |                     |   | Jingle stanza         |-|\n    |                                  |                     |   | action:session-accept | |\n    |                                  |                     |   | sid: 456              | |\n    |                                  | 200 OK              |   |-----------------------| |\n    |                                  |-------------------->|                             |\n    |                                  | --------------\\     |                             |\n    |                                  |-| from: UserA |     |                             |\n    |                                  | | to: UserB   |     |                             |\n    |                                  | | sid: 456    |     |                             |\n    |                                  | |-------------|     | find corresponding call     |\n    |                                  |                     |------------------------     |\n    |                                  |                     |                       |     |\n    |                                  |                     |<-----------------------     |\n    |                                  |                     |                             |\n    |       
                           |              200 OK |                             |\n    |                                  |<--------------------|                             |\n    |                                  |     --------------\\ |                             |\n    |                                  |     | from: UserA |-|                             |\n    |                                  |     | to: UserB   | |                             |\n    |                                  |     | sid: 123    | |                             |\n    |        session-accept from UserB |     |-------------| |                             |\n    |<---------------------------------|                     |                             |\n    |                                  |                     |                             |\n
"},{"location":"modules/mod_jingle_sip/#13-terminating-a-call","title":"1.3 Terminating a call","text":"

Any Jingle session (accepted or not) can be terminated by sending a Jingle stanza with action session-terminate and a reason. In the SIP world it's more complex. See the following examples for more information.

"},{"location":"modules/mod_jingle_sip/#131-terminating-an-accepted-call","title":"1.3.1 Terminating an accepted call","text":"

The easiest scenario is when the call was accepted as in 1.2. In this case one of the users sends a session-terminate Jingle action with a reason success. This is translated to a SIP BYE request with to and from headers set appropriately - from is the user who wants to terminate the call and to is the user on the other end of the session. The BYE request is sent to the SIP Proxy and then to the other user in a similar way to session acceptance.

"},{"location":"modules/mod_jingle_sip/#132-terminating-an-unanswered-call-by-initiator","title":"1.3.2 Terminating an unanswered call by initiator","text":"

To terminate the call before it's accepted, the initiator sends a Jingle session-terminate stanza with a reason decline. Then MongooseIM translates this to a SIP CANCEL request which is sent to the SIP Proxy.

"},{"location":"modules/mod_jingle_sip/#133-rejecting-the-call","title":"1.3.3 Rejecting the call","text":"

When the invitee wants to terminate the call, on the XMPP level this is also a Jingle session-terminate stanza with a reason decline. MongooseIM translates this to SIP 486 Busy Here Response (because this is a response to the invite request).

"},{"location":"modules/mod_jingle_sip/#2-establishing-a-session-with-a-sip-user","title":"2. Establishing a session with a SIP user","text":"

Establishing a session with a SIP user (or a SIP entity) works the same as in the previous section. The only difference is that the SIP Proxy will not call MongooseIM back (as may happen for a call to another XMPP user). Instead, the SIP message sent by MongooseIM to the SIP Proxy will be delivered directly to the SIP user's device.

"},{"location":"modules/mod_keystore/","title":"mod_keystore","text":""},{"location":"modules/mod_keystore/#module-description","title":"Module Description","text":"

mod_keystore serves as storage for crypto keys - it doesn't implement any XMPP-level protocol. The module can store transient RAM-only keys generated on module startup, stored in memory only, distributed to all cluster members and existing for only as long as the cluster is alive, as well as predefined and pre-shared keys which can be read from a file.

RAM-only keys provide better security since they are never written to persistent storage, at the cost of loss in case of a cluster-global failure or restart.

As of now mod_auth_token is the only module dependent on mod_keystore.

It's crucial to understand the distinction between single-tenant and multi-tenant hosting scenarios. In a multi-tenant server mod_keystore must be configured separately for each virtual XMPP domain to avoid sharing keys between domains!

"},{"location":"modules/mod_keystore/#options","title":"Options","text":""},{"location":"modules/mod_keystore/#modulesmod_keystoreram_key_size","title":"modules.mod_keystore.ram_key_size","text":"
  • Syntax: non-negative integer
  • Default: 2048
  • Example: ram_key_size = 10000

Size to use when generating RAM-only keys (designated by type ram).

"},{"location":"modules/mod_keystore/#modulesmod_keystorekeys","title":"modules.mod_keystore.keys","text":"
  • Syntax: Array of TOML tables with the following keys: \"name\", \"type\", \"path\", and following values: {name = string, type = values: \"file\", \"ram\", path = string}.
  • Default: []
  • Example: modules.mod_keystore.keys = [{name = \"access_psk\", type = \"file\", path = \"priv/access_psk\"}]

Names, types, and optional filepaths of the keys.

"},{"location":"modules/mod_keystore/#api","title":"API","text":"

The module public API is hook-based:

mongoose_hooks:get_key(Domain, [], KeyName).\n

An example of usage can be found in mod_auth_token:get_key_for_user/2.

"},{"location":"modules/mod_keystore/#example-configuration","title":"Example Configuration","text":"

Simple configuration - single tenant (i.e. server hosting just one XMPP domain):

[modules.mod_keystore]\n  keys = [{name = \"access_secret\", type = \"ram\"},\n          {name = \"access_psk\", type = \"file\", path = \"priv/access_psk\"},\n          {name = \"provision_psk\", type = \"file\", path = \"priv/provision_psk\"}]\n

Multi-tenant setup (mod_keystore configured differently for each virtual XMPP domain):

[[host_config]]\n  host = \"first.com\"\n\n  [host_config.modules.mod_keystore]\n    keys = [{name = \"access_secret\", type = \"ram\"},\n            {name = \"access_psk\", type = \"file\", path = \"priv/first_access_psk\"},\n            {name = \"provision_psk\", type = \"file\", path = \"priv/first_provision_psk\"}]\n\n[[host_config]]\n  host = \"second.com\"\n\n  [host_config.modules.mod_keystore]\n    keys = [{name = \"access_secret\", type = \"ram\"},\n            {name = \"access_psk\", type = \"file\", path = \"priv/second_access_psk\"},\n            {name = \"provision_psk\", type = \"file\", path = \"priv/second_provision_psk\"}]\n

Minimal configuration supporting mod_auth_token:

[modules.mod_keystore]\n  keys = [{name = \"token_secret\", type = \"ram\"}]\n
"},{"location":"modules/mod_last/","title":"mod_last","text":""},{"location":"modules/mod_last/#module-description","title":"Module Description","text":"

Implements XEP-0012: Last Activity.

Use with caution, as it was observed that a user disconnect spike might result in overloading the database with \"last activity\" writes.

"},{"location":"modules/mod_last/#options","title":"Options","text":""},{"location":"modules/mod_last/#modulesmod_lastiqdisctype","title":"modules.mod_last.iqdisc.type","text":"
  • Syntax: string, one of \"one_queue\", \"no_queue\", \"queues\", \"parallel\"
  • Default: \"one_queue\"

Strategy to handle incoming stanzas. For details, please refer to IQ processing policies.

"},{"location":"modules/mod_last/#modulesmod_lastbackend","title":"modules.mod_last.backend","text":"
  • Syntax: string, one of \"mnesia\", \"rdbms\"
  • Default: \"mnesia\"
  • Example: backend = \"rdbms\"

Storage backend.

"},{"location":"modules/mod_last/#example-configuration","title":"Example Configuration","text":"
[modules.mod_last]\n  backend = \"rdbms\"\n
"},{"location":"modules/mod_last/#metrics","title":"Metrics","text":"

If you'd like to learn more about metrics in MongooseIM, please visit MongooseIM metrics page.

Backend action Description (when it gets incremented) get_last A timestamp is fetched from DB. set_last_info A timestamp is stored in DB."},{"location":"modules/mod_mam/","title":"mod_mam","text":""},{"location":"modules/mod_mam/#module-description","title":"Module Description","text":"

This module implements XEP-0313: Message Archive Management. It enables a service to store all user messages for one-to-one chats as well as group chats (MUC, MultiUser Chat). It uses XEP-0059: Result Set Management for paging. It is a highly customizable module, that requires some skill and knowledge to operate properly and efficiently.

MongooseIM is compatible with MAM 0.4-1.1.0.

Configure MAM with different storage backends:

  • RDBMS (databases like MySQL, PostgreSQL, MS SQL Server)
  • Cassandra (NoSQL)
  • ElasticSearch (NoSQL)

mod_mam is a meta-module that ensures all relevant mod_mam_* modules are loaded and properly configured.

"},{"location":"modules/mod_mam/#message-retraction","title":"Message retraction","text":"

This module supports XEP-0424: Message Retraction with RDBMS storage backends. When a retraction message is received, the MAM module finds the message to retract and replaces it with a tombstone.

The following criteria are used to find the original message:

  • The id attribute specified in the apply-to element of the retraction message has to be the same as the id attribute of the origin-id (or stanza-id when configured, see below) element of the original message.
  • Both messages need to originate from the same user.
  • Both messages need to be addressed to the same user.

If more than one message matches the criteria, only the most recent one is retracted. To avoid this case, it is recommended to use a unique identifier (UUID) as the origin ID.

"},{"location":"modules/mod_mam/#retraction-on-the-stanza-id","title":"Retraction on the stanza-id","text":"

This module also implements an extension to the XEP, which allows specifying the stanza-id as created by the server's MAM, instead of the origin-id that the original XEP-0424 specifies. It announces this capability under the namespace urn:esl:message-retract-by-stanza-id:0. This is especially useful in groupchats where the stanza-id of a message is shared and known by all participants.

In this case, to use such functionality,

<apply-to id=\"origin-id-1\" xmlns=\"urn:xmpp:fasten:0\">\n  <retract xmlns='urn:xmpp:message-retract:0'/>\n</apply-to>\n
turns into
<apply-to id=\"stanza-id-1\" xmlns=\"urn:xmpp:fasten:0\">\n  <retract xmlns='urn:esl:message-retract-by-stanza-id:0'/>\n</apply-to>\n
and likewise, the answer would be tagged by the mentioned esl namespace.

"},{"location":"modules/mod_mam/#full-text-search","title":"Full Text Search","text":"

This module allows filtering messages by their text body (if enabled, see Common backend options). This means that an XMPP client, while requesting messages from the archive, may specify not only the standard form fields (with, start, end), but also full-text-search (of type text-single). If this happens, the client will receive only messages that contain words specified in the request.

The exact behaviour, like whether word ordering matters, may depend on the storage backend in use. For now rdbms backend has very limited support for this feature, while cassandra does not support it at all. elasticsearch backend, on the other hand, should provide you with the best results when it comes to text filtering.

mod_mam_rdbms_arch returns all messages that contain all search words, order of words does not matter. Messages are sorted by timestamp (not by relevance).

"},{"location":"modules/mod_mam/#note-on-full-text-search-with-elasticsearch-backend","title":"Note on full text search with ElasticSearch backend","text":"

When using ElasticSearch MAM backend, the value provided in full-text-search form field will be passed to ElasticSearch as Simple Search Query. If you're using our official ElasticSearch mappings from priv/elasticsearch then the query analyzer is set to english. Also note that the default separator for the search query is AND (which roughly means that ElasticSearch will search for messages containing all the terms provided in the query string).

"},{"location":"modules/mod_mam/#options","title":"Options","text":""},{"location":"modules/mod_mam/#modulesmod_mambackend","title":"modules.mod_mam.backend","text":"
  • Syntax: string, one of \"rdbms\", \"cassandra\" and \"elasticsearch\"
  • Default: \"rdbms\"
  • Example: backend = \"elasticsearch\"

Database backend to use.

"},{"location":"modules/mod_mam/#modulesmod_mamno_stanzaid_element","title":"modules.mod_mam.no_stanzaid_element","text":"
  • Syntax: boolean
  • Default: false
  • Example: no_stanzaid_element = true

Do not add a <stanza-id/> element from MAM v1.1.0.

"},{"location":"modules/mod_mam/#modulesmod_mamis_archivable_message","title":"modules.mod_mam.is_archivable_message","text":"
  • Syntax: non-empty string
  • Default: \"mod_mam_utils\"
  • Example: is_archivable_message = \"mod_mam_utils\"

Name of a module implementing is_archivable_message/3 callback that determines if the message should be archived.

"},{"location":"modules/mod_mam/#modulesmod_mamsend_message","title":"modules.mod_mam.send_message","text":"
  • Syntax: non-empty string
  • Default: \"mod_mam_utils\"
  • Example: send_message = \"mod_mam_utils\"

Name of a module implementing send_message/4 callback that routes a message during lookup operation. Consult with mod_mam_utils:send_message/4 code for more information.

Check big_tests/tests/mam_send_message_SUITE_data/mam_send_message_example.erl file in the MongooseIM repository for the usage example.

"},{"location":"modules/mod_mam/#modulesmod_mamarchive_chat_markers","title":"modules.mod_mam.archive_chat_markers","text":"
  • Syntax: boolean
  • Default: false
  • Example: archive_chat_markers = true

If set to true, XEP-0333 chat markers will be archived. See more details here.

"},{"location":"modules/mod_mam/#modulesmod_mammessage_retraction","title":"modules.mod_mam.message_retraction","text":"
  • Syntax: boolean
  • Default: true
  • Example: message_retraction = false

Enables XEP-0424: Message Retraction. This functionality is currently implemented only for the rdbms backend. Retraction messages are always archived regardless of this option.

backend, no_stanzaid_element, is_archivable_message and message_retraction will be applied to both pm and muc (if they are enabled), unless overridden explicitly (see example below).

"},{"location":"modules/mod_mam/#enable-one-to-one-message-archive","title":"Enable one-to-one message archive","text":"

Archive for one-to-one messages can be enabled in one of two ways:

  • Specify [mod_mam.pm] section
[modules.mod_mam]\n[modules.mod_mam.pm] # defining this section enables PM support\n
  • Define any PM related option
[modules.mod_mam]\n  pm.backend = \"rdbms\" # enables PM support and overrides its backend\n
"},{"location":"modules/mod_mam/#disable-one-to-one-message-archive","title":"Disable one-to-one message archive","text":"

To disable archive for one-to-one messages please remove PM section or any PM related option from the config file.

"},{"location":"modules/mod_mam/#pm-specific-options","title":"PM-specific options","text":""},{"location":"modules/mod_mam/#modulesmod_mampmarchive_groupchats","title":"modules.mod_mam.pm.archive_groupchats","text":"
  • Syntax: boolean
  • Default: false
  • Example: modules.mod_mam.pm.archive_groupchats = true

When enabled, MAM will store groupchat messages in recipients' individual archives. USE WITH CAUTION! May increase archive size significantly. Disabling this option for an existing installation will neither remove such messages from MAM storage, nor will it filter them out from search results. Clients can use the include-groupchat filter to filter out groupchat messages while querying the archive.

Warning

The include-groupchat filter doesn't work for Cassandra backend.

"},{"location":"modules/mod_mam/#modulesmod_mampmsame_mam_id_for_peers","title":"modules.mod_mam.pm.same_mam_id_for_peers","text":"
  • Syntax: boolean
  • Default: false
  • Example: modules.mod_mam.pm.same_mam_id_for_peers = true

When enabled, MAM will set the same MAM ID for both sender and recipient. This can be useful in combination with retraction on the stanza-id. Note that this might not work with clients across federation, as the recipient might not implement the same retraction, nor the same IDs.

"},{"location":"modules/mod_mam/#enable-muc-message-archive","title":"Enable MUC message archive","text":"

Archive for MUC messages can be enabled in one of two ways:

  • Specify [mod_mam.muc] section
[modules.mod_mam]\n[modules.mod_mam.muc] # defining this section enables MUC support\n
  • Define any MUC related option
[modules.mod_mam]\n  muc.backend = \"rdbms\" # enables MUC support and overrides its backend\n
"},{"location":"modules/mod_mam/#disable-muc-message-archive","title":"Disable MUC message archive","text":"

To disable archive for MUC messages please remove MUC section or any MUC related option from the config file.

"},{"location":"modules/mod_mam/#muc-specific-options","title":"MUC-specific options","text":""},{"location":"modules/mod_mam/#modulesmod_mammuchost","title":"modules.mod_mam.muc.host","text":"
  • Syntax: string
  • Default: \"conference.@HOST@\"
  • Example: modules.mod_mam.muc.host = \"conference.@HOST@\"

The MUC host that will be archived if MUC archiving is enabled.

Warning

If you are using MUC Light, make sure this option is set to the MUC Light domain

"},{"location":"modules/mod_mam/#example","title":"Example","text":"

The example below presents how to override common option for muc module specifically. Please note that you can override all common options (except cache) in a similar way.

[modules.mod_mam]\n  backend = \"rdbms\"\n  async_writer.enabled = true # this option enables async writer for RDBMS backend\n  muc.async_writer.enabled = false # disable async writer for MUC archive only\n
"},{"location":"modules/mod_mam/#rdbms-backend-options","title":"RDBMS backend options","text":"

These options will only have effect when the rdbms backend is used:

"},{"location":"modules/mod_mam/#modulesmod_mamcache_users","title":"modules.mod_mam.cache_users","text":"
  • Syntax: boolean
  • Default: true
  • Example: modules.mod_mam.cache_users = false

Enables Archive ID to integer mappings cache.

If caching is enabled, by default it will spawn its own segmented_cache cache, with defaults as in mod_cache_users. To change these defaults, the same config can be accessed within the cache key. To see details about the meaning of each flag, see mod_cache_users. To reuse the cache already created by mod_cache_users, see the option below.

modules.mod_mam.cache.strategy\nmodules.mod_mam.cache.time_to_live\nmodules.mod_mam.cache.number_of_segments\n
"},{"location":"modules/mod_mam/#modulesmod_mamcachemodule","title":"modules.mod_mam.cache.module","text":"
  • Syntax: string, one of \"mod_cache_users\" or \"internal\"
  • Default: internal
  • Example: modules.mod_mam.cache.module = \"mod_cache_users\"

Configures which cache to use, either start an internal instance, or reuse the cache created by mod_cache_users, if such module was enabled. Note that if reuse is desired \u2013 that is, cache.module = \"mod_cache_users\", other cache configuration parameters will be ignored.

"},{"location":"modules/mod_mam/#modulesmod_mamasync_writerenabled","title":"modules.mod_mam.async_writer.enabled","text":"
  • Syntax: boolean
  • Default: true
  • Example: modules.mod_mam.async_writer.enabled = false

Enables an asynchronous writer that is faster than the synchronous one but harder to debug. The async writers store batches of messages that will be flushed on a timeout (see flush_interval) or when the batch reaches a given size (see batch_size), so the results of the lookup operations executed right after message routing may be incomplete until the configured time passes or the queue is full.

"},{"location":"modules/mod_mam/#modulesmod_mamasync_writerflush_interval","title":"modules.mod_mam.async_writer.flush_interval","text":"
  • Syntax: non-negative integer
  • Default: 2000
  • Example: modules.mod_mam.async_writer.flush_interval = 2000

How often (in milliseconds) the buffered messages are flushed to DB.

"},{"location":"modules/mod_mam/#modulesmod_mamasync_writerbatch_size","title":"modules.mod_mam.async_writer.batch_size","text":"
  • Syntax: non-negative integer
  • Default: 30
  • Example: modules.mod_mam.async_writer.batch_size = 30

Max size of the batch for an async writer before the queue is considered full and flushed. If the buffer is full, messages are flushed to a database immediately and the flush timer is reset.

"},{"location":"modules/mod_mam/#modulesmod_mamasync_writerpool_size","title":"modules.mod_mam.async_writer.pool_size","text":"
  • Syntax: non-negative integer
  • Default: 4 * erlang:system_info(schedulers_online)
  • Example: modules.mod_mam.async_writer.pool_size = 32

Number of workers in the pool. More than the number of available schedulers is recommended, to minimise lock contention on the message queues, and more than the number of DB workers, to fully utilise the DB capacity. How much higher than these two numbers to go is a matter of fine-tuning for specific deployments.

"},{"location":"modules/mod_mam/#common-backend-options","title":"Common backend options","text":""},{"location":"modules/mod_mam/#modulesmod_mamuser_prefs_store","title":"modules.mod_mam.user_prefs_store","text":"
  • Syntax: one of \"rdbms\", \"cassandra\", \"mnesia\"
  • Default: not set
  • Example: modules.mod_mam.user_prefs_store = \"rdbms\"

Leaving this option unset will prevent users from setting their archiving preferences. It will also increase performance. The possible values are:

  • \"rdbms\" (RDBMS backend only) - User archiving preferences saved in RDBMS. Slow and not recommended, but might be used for simplicity (keeping everything in RDBMS).
  • \"cassandra\" (Cassandra backend only) - User archiving preferences are saved in Cassandra.
  • \"mnesia\" (recommended) - User archiving preferences saved in Mnesia and accessed without transactions. Recommended in most deployments, could be overloaded with lots of users updating their preferences at once. There's a small risk of an inconsistent (in a rather harmless way) state of the preferences table.
"},{"location":"modules/mod_mam/#modulesmod_mamfull_text_search","title":"modules.mod_mam.full_text_search","text":"
  • Syntax: boolean
  • Default: true
  • Example: modules.mod_mam.full_text_search = false

Enables full text search in message archive (see Full Text Search paragraph). Please note that the full text search is currently only implemented for \"rdbms\" backend. Also, full text search works only for messages archived while this option is enabled.

"},{"location":"modules/mod_mam/#is_archivable_message3-callback","title":"is_archivable_message/3 callback","text":"

is_archivable_message option has to name a module exporting is_archivable_message/3 function conforming to the spec:

-spec is_archivable_message(Mod :: module(), Dir :: incoming | outgoing,\n                          Packet :: exml:element()) -> boolean().\n

Servers SHOULD NOT archive messages that do not have a <body/> child tag. Servers SHOULD NOT archive delayed messages.

By default, all messages that hold meaningful content, rather than state changes such as Chat State Notifications, are archived.

"},{"location":"modules/mod_mam/#archiving-chat-markers","title":"Archiving chat markers","text":"

Archiving chat markers can be enabled by setting archive_chat_markers option to true. However it only works if is_archivable_message callback module is set to mod_mam_utils or isn't set at all.

When performing full text search chat markers are treated as if they had empty message body.

"},{"location":"modules/mod_mam/#cassandra-backend","title":"Cassandra backend","text":"

Please consult Outgoing connections page to learn how to properly configure Cassandra connection pool. By default, mod_mam Cassandra backend requires global pool with default tag.

"},{"location":"modules/mod_mam/#elasticsearch-backend","title":"ElasticSearch backend","text":"

First, make sure that your ElasticSearch cluster has expected indexes and mappings in place. Please consult Outgoing connections page to learn how to properly configure ElasticSearch connection pool.

"},{"location":"modules/mod_mam/#low-level-options","title":"Low-level options","text":"

These options allow for fine-grained control over MAM behaviour.

"},{"location":"modules/mod_mam/#modulesmod_mamdefault_result_limit","title":"modules.mod_mam.default_result_limit","text":"
  • Syntax: non-negative integer
  • Default: 50
  • Example: modules.mod_mam.default_result_limit = 100

This sets the default page size of returned results.

"},{"location":"modules/mod_mam/#modulesmod_mammax_result_limit","title":"modules.mod_mam.max_result_limit","text":"
  • Syntax: non-negative integer
  • Default: 50
  • Example: modules.mod_mam.max_result_limit = 100

This sets the maximum page size of returned results.

"},{"location":"modules/mod_mam/#modulesmod_mamenforce_simple_queries","title":"modules.mod_mam.enforce_simple_queries","text":"
  • Syntax: boolean
  • Default: false
  • Example: modules.mod_mam.enforce_simple_queries = true

This enforces all mam lookups to be \"simple\", i.e., they skip the RSM count. See Message Archive Management extensions.

"},{"location":"modules/mod_mam/#modulesmod_mamdelete_domain_limit","title":"modules.mod_mam.delete_domain_limit","text":"
  • Syntax: non-negative integer or the string \"infinity\"
  • Default: \"infinity\"
  • Example: modules.mod_mam.delete_domain_limit = 10000

Domain deletion can be an expensive operation, as it requires to delete potentially many thousands of records from the DB. By default, the delete operation deletes everything in a transaction, but it might be desired, to handle timeouts and table locks more gracefully, to delete the records in batches. This limit establishes the size of the batch.

Note

Not supported by MSSQL.

"},{"location":"modules/mod_mam/#modulesmod_mamdb_jid_format","title":"modules.mod_mam.db_jid_format","text":"
  • Syntax: string, one of \"mam_jid_rfc\", \"mam_jid_rfc_trust\", \"mam_jid_mini\" or a module implementing mam_jid behaviour
  • Default: \"mam_jid_rfc\" for MUC archive, \"mam_jid_mini\" for PM archive
  • Example: modules.mod_mam.db_jid_format = \"mam_jid_mini\"

Sets the internal MAM jid encoder/decoder module for RDBMS.

Warning

Archive MUST be empty to change this option

"},{"location":"modules/mod_mam/#modulesmod_mamdb_message_format","title":"modules.mod_mam.db_message_format","text":"
  • Syntax: string, one of \"mam_message_xml\", \"mam_message_eterm\", \"mam_message_compressed_eterm\" or a module implementing mam_message behaviour
  • Default: \"mam_message_compressed_eterm\" for RDBMS, \"mam_message_xml\" for Cassandra
  • Example: modules.mod_mam.db_message_format = \"mam_message_compressed_eterm\"

Sets the internal MAM message encoder/decoder module.

Warning

Archive MUST be empty to change this option

"},{"location":"modules/mod_mam/#modulesmod_mamextra_fin_element","title":"modules.mod_mam.extra_fin_element","text":"
  • Syntax: string, a module implementing the extra_fin_element/3 callback
  • Default: none
  • Example: modules.mod_mam.extra_fin_element = \"example_mod\"

This module can be used to add subelements to the <fin> element of the MAM lookup query response. It can be useful to be able to add information to a mam query, that doesn't belong to any specific message but to all of them.

"},{"location":"modules/mod_mam/#modulesmod_mamextra_lookup_params","title":"modules.mod_mam.extra_lookup_params","text":"
  • Syntax: string, a module implementing the extra_lookup_params/2 callback
  • Default: none
  • Example: modules.mod_mam.extra_lookup_params = \"example_mod\"

This module can be used to add extra lookup parameters to MAM lookup queries.

"},{"location":"modules/mod_mam/#example-configuration","title":"Example configuration","text":"
[modules.mod_mam]\n  backend = \"rdbms\"\n  no_stanzaid_element = true\n\n  pm.user_prefs_store = \"rdbms\"\n\n  muc.host = \"muc.example.com\"\n  muc.db_message_format = \"mam_message_xml\"\n  muc.async_writer.enabled = false\n  muc.user_prefs_store = \"mnesia\"\n
"},{"location":"modules/mod_mam/#metrics","title":"Metrics","text":"

If you'd like to learn more about metrics in MongooseIM, please visit MongooseIM metrics page.

Name Type Description (when it gets incremented) [HostType, modMamArchiveRemoved] spiral User's entire archive is removed. [HostType, modMamArchived] spiral A message is stored in user's archive. [HostType, modMamDropped] spiral A message couldn't be stored in the DB (and got dropped). [HostType, modMamDroppedIQ] spiral MAM IQ has been dropped due to: high query frequency/invalid syntax or type. [HostType, modMamFlushed] spiral Message was stored in a DB asynchronously. [HostType, modMamForwarded] spiral A message is sent to a client as a part of a MAM query result. [HostType, modMamLookups] spiral A MAM lookup is performed. [HostType, modMamPrefsGets] spiral Archiving preferences have been requested by a client. [HostType, modMamPrefsSets] spiral Archiving preferences have been updated by a client. [HostType, modMucMamArchiveRemoved] spiral Room's entire archive is removed. [HostType, modMucMamArchived] spiral A message is stored in room's archive. [HostType, modMucMamForwarded] spiral A message is sent to a client as a part of a MAM query result from MUC room. [HostType, modMucMamLookups] spiral A MAM lookup in MUC room is performed. [HostType, modMucMamPrefsGets] spiral MUC archiving preferences have been requested by a client. [HostType, modMucMamPrefsSets] spiral MUC archiving preferences have been updated by a client. [HostType, mod_mam_rdbms_async_pool_writer, per_message_flush_time] histogram Average time per message insert measured in an async MAM worker. [HostType, mod_mam_rdbms_async_pool_writer, flush_time] histogram Average time per flush of all buffered messages measured in an async MAM worker. [HostType, mod_mam_muc_rdbms_async_pool_writer, per_message_flush_time] histogram Average time per message insert measured in an async MUC MAM worker. [HostType, mod_mam_muc_rdbms_async_pool_writer, flush_time] histogram Average time per flush of all buffered messages measured in an async MUC MAM worker. 
Backend action Description (when it gets incremented) lookup A lookup in an archive. archive One message is saved in an archive."},{"location":"modules/mod_muc/","title":"mod_muc","text":""},{"location":"modules/mod_muc/#module-description","title":"Module Description","text":"

This module implements XEP-0045: Multi-User Chat (MUC). It's a common XMPP group chat solution. This extension consists of two Erlang modules: mod_muc and mod_muc_room, the latter being the room code itself. Note that only mod_muc needs to be enabled in the configuration file. Also mod_muc_log is a logging submodule.

"},{"location":"modules/mod_muc/#options","title":"Options","text":""},{"location":"modules/mod_muc/#modulesmod_muchost","title":"modules.mod_muc.host","text":"
  • Syntax: string, a valid subdomain
  • Default: \"conference.@HOST@\"
  • Example: host = \"group.@HOST@\"

Subdomain for MUC service to reside under. @HOST@ is replaced with each served domain.

"},{"location":"modules/mod_muc/#modulesmod_mucbackend","title":"modules.mod_muc.backend","text":"
  • Syntax: string, one of \"mnesia\" or \"rdbms\"
  • Default: \"mnesia\"
  • Example: backend = \"rdbms\"

Storage backend to store rooms and settings persistently.

"},{"location":"modules/mod_muc/#modulesmod_muconline_backend","title":"modules.mod_muc.online_backend","text":"
  • Syntax: string, one of \"mnesia\" or \"cets\"
  • Default: \"mnesia\"
  • Example: online_backend = \"cets\"

Backend to use to register and find online rooms. Queried when routing stanzas to the rooms.

Warning

The corresponding internal database has to be enabled.

"},{"location":"modules/mod_muc/#modulesmod_mucaccess","title":"modules.mod_muc.access","text":"
  • Syntax: non-empty string
  • Default: \"all\"
  • Example: access = \"muc\"

Access Rule to determine who is allowed to use the MUC service.

"},{"location":"modules/mod_muc/#modulesmod_mucaccess_create","title":"modules.mod_muc.access_create","text":"
  • Syntax: non-empty string
  • Default: \"all\"
  • Example: access_create = \"muc_create\"

Access Rule to determine who is allowed to create rooms.

"},{"location":"modules/mod_muc/#modulesmod_mucaccess_admin","title":"modules.mod_muc.access_admin","text":"
  • Syntax: non-empty string
  • Default: \"none\"
  • Example: access_admin = \"muc_create\"

Access Rule to determine who is the administrator in all rooms.

"},{"location":"modules/mod_muc/#modulesmod_mucaccess_persistent","title":"modules.mod_muc.access_persistent","text":"
  • Syntax: non-empty string
  • Default: \"all\"
  • Example: access_persistent = \"none\"

Access Rule to determine who is allowed to make the rooms persistent. In order to change this parameter, the user must not only match the Access Rule but also be the owner of the room.

"},{"location":"modules/mod_muc/#modulesmod_muchistory_size","title":"modules.mod_muc.history_size","text":"
  • Syntax: non-negative integer
  • Default: 20
  • Example: history_size = 30

Room message history to be kept in RAM. After node restart, the history is lost.

"},{"location":"modules/mod_muc/#modulesmod_mucroom_shaper","title":"modules.mod_muc.room_shaper","text":"
  • Syntax: non-empty string
  • Default: \"none\"
  • Example: room_shaper = \"muc_room_shaper\"

Limits per-room data throughput with traffic shaper.

"},{"location":"modules/mod_muc/#modulesmod_mucmax_room_id","title":"modules.mod_muc.max_room_id","text":"
  • Syntax: non-negative integer or the string \"infinity\"
  • Default: \"infinity\"
  • Example: max_room_id = 30

Maximum room username length (in JID).

"},{"location":"modules/mod_muc/#modulesmod_mucmax_room_name","title":"modules.mod_muc.max_room_name","text":"
  • Syntax: non-negative integer or the string \"infinity\"
  • Default: \"infinity\"
  • Example: max_room_name = 30

Maximum room name length.

"},{"location":"modules/mod_muc/#modulesmod_mucmax_room_desc","title":"modules.mod_muc.max_room_desc","text":"
  • Syntax: non-negative integer or the string \"infinity\"
  • Default: \"infinity\"
  • Example: max_room_desc = 140

Maximum room description length.

"},{"location":"modules/mod_muc/#modulesmod_mucmin_message_interval","title":"modules.mod_muc.min_message_interval","text":"
  • Syntax: non-negative integer
  • Default: 0
  • Example: min_message_interval = 1

Minimal interval (in seconds) between messages processed by the room.

"},{"location":"modules/mod_muc/#modulesmod_mucmin_presence_interval","title":"modules.mod_muc.min_presence_interval","text":"
  • Syntax: non-negative integer
  • Default: 0
  • Example: min_presence_interval = 1

Minimal interval (in seconds) between presences processed by the room.

"},{"location":"modules/mod_muc/#modulesmod_mucmax_users","title":"modules.mod_muc.max_users","text":"
  • Syntax: positive integer
  • Default: 200
  • Example: max_users = 100

Absolute maximum user count per room on the node.

"},{"location":"modules/mod_muc/#modulesmod_mucmax_users_admin_threshold","title":"modules.mod_muc.max_users_admin_threshold","text":"
  • Syntax: positive integer
  • Default: 5
  • Example: max_users_admin_threshold = 10

When the server checks if a new user can join a room and they are an admin, max_users_admin_threshold is added to max_users during occupant limit check.

"},{"location":"modules/mod_muc/#modulesmod_mucuser_message_shaper","title":"modules.mod_muc.user_message_shaper","text":"
  • Syntax: non-empty string
  • Default: \"none\"
  • Example: user_message_shaper = \"muc_user_msg_shaper\"

Shaper for user messages processed by a room (global for the room).

"},{"location":"modules/mod_muc/#modulesmod_mucuser_presence_shaper","title":"modules.mod_muc.user_presence_shaper","text":"
  • Syntax: non-empty string
  • Default: \"none\"
  • Example: user_presence_shaper = \"muc_user_presence_shaper\"

Shaper for user presences processed by a room (global for the room).

"},{"location":"modules/mod_muc/#modulesmod_mucmax_user_conferences","title":"modules.mod_muc.max_user_conferences","text":"
  • Syntax: non-negative integer
  • Default: 10
  • Example: max_user_conferences = 5

Specifies the number of rooms that a user can occupy simultaneously.

"},{"location":"modules/mod_muc/#modulesmod_muchttp_auth_pool","title":"modules.mod_muc.http_auth_pool","text":"
  • Syntax: non-empty string
  • Default: \"none\"
  • Example: http_auth_pool = \"external_auth\"

If an external HTTP service is chosen to check passwords for password-protected rooms, this option specifies the HTTP pool name to use (see External HTTP Authentication below).

"},{"location":"modules/mod_muc/#modulesmod_mucload_permanent_rooms_at_startup","title":"modules.mod_muc.load_permanent_rooms_at_startup","text":"
  • Syntax: boolean
  • Default: false
  • Example: load_permanent_rooms_at_startup = true

Load all rooms at startup. Because it can be unsafe when there are many rooms, it is disabled by default.

"},{"location":"modules/mod_muc/#modulesmod_muchibernate_timeout","title":"modules.mod_muc.hibernate_timeout","text":"
  • Syntax: non-negative integer or the string \"infinity\"
  • Default: 90000 (milliseconds, 90 seconds)
  • Example: hibernate_timeout = 60000

Timeout (in milliseconds) defining the inactivity period after which the room's process should be hibernated.

"},{"location":"modules/mod_muc/#modulesmod_muchibernated_room_check_interval","title":"modules.mod_muc.hibernated_room_check_interval","text":"
  • Syntax: non-negative integer or the string \"infinity\"
  • Default: \"infinity\"
  • Example: hibernated_room_check_interval = 120000

Interval defining how often the hibernated rooms will be checked (a timer is global for a node).

"},{"location":"modules/mod_muc/#modulesmod_muchibernated_room_timeout","title":"modules.mod_muc.hibernated_room_timeout","text":"
  • Syntax: non-negative integer or the string \"infinity\"
  • Default: \"infinity\"
  • Example: hibernated_room_timeout = 120000

A time after which a hibernated room is stopped (deeply hibernated). See MUC performance optimisation.

"},{"location":"modules/mod_muc/#modulesmod_mucdefault_room","title":"modules.mod_muc.default_room","text":"
  • Syntax: A TOML table of options described below
  • Default: Default room options
  • Example:
  [modules.mod_muc.default_room]\n    password_protected = true\n    description = \"An example description.\"\n\n    [[modules.mod_muc.default_room.affiliations]]\n        user = \"alice\"\n        server = \"localhost\"\n        resource = \"resource1\"\n        affiliation = \"member\"\n

or:

  default_room.password_protected = true\n  default_room.description = \"An example description.\"\n\n  [[modules.mod_muc.default_room.affiliations]]\n    user = \"alice\"\n    server = \"localhost\"\n    resource = \"resource1\"\n    affiliation = \"member\"\n

Available room configuration options to be overridden in the initial state:

  • modules.mod_muc.default_room.title

    • Syntax: string
    • Default: \"\"
    • Example: title = \"example_title\"

    Room title, short free text.

  • modules.mod_muc.default_room.description

    • Syntax: string
    • Default: \"\"
    • Example: description = \"An example description.\"

    Room description, long free text.

  • modules.mod_muc.default_room.allow_change_subj

    • Syntax: boolean
    • Default: true
    • Example: allow_change_subj = false

    Allow all occupants to change the room subject.

  • modules.mod_muc.default_room.allow_query_users

    • Syntax: boolean
    • Default: true
    • Example: allow_query_users = false

    Allow occupants to send IQ queries to other occupants.

  • modules.mod_muc.default_room.allow_private_messages

    • Syntax: boolean
    • Default: true
    • Example: allow_private_messages = false

    Allow private messaging between occupants.

  • modules.mod_muc.default_room.allow_visitor_status

    • Syntax: boolean
    • Default: true
    • Example: allow_visitor_status = false

    Allow occupants to use text statuses in presences. When disabled, text is removed by the room before broadcasting.

  • modules.mod_muc.default_room.allow_visitor_nickchange

    • Syntax: boolean
    • Default: true
    • Example: allow_visitor_nickchange = false

    Allow occupants to change nicknames.

  • modules.mod_muc.default_room.public

    • Syntax: boolean
    • Default: true
    • Example: public = false

    Room is included in the list available via Service Discovery.

  • modules.mod_muc.default_room.public_list

    • Syntax: boolean
    • Default: true
    • Example: public_list = false

    Member list can be fetched by non-members.

  • modules.mod_muc.default_room.persistent

    • Syntax: boolean
    • Default: false
    • Example: persistent = true

    Room will be stored in DB and survive even when the last occupant leaves or the node is restarted.

  • modules.mod_muc.default_room.moderated

    • Syntax: boolean
    • Default: true
    • Example: moderated = false

    Only occupants with a \"voice\" can send group chat messages.

  • modules.mod_muc.default_room.members_by_default

    • Syntax: boolean
    • Default: true
    • Example: members_by_default = false

    All new occupants are members by default, unless they have a different affiliation assigned.

  • modules.mod_muc.default_room.members_only

    • Syntax: boolean
    • Default: false
    • Example: members_only = true

    Only users with a member affiliation can join the room.

  • modules.mod_muc.default_room.allow_user_invites

    • Syntax: boolean
    • Default: false
    • Example: allow_user_invites = true

    Allow ordinary members to send mediated invitations.

  • modules.mod_muc.default_room.allow_multiple_sessions

    • Syntax: boolean
    • Default: false
    • Example: allow_multiple_sessions = true

    Allow multiple user sessions to use the same nick.

  • modules.mod_muc.default_room.password_protected

    • Syntax: boolean
    • Default: false
    • Example: password_protected = true

    Room is protected with a password.

  • modules.mod_muc.default_room.password

    • Syntax: string
    • Default: \"\"
    • Example: password = \"secret\"

    Room password is required upon joining. This option has no effect when password_protected is false.

  • modules.mod_muc.default_room.anonymous

    • Syntax: boolean
    • Default: true
    • Example: anonymous = false

    Room is anonymous, meaning occupants can't see each other's real JIDs, except for the room moderators.

  • modules.mod_muc.default_room.max_users

    • Syntax: positive integer
    • Default: 200
    • Example: max_users = 100

    Maximum user count per room. Admins and the room owner are not affected.

  • modules.mod_muc.default_room.logging

    • Syntax: boolean
    • Default: false
    • Example: logging = true

    Enables logging of room events (messages, presences) to a file on the disk. Uses mod_muc_log.

  • modules.mod_muc.default_room.maygetmemberlist

    • Syntax: array of non-empty strings
    • Default: []
    • Example: maygetmemberlist = [\"moderator\"]

    An array of roles and/or privileges that enable retrieving the room's member list.

  • modules.mod_muc.default_room.affiliations

    • Syntax: array of tables with keys:
      • user - non-empty string,
      • server - string, a valid domain,
      • resource - string,
      • affiliation - non-empty string
    • Default: []
    • Example:
[[modules.mod_muc.default_room.affiliations]]\n  user = \"alice\"\n  server = \"localhost\"\n  resource = \"resource1\"\n  affiliation = \"member\"\n\n[[modules.mod_muc.default_room.affiliations]]\n  user = \"bob\"\n  server = \"localhost\"\n  resource = \"resource2\"\n  affiliation = \"owner\"\n

This is the default list of affiliations set for every new room.

  • modules.mod_muc.default_room.subject

    • Syntax: string
    • Default: \"\"
    • Example: subject = \"Lambda days\"

    A default subject for a new room.

  • modules.mod_muc.default_room.subject_author

    • Syntax: string
    • Default: \"\"
    • Example: subject_author = \"Alice\"

    A nick name of the default subject's author.

"},{"location":"modules/mod_muc/#example-configuration","title":"Example Configuration","text":"
[modules.mod_muc]\n  host = \"muc.example.com\"\n  access = \"muc\"\n  access_create = \"muc_create\"\n  http_auth_pool = \"my_auth_pool\"\n  default_room.password_protected = true\n\n  [[modules.mod_muc.default_room.affiliations]]\n    user = \"alice\"\n    server = \"localhost\"\n    resource = \"resource1\"\n    affiliation = \"member\"\n\n  [[modules.mod_muc.default_room.affiliations]]\n    user = \"bob\"\n    server = \"localhost\"\n    resource = \"resource2\"\n    affiliation = \"owner\"\n
"},{"location":"modules/mod_muc/#performance-optimisations","title":"Performance optimisations","text":"

Each room is represented by an Erlang process with its own state and can consume memory even if it's not used. In large installations with many rooms, this might cause performance issues. To address that problem MongooseIM has 2 levels of MUC rooms memory optimisations.

"},{"location":"modules/mod_muc/#rooms-process-hibernation","title":"Room's process hibernation","text":"

By default the room's process is hibernated by the Erlang VM 90 seconds after the last activity. This timeout can be modified by hibernate_timeout option.

"},{"location":"modules/mod_muc/#room-deep-hibernation","title":"Room deep hibernation","text":"

MongooseIM introduces an additional option of deep hibernation for unused rooms. This optimisation works only for persistent rooms as only these can be restored on demand. The improvement works as follows: 1. All room processes are traversed at a chosen hibernated_room_check_interval. 1. If a hibernated_room_timeout is exceeded, a \"stop\" signal is sent to an unused room. 1. The room's process is stopped only if there are no online users or if the only one is its owner. If the owner is online, a presence of type unavailable is sent to it indicating that the room's process is being terminated.

The room's process can be recreated on demand, for example when a presence is sent to it, or when the owner wants to add more users to the room.

"},{"location":"modules/mod_muc/#external-http-authentication","title":"External HTTP Authentication","text":"

MUC rooms can be protected by a password that is set by the room owner. Note that MongooseIM supports another custom solution, where each attempt to enter or create a room requires the password to be checked by an external HTTP service. To enable this option, you need to:

  • Configure an HTTP connection pool.
  • Set the name of the connection pool as the value of the http_auth_pool option of mod_muc.
  • Enable the password_protected default room option (without setting the password itself).

Whenever a user tries to enter or create a room, the server will receive a GET request to the check_password path. It should return a 200 response with a JSON object {\"code\": Code, \"msg\": Message} in the response body. If the server returns something else, an error presence will be sent back to the client.

  • Code is the status code: 0 indicates a successful authentication, any other value means the authentication failed.
  • Message is a string containing the message to be sent back to the XMPP client indicating the reason for a failed authentication. When authentication succeeds it is ignored and can contain anything (e.g. the string \"OK\").

Example:

[outgoing_pools.http.my_auth_pool]\n  strategy = \"available_worker\"\n  connection.host = \"http://my_server:8000\"\n\n[modules.mod_muc]\n  host = \"muc.example.com\"\n  access = \"muc\"\n  access_create = \"muc_create\"\n  http_auth_pool = \"my_auth_pool\"\n  default_room.password_protected = true\n
"},{"location":"modules/mod_muc/#metrics","title":"Metrics","text":"

If you'd like to learn more about metrics in MongooseIM, please visit MongooseIM metrics page.

Name Type Description (when it gets incremented) [global, mod_muc, deep_hibernations] spiral A room process is stopped (applies only to persistent rooms). [global, mod_muc, process_recreations] spiral A room process is recreated from a persisted state. [global, mod_muc, hibernations] spiral A room process becomes hibernated (garbage collected and put in wait state). [global, mod_muc, hibernated_rooms] value How many rooms are in hibernated state. Does not include rooms in \"deep hibernation\". [global, mod_muc, online_rooms] value How many rooms have running processes (includes rooms in a hibernated state)."},{"location":"modules/mod_muc_light/","title":"mod_muc_light","text":""},{"location":"modules/mod_muc_light/#module-description","title":"Module Description","text":"

This module implements Multi-User Chat Light. It's an experimental XMPP group chat solution. This extension consists of several modules but only mod_muc_light needs to be enabled in the config file.

"},{"location":"modules/mod_muc_light/#options","title":"Options","text":""},{"location":"modules/mod_muc_light/#modulesmod_muc_lighthost","title":"modules.mod_muc_light.host","text":"
  • Syntax: string, a valid subdomain
  • Default: \"muclight.@HOST@\"
  • Example: host = \"group.@HOST@\"

Domain for the MUC Light service to reside under. @HOST@ is replaced with each served domain.

"},{"location":"modules/mod_muc_light/#modulesmod_muc_lightbackend","title":"modules.mod_muc_light.backend","text":"
  • Syntax: string, one of \"mnesia\", \"rdbms\"
  • Default: \"mnesia\"
  • Example: backend = \"rdbms\"

Database backend to use.

"},{"location":"modules/mod_muc_light/#modulesmod_muc_lightcache_affs","title":"modules.mod_muc_light.cache_affs.*","text":"
  • Syntax: TOML section
  • Default: not declared
  • Example: [modules.mod_muc_light.cache_affs]

Enables caching affiliations for rooms, this has the advantage that the list of affiliations of a given room is stored locally, instead of being fetched from the DB on each message delivered to a room. On the other hand, in an edge case of a network split when the affiliations of a room are changed, there's a risk of inconsistencies for the cache having values in one node not yet synchronised with the other.

If caching is enabled, it will spawn its own segmented cache. To configure the cache parameters, the same config can be stored under the cache_affs section. To see details about the meaning of each flag, see mod_cache_users.

modules.mod_muc_light.cache_affs.strategy\nmodules.mod_muc_light.cache_affs.time_to_live\nmodules.mod_muc_light.cache_affs.number_of_segments\n
"},{"location":"modules/mod_muc_light/#modulesmod_muc_lightequal_occupants","title":"modules.mod_muc_light.equal_occupants","text":"
  • Syntax: boolean
  • Default: false
  • Example: equal_occupants = true

When enabled, MUC Light rooms won't have owners. It means that every occupant will be a member, even the room creator.

Warning

This option does not implicitly set all_can_invite to true. If that option is set to false, nobody will be able to join the room after the initial creation request.

"},{"location":"modules/mod_muc_light/#modulesmod_muc_lightlegacy_mode","title":"modules.mod_muc_light.legacy_mode","text":"
  • Syntax: boolean
  • Default: false
  • Example: legacy_mode = true

Enables XEP-0045 compatibility mode. It allows using a subset of classic MUC stanzas with some MUC Light functions limited.

"},{"location":"modules/mod_muc_light/#modulesmod_muc_lightrooms_per_user","title":"modules.mod_muc_light.rooms_per_user","text":"
  • Syntax: positive integer or the string \"infinity\"
  • Default: \"infinity\"
  • Example: rooms_per_user = 100

Specifies a cap on a number of rooms a user can occupy.

Warning

Setting such a limit may trigger expensive DB queries for every occupant addition.

"},{"location":"modules/mod_muc_light/#modulesmod_muc_lightblocking","title":"modules.mod_muc_light.blocking","text":"
  • Syntax: boolean
  • Default: true
  • Example: blocking = false

Blocking feature enabled/disabled.

"},{"location":"modules/mod_muc_light/#modulesmod_muc_lightall_can_configure","title":"modules.mod_muc_light.all_can_configure","text":"
  • Syntax: boolean
  • Default: false
  • Example: all_can_configure = true

When enabled, all room occupants can change all configuration options. If disabled, everyone can still change the room subject.

"},{"location":"modules/mod_muc_light/#modulesmod_muc_lightall_can_invite","title":"modules.mod_muc_light.all_can_invite","text":"
  • Syntax: boolean
  • Default: false
  • Example: all_can_invite = true

When enabled, all room occupants can add new occupants to the room. Occupants added by members become members as well.

"},{"location":"modules/mod_muc_light/#modulesmod_muc_lightmax_occupants","title":"modules.mod_muc_light.max_occupants","text":"
  • Syntax: positive integer or the string \"infinity\"
  • Default: \"infinity\"
  • Example: max_occupants = 100

Specifies a cap on the occupant count per room.

"},{"location":"modules/mod_muc_light/#modulesmod_muc_lightrooms_per_page","title":"modules.mod_muc_light.rooms_per_page","text":"
  • Syntax: positive integer or the string \"infinity\"
  • Default: 10
  • Example: rooms_per_page = 100

Specifies maximal number of rooms returned for a single Disco request.

"},{"location":"modules/mod_muc_light/#modulesmod_muc_lightrooms_in_rosters","title":"modules.mod_muc_light.rooms_in_rosters","text":"
  • Syntax: boolean
  • Default: false
  • Example: rooms_in_rosters = true

When enabled, rooms the user occupies are included in their roster.

"},{"location":"modules/mod_muc_light/#modulesmod_muc_lightconfig_schema","title":"modules.mod_muc_light.config_schema","text":"
  • Syntax: an array of config_schema items, as described below
  • Default:
        [[modules.mod_muc_light.config_schema]]\n          field = \"roomname\"\n          string_value = \"Untitled\"\n\n        [[modules.mod_muc_light.config_schema]]\n          field = \"subject\"\n          string_value = \"\"\n
  • Example:
        [[modules.mod_muc_light.config_schema]]\n          field = \"display-lines\"\n          integer_value = 30\n          internal_key = \"display_lines\"\n

Defines fields allowed in the room configuration.

Each config_schema item is a TOML table with the following keys:

  • field - mandatory, non-empty string - field name.
  • string_value, integer_value, float_value - exactly one of them has to be present, depending on the type of the field:
    • string_value - string,
    • integer_value - integer,
    • float_value - floating-point number.
  • internal_key - optional, non-empty string - field name used in the internal representation, useful only for debugging or custom applications. By default it is the same as field.

Warning

Lack of the roomname field will cause room names in Disco results and Roster items to be set to the room username.

"},{"location":"modules/mod_muc_light/#example-configuration","title":"Example Configuration","text":"
[modules.mod_muc_light]\n  host = \"muclight.example.com\"\n  equal_occupants = true\n  legacy_mode = true\n  rooms_per_user = 10\n  blocking = false\n  all_can_configure = true\n  all_can_invite = true\n  max_occupants = 50\n  rooms_per_page = 5\n  rooms_in_rosters = true\n\n  [modules.mod_muc_light.cache_affs]\n    time_to_live = 60\n\n  [[modules.mod_muc_light.config_schema]] \n    field = \"roomname\"\n    string_value = \"The Room\"\n\n  [[modules.mod_muc_light.config_schema]] \n    field = \"display-lines\"\n    integer_value = 30\n    internal_key = \"display_lines\"\n
"},{"location":"modules/mod_muc_light/#metrics","title":"Metrics","text":"

If you'd like to learn more about metrics in MongooseIM, please visit MongooseIM metrics page.

Backend action Description (when it gets incremented) create_room A new room is stored in a DB. destroy_room Room data is removed from a DB. room_exists A room existence is checked. get_user_rooms A list of rooms the user is a participant of is retrieved from a DB. remove_user All MUC Light related user data is removed from a DB. get_config A room config is retrieved from a DB. set_config A room config is updated in a DB. get_blocking Blocking data is fetched from a DB. set_blocking Blocking data is updated in a DB. get_aff_users An affiliated users list is fetched from a DB. modify_aff_users Affiliations in a room are updated in a DB."},{"location":"modules/mod_muc_log/","title":"mod_muc_log","text":""},{"location":"modules/mod_muc_log/#module-description","title":"Module Description","text":"

A logging submodule for mod_muc. It must be explicitly configured to work. It writes room-related information (configuration) and events (messages, presences) to files on the disk.

"},{"location":"modules/mod_muc_log/#options","title":"Options","text":""},{"location":"modules/mod_muc_log/#modulesmod_muc_logoutdir","title":"modules.mod_muc_log.outdir","text":"
  • Syntax: string
  • Default: \"www/muc\"
  • Example: outdir = \"www/muc\"

Filesystem directory where the files are stored.

"},{"location":"modules/mod_muc_log/#modulesmod_muc_logaccess_log","title":"modules.mod_muc_log.access_log","text":"
  • Syntax: non-empty string
  • Default: \"muc_admin\"
  • Example: access_log = \"muc_admin\"

ACL that defines who can enable/disable logging for specific rooms.

"},{"location":"modules/mod_muc_log/#modulesmod_muc_logdirtype","title":"modules.mod_muc_log.dirtype","text":"
  • Syntax: string, one of \"subdirs\", \"plain\"
  • Default: \"subdirs\"
  • Example: dirtype = \"subdirs\"

Specifies the log directory structure:

  • \"subdirs\": Module will use the following directory structure [Logs root]/[dirname]/YYYY/MM/ with file names being DD.[extension].
  • \"plain\": Module will use the following directory structure [Logs root]/[dirname]/ with file names being YYYY-MM-DD.[extension].
"},{"location":"modules/mod_muc_log/#modulesmod_muc_logdirname","title":"modules.mod_muc_log.dirname","text":"
  • Syntax: string, one of \"room_jid\", \"room_name\"
  • Default: \"room_jid\"
  • Example: dirname = \"room_jid\"

Specifies directory name created for each room:

  • \"room_jid\": Uses the room bare JID.
  • \"room_name\": Uses the room name from its configuration.
"},{"location":"modules/mod_muc_log/#modulesmod_muc_logfile_format","title":"modules.mod_muc_log.file_format","text":"
  • Syntax: string, one of \"html\", \"plaintext\"
  • Default: \"html\"
  • Example: file_format = \"html\"

Specifies the format of output files:

  • \"html\": The output is a fancy-formatted HTML page.
  • \"plaintext\": Just a text file, better suited for processing than HTML.
"},{"location":"modules/mod_muc_log/#modulesmod_muc_logcss_file","title":"modules.mod_muc_log.css_file","text":"
  • Syntax: non-empty string
  • Default: not set - the default styles for HTML logs are used
  • Example: css_file = \"path/to/css/file\"

Specifies the css file used for logs rendering. Please note it won't be copied to the logs directory but the given path will be linked in HTML files instead.

"},{"location":"modules/mod_muc_log/#modulesmod_muc_logtimezone","title":"modules.mod_muc_log.timezone","text":"
  • Syntax: string, one of \"local\", \"universal\"
  • Default: \"local\"
  • Example: timezone = \"universal\"

Specifies the timezone to be used in timestamps written into the logs:

  • local: Uses the local server timezone.
  • universal: Uses GMT.
"},{"location":"modules/mod_muc_log/#modulesmod_muc_logtop_link","title":"modules.mod_muc_log.top_link","text":"
  • Syntax: TOML table with the following mandatory keys: \"target\", \"text\" and string values.
  • Default: {target = \"/\", text = \"Home\"}
  • Example: top_link = {target = \"/top\", text = \"Top page\"}

Allows setting a custom link at the top of the HTML log file. First tuple element is the link target and the second one is the text to be displayed. You can put any HTML instead of just plain text.

"},{"location":"modules/mod_muc_log/#modulesmod_muc_logspam_prevention","title":"modules.mod_muc_log.spam_prevention","text":"
  • Syntax: boolean
  • Default: true
  • Example: spam_prevention = false

When enabled, MongooseIM will enforce rel=\"nofollow\" attribute in links sent by user and written to MUC logs.

"},{"location":"modules/mod_muc_log/#example-configuration","title":"Example Configuration","text":"
[modules.mod_muc_log]\n  outdir = \"/tmp/muclogs\"\n  access_log = \"muc\"\n  dirtype = \"plain\"\n  dirname = \"room_name\"\n  file_format = \"html\"\n  css_file = \"path/to/css/file\"\n  timezone = \"universal\"\n  top_link.target = \"/\"\n  top_link.text = \"Home\"\n
"},{"location":"modules/mod_offline/","title":"mod_offline","text":""},{"location":"modules/mod_offline/#module-description","title":"Module Description","text":"

This module implements an offline messages storage compliant with XEP-0160: Best Practices for Handling Offline Messages. It can store one-to-one and groupchat messages only when the recipient has no online resources. It is not well suited for applications supporting multiple user devices, because anything saved in the DB can be retrieved only once, so the message history is not synchronised between devices. Although mod_offline may be sufficient in some cases, it is preferable to use mod_mam.

If this module is disabled, an error 503 with text \"Bounce offline message\" would be sent back to the sender, each time a message is sent to an offline user. Check mod_offline_stub to disable this error message.

"},{"location":"modules/mod_offline/#options","title":"Options","text":""},{"location":"modules/mod_offline/#modulesmod_offlineaccess_max_user_messages","title":"modules.mod_offline.access_max_user_messages","text":"
  • Syntax: non-empty string
  • Default: \"max_user_offline_messages\"
  • Example: access_max_user_messages = \"custom_max_user_offline_messages\"

Access Rule to use for limiting the storage size per user.

"},{"location":"modules/mod_offline/#modulesmod_offlinebackend","title":"modules.mod_offline.backend","text":"
  • Syntax: string, one of mnesia, rdbms
  • Default: \"mnesia\"
  • Example: backend = \"rdbms\"

Storage backend.

"},{"location":"modules/mod_offline/#modulesmod_offlinestore_groupchat_messages","title":"modules.mod_offline.store_groupchat_messages","text":"
  • Syntax: boolean
  • Default: false
  • Example: store_groupchat_messages = true

Specifies whether or not we should store groupchat messages.

Warning

This option can work only with MUC-light and is not expected to work with MUC.

"},{"location":"modules/mod_offline/#example-configuration","title":"Example Configuration","text":"
[modules.mod_offline]\n  access_max_user_messages = \"max_user_offline_messages\"\n  backend = \"rdbms\"\n  store_groupchat_messages = true\n
"},{"location":"modules/mod_offline/#metrics","title":"Metrics","text":"

If you'd like to learn more about metrics in MongooseIM, please visit MongooseIM metrics page.

Backend action Type Description (when it gets incremented) pop_messages histogram Offline messages for a user are retrieved and deleted from a DB. write_messages histogram New offline messages to a user are written in a DB."},{"location":"modules/mod_offline_stub/","title":"mod_offline_stub","text":""},{"location":"modules/mod_offline_stub/#module-description","title":"Module Description","text":"

RFC 6121 requires a <service-unavailable/> stanza error to be sent to a user messaging an unavailable recipient if the message is not stored for delayed delivery (i.e. as an \"offline message\"). If the recipient exists (i.e. auth module returns true from does_user_exist), mod_mam stores the message, but <service-unavailable/> is still returned. This is not compliant with the RFC. This module prevents returning <service-unavailable/>. Please note that mod_offline_stub is not tightly coupled with mod_mam. It can be used as a standalone extension, if the specific application requires it.

"},{"location":"modules/mod_offline_stub/#options","title":"Options","text":"

None.

"},{"location":"modules/mod_offline_stub/#example-configuration","title":"Example Configuration","text":"
[modules.mod_offline_stub]\n
"},{"location":"modules/mod_ping/","title":"mod_ping","text":""},{"location":"modules/mod_ping/#module-description","title":"Module Description","text":"

This module implements XMPP Ping functionality as described in XEP-0199: XMPP Ping.

"},{"location":"modules/mod_ping/#options","title":"Options","text":""},{"location":"modules/mod_ping/#modulesmod_pingsend_pings","title":"modules.mod_ping.send_pings","text":"
  • Syntax: boolean
  • Default: false
  • Example: send_pings = true

If set to true, the server will send ping iqs to the client if they are not active for a ping_interval.

"},{"location":"modules/mod_ping/#modulesmod_pingping_interval","title":"modules.mod_ping.ping_interval","text":"
  • Syntax: positive integer (seconds)
  • Default: 60
  • Example: ping_interval = 30

Defines the client inactivity timeout after which the server will send a ping request if the above option is set to true.

"},{"location":"modules/mod_ping/#modulesmod_pingtimeout_action","title":"modules.mod_ping.timeout_action","text":"
  • Syntax: string, one of \"none\", \"kill\"
  • Default: \"none\"
  • Example: timeout_action = \"kill\"

Defines if the client connection should be closed if it doesn't reply to a ping request in less than ping_req_timeout.

"},{"location":"modules/mod_ping/#modulesmod_pingping_req_timeout","title":"modules.mod_ping.ping_req_timeout","text":"
  • Syntax: positive integer (seconds)
  • Default: 32
  • Example: ping_req_timeout = 60

Defines how long the server waits for the client to reply to the ping request.

"},{"location":"modules/mod_ping/#modulesmod_pingiqdisctype","title":"modules.mod_ping.iqdisc.type","text":"
  • Syntax: string, one of \"one_queue\", \"no_queue\", \"queues\", \"parallel\"
  • Default: \"no_queue\"

Strategy to handle incoming stanzas. For details, please refer to IQ processing policies.

"},{"location":"modules/mod_ping/#example-configuration","title":"Example Configuration","text":"
[modules.mod_ping]\n  send_pings = true\n  ping_interval = 60\n  timeout_action = \"none\"\n  ping_req_timeout = 32\n
"},{"location":"modules/mod_ping/#metrics","title":"Metrics","text":"

If you'd like to learn more about metrics in MongooseIM, please visit MongooseIM metrics page.

Name Type Description (when it gets incremented) [HostType, mod_ping, ping_response] spiral Client responds to a ping. [HostType, mod_ping, ping_response_timeout] spiral Ping request timeouts without a response from client. [HostType, mod_ping, ping_response_time] histogram Response times (doesn't include timeouts)."},{"location":"modules/mod_presence/","title":"mod_presence","text":""},{"location":"modules/mod_presence/#module-description","title":"Module Description","text":"

This module implements server-side presence handling as specified in RFC 6121.

According to RFC 6121, section 1.3:

it must be possible to use the protocol to provide a presence service, a messaging service, or both. (...) it is not mandatory for an XMPP service to offer both a presence service and a messaging service, and the protocol makes it possible to offer separate and distinct services for presence and for messaging.

This is why server-side presence management and broadcasting is provided separately by this module. It is enabled in the default configuration file, but you can disable it if your use case does not require server-side presence handling - this could significantly improve performance.

"},{"location":"modules/mod_presence/#options","title":"Options","text":"

This module has no configurable options.

"},{"location":"modules/mod_presence/#example-configuration","title":"Example Configuration","text":"
[modules.mod_presence]\n
"},{"location":"modules/mod_presence/#metrics","title":"Metrics","text":"

There are no metrics specific to this module.

"},{"location":"modules/mod_privacy/","title":"mod_privacy","text":""},{"location":"modules/mod_privacy/#module-description","title":"Module Description","text":"

This module implements XEP-0016: Privacy Lists. This extension allows user to block IQs, messages, presences, or all, based on JIDs, subscription, and roster groups.

"},{"location":"modules/mod_privacy/#options","title":"Options","text":""},{"location":"modules/mod_privacy/#modulesmod_privacybackend","title":"modules.mod_privacy.backend","text":"
  • Syntax: string, one of \"mnesia\", \"rdbms\".
  • Default: \"mnesia\"
  • Example: backend = \"mnesia\"
"},{"location":"modules/mod_privacy/#example-configuration","title":"Example Configuration","text":"
[modules.mod_privacy]\n  backend = \"rdbms\"\n
"},{"location":"modules/mod_privacy/#metrics","title":"Metrics","text":"

If you'd like to learn more about metrics in MongooseIM, please visit MongooseIM metrics page.

Backend action Description (when it gets incremented) get_privacy_list A privacy list is retrieved from a DB. get_list_names Names of user's privacy lists are fetched from a DB. get_default_list A default privacy list for a user is fetched from a DB. set_default_list A default list's name for a user is set in a DB. forget_default_list A default list's name for a user is removed from a DB. remove_privacy_list A privacy list is deleted from a DB. replace_privacy_list A privacy list is updated (replaced) in a DB."},{"location":"modules/mod_private/","title":"mod_private","text":""},{"location":"modules/mod_private/#module-description","title":"Module Description","text":"

This module implements XEP-0049: Private XML Storage. It allows users to store custom XML data in the server's database. Used e.g. for storing roster groups separator.

"},{"location":"modules/mod_private/#options","title":"Options","text":""},{"location":"modules/mod_private/#modulesmod_privateiqdisctype","title":"modules.mod_private.iqdisc.type","text":"
  • Syntax: string, one of \"one_queue\", \"no_queue\", \"queues\", \"parallel\"
  • Default: \"one_queue\"

Strategy to handle incoming stanzas. For details, please refer to IQ processing policies.

"},{"location":"modules/mod_private/#modulesmod_privatebackend","title":"modules.mod_private.backend","text":"
  • Syntax: string, one of \"mnesia\", \"rdbms\".
  • Default: \"mnesia\"
  • Example: backend = \"mnesia\"

Database backend to use.

"},{"location":"modules/mod_private/#example-configuration","title":"Example Configuration","text":"
[modules.mod_private]\n  backend = \"mnesia\"\n
"},{"location":"modules/mod_private/#metrics","title":"Metrics","text":"

If you'd like to learn more about metrics in MongooseIM, please visit MongooseIM metrics page.

Backend operation Description (when it gets incremented) multi_get_data XML data is fetched from a DB. multi_set_data XML data is stored in a DB."},{"location":"modules/mod_pubsub/","title":"mod_pubsub","text":""},{"location":"modules/mod_pubsub/#what-is-pubsub","title":"What is PubSub?","text":"

PubSub is a design pattern which mostly promotes a loose coupling between two kinds of entities - publishers and subscribers. Like their names suggest, in the pubsub world we have publishers who fire events, and subscribers who wish to be notified about those events when publishers push data. There might be several subscribers, several publishers, and even several channels (or nodes) where the events are sent.

"},{"location":"modules/mod_pubsub/#module-description","title":"Module Description","text":"

This module implements XEP-0060: Publish-Subscribe. Due to the complexity of the protocol, the PubSub engine makes successive calls to the nodetree and node plugins in order to check the validity of requests, perform the corresponding action and return a result or appropriate error. Such an architecture makes it much easier to write custom pubsub plugins and add new storage backends. It's all about tailoring PubSub to your needs!

Warning

This module does not support dynamic domains.

"},{"location":"modules/mod_pubsub/#options","title":"Options","text":""},{"location":"modules/mod_pubsub/#modulesmod_pubsubiqdisctype","title":"modules.mod_pubsub.iqdisc.type","text":"
  • Syntax: string, one of \"one_queue\", \"no_queue\", \"queues\", \"parallel\"
  • Default: \"one_queue\"

Strategy to handle incoming stanzas. For details, please refer to IQ processing policies.

"},{"location":"modules/mod_pubsub/#modulesmod_pubsubhost","title":"modules.mod_pubsub.host","text":"
  • Syntax: string
  • Default: \"pubsub.@HOST@\"
  • Example: host = \"pubsub.localhost\"

Subdomain for Pubsub service to reside under. @HOST@ is replaced with each served domain.

"},{"location":"modules/mod_pubsub/#modulesmod_pubsubbackend","title":"modules.mod_pubsub.backend","text":"
  • Syntax: string, one of \"mnesia\", \"rdbms\"
  • Default: \"mnesia\"
  • Example: backend = \"rdbms\"

Database backend to use.

"},{"location":"modules/mod_pubsub/#modulesmod_pubsubaccess_createnode","title":"modules.mod_pubsub.access_createnode","text":"
  • Syntax: string, rule name, or \"all\"
  • Default: \"all\"
  • Example: access_createnode = \"all\"

Specifies who is allowed to create pubsub nodes. The access rule referenced here needs to be defined in the access section.

"},{"location":"modules/mod_pubsub/#modulesmod_pubsubmax_items_node","title":"modules.mod_pubsub.max_items_node","text":"
  • Syntax: non-negative integer
  • Default: 10
  • Example: max_items_node = 10

Defines the maximum number of items that can be stored in a node.

"},{"location":"modules/mod_pubsub/#modulesmod_pubsubmax_subscriptions_node","title":"modules.mod_pubsub.max_subscriptions_node","text":"
  • Syntax: non-negative integer
  • Default: not specified (no limit)
  • Example: max_subscriptions_node = 10

The maximum number of subscriptions managed by a node. By default there is no limit.

"},{"location":"modules/mod_pubsub/#modulesmod_pubsubnodetree","title":"modules.mod_pubsub.nodetree","text":"
  • Syntax: string
  • Default: \"tree\"
  • Example: nodetree = \"tree\"

Specifies the storage and organisation of the pubsub nodes. See the section below.

"},{"location":"modules/mod_pubsub/#modulesmod_pubsubignore_pep_from_offline","title":"modules.mod_pubsub.ignore_pep_from_offline","text":"
  • Syntax: boolean
  • Default: true
  • Example: ignore_pep_from_offline = false

Specifies whether or not we should get last published PEP items from users in our roster which are offline when we connect. The default option is true hence we will get only the last items from the online contacts.

"},{"location":"modules/mod_pubsub/#modulesmod_pubsublast_item_cache","title":"modules.mod_pubsub.last_item_cache","text":"
  • Syntax: string, one of \"mnesia\", \"rdbms\", \"false\"
  • Default: \"false\"
  • Example: last_item_cache = \"mnesia\"

If enabled, PubSub will cache the last published items in the nodes. It may increase PubSub performance but at a price of an increased memory usage.

"},{"location":"modules/mod_pubsub/#modulesmod_pubsubplugins","title":"modules.mod_pubsub.plugins","text":"
  • Syntax: array of strings
  • Default: [\"flat\"]
  • Example: plugins = [\"flat\", \"pep\"]

List of enabled pubsub plugins.

"},{"location":"modules/mod_pubsub/#modulesmod_pubsubpep_mapping","title":"modules.mod_pubsub.pep_mapping","text":"
  • Syntax: Array of TOML tables with the following keys: \"namespace\", \"node\" and string values.
  • Default: []
  • Example: pep_mapping = [{namespace = \"urn:xmpp:microblog:0\", node = \"mb\"}]

This permits creating a Key-Value list to define a custom node plugin on a given PEP namespace. E.g. pair {\"urn:xmpp:microblog:0\", \"mb\"} will use module node_mb instead of node_pep when the specified namespace is used.

"},{"location":"modules/mod_pubsub/#modulesmod_pubsubdefault_node_config","title":"modules.mod_pubsub.default_node_config","text":"
  • Syntax: TOML table with the following values: string, boolean or non-negative integer.
  • Default: {}
  • Example: default_node_config = {deliver_payloads = true, max_payload_size = 10000, node_type = \"leaf\"}

Overrides the default node configuration, regardless of the node plugin. Node configuration still uses the default configuration defined by the node plugin, and overrides any items by the value defined in this configurable list.

The possible options, altogether with their default values for each node plugin, are listed in the table below:

syntax node_flat / node_hometree node_pep node_dag node_push access_model non-empty string open presence open whitelist deliver_notifications boolean true true true true deliver_payloads boolean true true true true max_items non-negative integer 10 1 10 1 max_payload_size non-negative integer 60000 60000 60000 60000 node_type non-empty string N/A N/A leaf N/A notification_type non-empty string headline headline headline headline notify_config boolean false false false false notify_delete boolean false false false false notify_retract boolean false false false false persist_items boolean true true true false presence_based_delivery boolean false true false true publish_model non-empty string publishers publishers publishers open purge_offline boolean false false false false roster_groups_allowed non-empty string [] [] [] [] send_last_published_item non-empty string never on_sub_and_presence never on_sub_and_presence subscribe boolean true true true true"},{"location":"modules/mod_pubsub/#modulesmod_pubsubitem_publisher","title":"modules.mod_pubsub.item_publisher","text":"
  • Syntax: boolean
  • Default: false
  • Example: item_publisher = false

When enabled, a JID of the publisher will be saved in the item metadata. This effectively makes them an owner of this item.

"},{"location":"modules/mod_pubsub/#modulesmod_pubsubsync_broadcast","title":"modules.mod_pubsub.sync_broadcast","text":"
  • Syntax: boolean
  • Default: false
  • Example: sync_broadcast = false

If false, routing of notifications to subscribers is done in a separate Erlang process. As a consequence, some notifications may arrive to the subscribers in the wrong order (however, the two events would have to be published at the exact same time).

"},{"location":"modules/mod_pubsub/#cache-backend","title":"Cache Backend","text":"

Caching is disabled by default. You may enable it by specifying the backend it should use. It is not coupled with the main DB backend, so it is possible to store the cached data in mnesia, while the actual PubSub information is kept in RDBMS (and vice versa!).

"},{"location":"modules/mod_pubsub/#example-configuration","title":"Example Configuration","text":"
[modules.mod_pubsub]\n  access_createnode = \"pubsub_createnode\"\n  ignore_pep_from_offline = false\n  backend = \"rdbms\"\n  last_item_cache = \"mnesia\"\n  max_items_node = 1000\n  plugins = [\"flat\", \"pep\"]\n\n  [[modules.mod_pubsub.pep_mapping]]\n    namespace = \"urn:xmpp:microblog:0\"\n    node = \"mb\"\n
"},{"location":"modules/mod_pubsub/#nodetrees","title":"Nodetrees","text":"

Called on get, create and delete node. Only one nodetree can be used per host and is shared by all node plugins.

"},{"location":"modules/mod_pubsub/#tree","title":"\"tree\"","text":"

Stores nodes in a tree structure. Every node name must be formatted like a UNIX path (e.g. /top/middle/leaf). When a node is created, its direct ancestor must already exist, so in order to create /top/middle/leaf, /top/middle is needed. A user may create any top-level node. A user may create a subnode of a node, only if they own it or it was created by the service.

"},{"location":"modules/mod_pubsub/#dag","title":"\"dag\"","text":"

Provides experimental support for XEP-0248: PubSub Collection Nodes. In this case you should also add the \"dag\" node plugin as default, for example: plugins = [\"dag\", \"flat\", \"hometree\", \"pep\"].

"},{"location":"modules/mod_pubsub/#plugins","title":"Plugins","text":"

They handle affiliations, subscriptions and items and also provide default node configuration and features. PubSub clients can define which plugin to use when creating a node by adding type='plugin-name' attribute to the create stanza element. If such an attribute is not specified, the default plugin will be the first on the plugin list.

"},{"location":"modules/mod_pubsub/#flat","title":"\"flat\"","text":"

No node hierarchy. It handles the standard PubSub case.

"},{"location":"modules/mod_pubsub/#hometree","title":"\"hometree\"","text":"

Uses the exact same features as the flat plugin but additionally organises nodes in a tree. Basically it follows a scheme similar to the filesystem's structure. Every user can create nodes in their own home root: e.g /home/user. Each node can contain items and/or sub-nodes.

"},{"location":"modules/mod_pubsub/#pep","title":"\"pep\"","text":"

Implementation of XEP-0163: Personal Eventing Protocol. In this case, items are not persisted but kept in an in-memory cache. When the pep plugin is enabled, a user can have their own node (exposed as their bare jid) with a common namespace. Requires module mod_caps to be enabled.

For XEP-0384: OMEMO Encryption, it might be required to configure the access_model to open or override the default access_model in the following way:

[modules.mod_pubsub]\n  access_createnode = \"pubsub_createnode\"\n  plugins = [\"pep\"]\n  default_node_config = {access_model = \"open\"}\n
"},{"location":"modules/mod_pubsub/#dag_1","title":"\"dag\"","text":"

Implementation of XEP-0248: PubSub Collection Nodes. Every node takes a place in a collection and becomes either a collection node (and have only sub-nodes) or a leaf node (contains only items).

"},{"location":"modules/mod_pubsub/#push","title":"\"push\"","text":"

Special node type that may be used as a target node for XEP-0357: Push Notifications capable services (e.g. mod_event_pusher_push). For each published notification, a hook push_notification is run. You may enable as many modules that support this hook (all module with mod_push_service_* name prefix) as you like (see for example mod_push_service_mongoosepush). This node type requires publish-options with at least device_id and service fields supplied.

"},{"location":"modules/mod_pubsub/#metrics","title":"Metrics","text":"

If you'd like to learn more about metrics in MongooseIM, please visit the MongooseIM metrics page.

"},{"location":"modules/mod_pubsub/#overall-pubsub-action-metrics","title":"Overall PubSub action metrics","text":"

For every PubSub action, like node creation, subscription, publication the following metrics are available:

  • count - a spiral metric showing the number of given action invocations
  • errors - a spiral metric counting the errors for a given action
  • time - a histogram metric showing the time it took to finish the action in case of success

Below there is a table describing all metrics related to PubSub actions

Name Description (when it gets incremented) [HOST, pubsub, get, affiliations, TYPE] When node's affiliations are read [HOST, pubsub, get, configure, TYPE] When node's configuration is read [HOST, pubsub, get, default, TYPE] When node's defaults are read [HOST, pubsub, get, items, TYPE] When node's items are read [HOST, pubsub, get, options, TYPE] When node's options are read [HOST, pubsub, get, subscriptions, TYPE] When node's subscriptions are read [HOST, pubsub, set, affiliations, TYPE] When node's affiliations are set [HOST, pubsub, set, configure, TYPE] When node's configuration is set [HOST, pubsub, set, create, TYPE] When node is created [HOST, pubsub, set, delete, TYPE] When node is deleted [HOST, pubsub, set, options, TYPE] When node's options are set [HOST, pubsub, set, publish, TYPE] When an item is published [HOST, pubsub, set, purge, TYPE] When node's items are purged [HOST, pubsub, set, retract, TYPE] When node's items are retracted [HOST, pubsub, set, subscribe, TYPE] When a subscriber subscribes to a node [HOST, pubsub, set, subscriptions, TYPE] When a subscription is set (for instance accepted) [HOST, pubsub, set, unsubscribe, TYPE] When a subscriber unsubscribes

Where:

  • HOST is the XMPP host for which mod_pubsub is running. Can be set to global if all metrics are set to be global.
  • TYPE is one of the following count, errors, time (described above the table)
"},{"location":"modules/mod_pubsub/#backend-operations","title":"Backend operations","text":"

There are also more detailed metrics measuring execution time of backend operations.

Metrics for these actions may be found under mod_pubsub_db subkey.

Backend action Description (when it gets incremented) get_state User's state for a specific node is fetched. get_states Node's states are fetched. get_states_by_lus Nodes' states for user + domain are fetched. get_states_by_bare Nodes' states for bare JID are fetched. get_states_by_full Nodes' states for full JID are fetched. get_own_nodes_states State data for user's nodes is fetched. create_node A node's owner is set. del_node All data related to a node is removed. get_items Node's items are fetched. get_item A specific item from a node is fetched. add_item An item is upserted into a node. set_item An item is updated in a node. del_item An item is deleted from a node. del_items Specified items are deleted from a node. set_node A node is upserted. find_node_by_id A node is fetched by its ID. find_nodes_by_key Nodes are fetched by key. find_node_by_name A node is fetched by its name. del_node A node is deleted. get_subnodes Subnodes of a node are fetched. get_subnodes_tree Full tree of subnodes of a node is fetched. get_parentnodes_tree All parents of a node are fetched."},{"location":"modules/mod_push_service_mongoosepush/","title":"mod_push_service_mongoosepush","text":""},{"location":"modules/mod_push_service_mongoosepush/#module-description","title":"Module Description","text":"

This module handles the push_notification hook generated by mod_pubsub with an active push node. Each push_notification hook is converted as a REST API call to the MongoosePush service. You can find the full list of supported publish-options here.

Warning

This module does not support dynamic domains.

"},{"location":"modules/mod_push_service_mongoosepush/#prerequisites","title":"Prerequisites","text":"

This module uses a connection pool via mongoose_http_client. It must be defined in outgoing_pools setting.

"},{"location":"modules/mod_push_service_mongoosepush/#options","title":"Options","text":""},{"location":"modules/mod_push_service_mongoosepush/#modulesmod_push_service_mongoosepushpool_name","title":"modules.mod_push_service_mongoosepush.pool_name","text":"
  • Syntax: non-empty string
  • Default: \"undefined\"
  • Example: pool_name = \"mongoose_push_http\"

The name of the pool to use (as defined in outgoing_pools).

"},{"location":"modules/mod_push_service_mongoosepush/#modulesmod_push_service_mongoosepushapi_version","title":"modules.mod_push_service_mongoosepush.api_version","text":"
  • Syntax: string, \"v2\" or \"v3\"
  • Default: \"v3\"
  • Example: api_version = \"v3\"

REST API version to be used.

"},{"location":"modules/mod_push_service_mongoosepush/#modulesmod_push_service_mongoosepushmax_http_connections","title":"modules.mod_push_service_mongoosepush.max_http_connections","text":"
  • Syntax: non-negative integer
  • Default: 100
  • Example: max_http_connections = 100

The maximum amount of concurrent HTTP connections.

"},{"location":"modules/mod_push_service_mongoosepush/#example-configuration","title":"Example configuration","text":"
[outgoing_pools.http.mongoose_push_http]\n  scope = \"global\"\n  workers = 50\n\n  [outgoing_pools.http.mongoose_push_http.connection]\n    host = \"https://localhost:8443\"\n    path_prefix = \"/\"\n    request_timeout = 2000\n\n[modules.mod_push_service_mongoosepush]\n  pool_name = \"mongoose_push_http\"\n  api_version = \"v3\"\n  max_http_connections = 100\n
"},{"location":"modules/mod_register/","title":"mod_register","text":""},{"location":"modules/mod_register/#module-description","title":"Module Description","text":"

This module implements XEP-0077: In-Band Registration, allowing users to register accounts on the server via XMPP. Use of this module on Internet-facing servers is not recommended.

"},{"location":"modules/mod_register/#options","title":"Options","text":""},{"location":"modules/mod_register/#modulesmod_registeriqdisctype","title":"modules.mod_register.iqdisc.type","text":"
  • Syntax: string, one of \"one_queue\", \"no_queue\", \"queues\", \"parallel\"
  • Default: \"one_queue\"

Strategy to handle incoming stanzas. For details, please refer to IQ processing policies.

"},{"location":"modules/mod_register/#modulesmod_registeraccess","title":"modules.mod_register.access","text":"
  • Syntax: string, rule name or \"all\"
  • Default: \"all\"
  • Example: access = \"all\"

Defines which access rule should be used for checking if a chosen username is allowed for registration.

"},{"location":"modules/mod_register/#modulesmod_registerwelcome_message","title":"modules.mod_register.welcome_message","text":"
  • Syntax: TOML table with the following keys: \"body\", \"subject\" and string values.
  • Default: {subject = \"\", body = \"\"}
  • Example: welcome_message = {subject = \"Hello from MIM!\", body = \"Message body.\"}

Body and subject of a <message> stanza sent to new users. Only one of the fields (but non-empty) is mandatory for the message to be sent.

"},{"location":"modules/mod_register/#modulesmod_registerregistration_watchers","title":"modules.mod_register.registration_watchers","text":"
  • Syntax: array of strings
  • Default: []
  • Example: registration_watchers = [\"JID1\", \"JID2\"]

List of JIDs, which should receive a <message> notification about every successful registration.

"},{"location":"modules/mod_register/#modulesmod_registerpassword_strength","title":"modules.mod_register.password_strength","text":"
  • Syntax: non-negative integer
  • Default: 0
  • Example: password_strength = 32

Specifies minimal entropy of allowed password. Entropy is measured with ejabberd_auth:entropy/1. When set to 0, the password strength is not checked. Recommended minimum is 32. The entropy calculation algorithm is described in a section below.

"},{"location":"modules/mod_register/#modulesmod_registerip_access","title":"modules.mod_register.ip_access","text":"
  • Syntax: Array of TOML tables with the following mandatory content:

    • address - string, IP address
    • policy - string, one of: \"allow\", \"deny\".
  • Default: []

  • Example: ip_access = [ {address = \"127.0.0.0/8\", policy = \"allow\"}, {address = \"0.0.0.0/0\", policy = \"deny\"} ]

Access list for specified IPs or networks. Default value allows registration from every IP.

"},{"location":"modules/mod_register/#example-configuration","title":"Example configuration","text":"

Allow registrations from localhost:

[modules.mod_register]\n  welcome_message = {subject = \"Hello from MIM!\", body = \"Message body.\"}\n  ip_access = [\n    {address = \"127.0.0.1\", policy = \"allow\"}\n  ]\n  access = \"register\"\n

Deny registration from network 10.20.0.0 with mask 255.255.0.0.

[modules.mod_register]\n  ip_access = [\n    {address = \"10.20.0.0/16\", policy = \"deny\"}\n  ]\n

"},{"location":"modules/mod_register/#metrics","title":"Metrics","text":"

If you'd like to learn more about metrics in MongooseIM, please visit MongooseIM metrics page.

Name Type Description (when it gets incremented) [Host, modRegisterCount] spiral A user registers via mod_register module. [Host, modUnregisterCount] spiral A user unregisters via mod_register module."},{"location":"modules/mod_register/#entropy-calculation-algorithm","title":"Entropy calculation algorithm","text":"
Entropy = length(Password) * log(X) / log(2)\n

Where X is initially set to 0 and certain values are added if at least one of these bytes are present:

  • Lower case character: 26
  • Upper case character: 26
  • Digit: 9
  • Printable ASCII (0x21 - 0x7e): 33
  • Any other value: 128

Note

These values are added only once, no matter how many bytes of specific type are found.

"},{"location":"modules/mod_register/#example-entropies","title":"Example entropies","text":"
  • kotek: ~23.5
  • abc123: ~30.8
  • L33tSp34k: ~53.4
  • CamelCase: ~51.3
  • lowUP1#:: ~45.9
  • lowUP1#\u2764: ~78
"},{"location":"modules/mod_roster/","title":"mod_roster","text":""},{"location":"modules/mod_roster/#module-description","title":"Module Description","text":"

The module implements roster support, specified in RFC 6121. Includes support for XEP-0237: Roster Versioning. It can sometimes become quite a heavyweight feature, so there is an option to disable it.

"},{"location":"modules/mod_roster/#options","title":"Options","text":""},{"location":"modules/mod_roster/#modulesmod_rosteriqdisctype","title":"modules.mod_roster.iqdisc.type","text":"
  • Syntax: string, one of \"one_queue\", \"no_queue\", \"queues\", \"parallel\"
  • Default: \"one_queue\"

Strategy to handle incoming stanzas. For details, please refer to IQ processing policies.

"},{"location":"modules/mod_roster/#modulesmod_rosterversioning","title":"modules.mod_roster.versioning","text":"
  • Syntax: boolean
  • Default: false
  • Example: versioning = true

Turn on/off support for Roster Versioning.

"},{"location":"modules/mod_roster/#modulesmod_rosterstore_current_id","title":"modules.mod_roster.store_current_id","text":"
  • Syntax: boolean
  • Default: false
  • Example: store_current_id = true

Stores the last roster hash in DB (used in Roster Versioning). Improves performance but should be disabled, when shared rosters are used.

"},{"location":"modules/mod_roster/#modulesmod_rosterbackend","title":"modules.mod_roster.backend","text":"
  • Syntax: string, one of \"mnesia\", \"rdbms\"
  • Default: \"mnesia\"
  • Example: backend = \"mnesia\"
"},{"location":"modules/mod_roster/#example-configuration","title":"Example configuration","text":"
[modules.mod_roster]\n  versioning = true\n  store_current_id = true\n
"},{"location":"modules/mod_roster/#metrics","title":"Metrics","text":"

If you'd like to learn more about metrics in MongooseIM, please visit MongooseIM metrics page.

Backend action Description (when it gets incremented) read_roster_version Version of a user's roster is retrieved. write_roster_version Version of a user's roster is stored. get_roster A user's roster is fetched. get_roster_entry A specific roster entry is fetched. get_roster_entry_t A specific roster entry is fetched inside a transaction. get_subscription_lists A subscription list of a user is retrieved. roster_subscribe_t A subscription status between users is updated inside a transaction. update_roster_t A roster entry is updated in a transaction. del_roster_t A roster entry is removed inside a transaction."},{"location":"modules/mod_sasl2/","title":"mod_sasl2","text":""},{"location":"modules/mod_sasl2/#module-description","title":"Module Description","text":"

Implements XEP-0388: Extensible SASL Profile.

"},{"location":"modules/mod_shared_roster_ldap/","title":"mod_shared_roster_ldap","text":""},{"location":"modules/mod_shared_roster_ldap/#module-description","title":"Module Description","text":"

This module injects roster entries fetched from LDAP. It might get quite complicated to configure it properly, so proceed with caution.

Warning

This module does not support dynamic domains.

"},{"location":"modules/mod_shared_roster_ldap/#options-general","title":"Options: general","text":""},{"location":"modules/mod_shared_roster_ldap/#modulesmod_shared_roster_ldappool_tag","title":"modules.mod_shared_roster_ldap.pool_tag","text":""},{"location":"modules/mod_shared_roster_ldap/#modulesmod_shared_roster_ldapbase","title":"modules.mod_shared_roster_ldap.base","text":""},{"location":"modules/mod_shared_roster_ldap/#modulesmod_shared_roster_ldapderef","title":"modules.mod_shared_roster_ldap.deref","text":"

These 3 options are the same as for the LDAP authentication module.

"},{"location":"modules/mod_shared_roster_ldap/#options-attributes","title":"Options: attributes","text":""},{"location":"modules/mod_shared_roster_ldap/#modulesmod_shared_roster_ldapgroupattr","title":"modules.mod_shared_roster_ldap.groupattr","text":"
  • Syntax: string
  • Default: \"cn\"
  • Example: groupattr = \"cn\"

Provides a group name.

"},{"location":"modules/mod_shared_roster_ldap/#modulesmod_shared_roster_ldapgroupdesc","title":"modules.mod_shared_roster_ldap.groupdesc","text":"
  • Syntax: string
  • Default: the value of groupattr
  • Example: groupdesc = \"cn\"

Provides a group description.

"},{"location":"modules/mod_shared_roster_ldap/#modulesmod_shared_roster_ldapuserdesc","title":"modules.mod_shared_roster_ldap.userdesc","text":"
  • Syntax: string
  • Default: \"cn\"
  • Example: userdesc = \"cn\"

Provides a human-readable user name.

"},{"location":"modules/mod_shared_roster_ldap/#modulesmod_shared_roster_ldapuseruid","title":"modules.mod_shared_roster_ldap.useruid","text":"
  • Syntax: string
  • Default: \"cn\"
  • Example: useruid = \"cn\"

Provides a username.

"},{"location":"modules/mod_shared_roster_ldap/#modulesmod_shared_roster_ldapmemberattr","title":"modules.mod_shared_roster_ldap.memberattr","text":"
  • Syntax: string
  • Default: \"memberUid\"
  • Example: memberattr = \"memberUid\"

Holds group members' IDs.

"},{"location":"modules/mod_shared_roster_ldap/#modulesmod_shared_roster_ldapmemberattr_format","title":"modules.mod_shared_roster_ldap.memberattr_format","text":"
  • Syntax: string
  • Default: \"%u\"
  • Example: memberattr_format = \"%u\"

Simple LDAP expression for extracting a user ID.

"},{"location":"modules/mod_shared_roster_ldap/#modulesmod_shared_roster_ldapmemberattr_format_re","title":"modules.mod_shared_roster_ldap.memberattr_format_re","text":"
  • Syntax: string
  • Default: \"\"
  • Example: memberattr_format_re = \"\"

Allows extracting the user ID with a regular expression.

"},{"location":"modules/mod_shared_roster_ldap/#options-parameters","title":"Options: parameters","text":""},{"location":"modules/mod_shared_roster_ldap/#modulesmod_shared_roster_ldapauth_check","title":"modules.mod_shared_roster_ldap.auth_check","text":"
  • Syntax: boolean
  • Default: true
  • Example: auth_check = true

Enables checking if a shared roster entry actually exists in the XMPP database.

"},{"location":"modules/mod_shared_roster_ldap/#modulesmod_shared_roster_ldapuser_cache_validity","title":"modules.mod_shared_roster_ldap.user_cache_validity","text":"
  • Syntax: positive integer
  • Default: 300
  • Example: user_cache_validity = 300

Specifies in seconds how long the roster entries are kept in the cache.

"},{"location":"modules/mod_shared_roster_ldap/#modulesmod_shared_roster_ldapgroup_cache_validity","title":"modules.mod_shared_roster_ldap.group_cache_validity","text":"
  • Syntax: positive integer
  • Default: 300
  • Example: group_cache_validity = 300

Specifies in seconds how long the user's membership in a group is kept in the cache.

"},{"location":"modules/mod_shared_roster_ldap/#modulesmod_shared_roster_ldapuser_cache_size","title":"modules.mod_shared_roster_ldap.user_cache_size","text":"
  • Syntax: positive integer
  • Default: 1000
  • Example: user_cache_size = 1000

Specifies how many shared roster items are kept in the cache.

"},{"location":"modules/mod_shared_roster_ldap/#modulesmod_shared_roster_ldapgroup_cache_size","title":"modules.mod_shared_roster_ldap.group_cache_size","text":"
  • Syntax: positive integer
  • Default: 1000
  • Example: group_cache_size = 1000

Specifies how many roster group entries are kept in cache.

"},{"location":"modules/mod_shared_roster_ldap/#options-ldap-filters","title":"Options: LDAP filters","text":""},{"location":"modules/mod_shared_roster_ldap/#modulesmod_shared_roster_ldaprfilter","title":"modules.mod_shared_roster_ldap.rfilter","text":"
  • Syntax: string
  • Default: \"\"
  • Example: rfilter = \"\"

Used to find names of all shared roster groups.

"},{"location":"modules/mod_shared_roster_ldap/#modulesmod_shared_roster_ldapgfilter","title":"modules.mod_shared_roster_ldap.gfilter","text":"
  • Syntax: string
  • Default: \"\"
  • Example: gfilter = \"\"

Used for retrieving the human-readable name and the members of a group.

"},{"location":"modules/mod_shared_roster_ldap/#modulesmod_shared_roster_ldapufilter","title":"modules.mod_shared_roster_ldap.ufilter","text":"
  • Syntax: string
  • Default: \"\"
  • Example: ufilter = \"\"

Used for retrieving the human-readable name of the roster entries.

"},{"location":"modules/mod_shared_roster_ldap/#modulesmod_shared_roster_ldapfilter","title":"modules.mod_shared_roster_ldap.filter","text":"
  • Syntax: string
  • Default: \"\"
  • Example: filter = \"(objectClass=inetOrgPerson)\"

Filter AND-ed with previous filters.

"},{"location":"modules/mod_shared_roster_ldap/#example-configuration","title":"Example Configuration","text":"
[modules.mod_shared_roster_ldap]\n  base = \"ou=Users,dc=ejd,dc=com\"\n  groupattr = \"ou\"\n  memberattr = \"cn\"\n  userdesc = \"cn\"\n  filter = \"(objectClass=inetOrgPerson)\"\n  rfilter = \"(objectClass=inetOrgPerson)\"\n  group_cache_validity = 1\n  user_cache_validity = 1\n
"},{"location":"modules/mod_sic/","title":"mod_sic","text":""},{"location":"modules/mod_sic/#module-description","title":"Module Description","text":"

This module implements XEP-0279: Server IP Check. It allows clients to ask the server what the client's IP address and port are from the server's perspective.

"},{"location":"modules/mod_sic/#options","title":"Options","text":""},{"location":"modules/mod_sic/#modulesmod_siciqdisctype","title":"modules.mod_sic.iqdisc.type","text":"
  • Syntax: string, one of \"one_queue\", \"no_queue\", \"queues\", \"parallel\"
  • Default: \"one_queue\"

Strategy to handle incoming stanzas. For details, please refer to IQ processing policies.

"},{"location":"modules/mod_sic/#example-configuration","title":"Example Configuration","text":"
[modules.mod_sic]\n
"},{"location":"modules/mod_smart_markers/","title":"mod_smart_markers","text":""},{"location":"modules/mod_smart_markers/#module-description","title":"Module Description","text":"

Smart markers are an experimental feature, described in detail as our Open XMPP Extension for markers.

"},{"location":"modules/mod_smart_markers/#options","title":"Options","text":""},{"location":"modules/mod_smart_markers/#modulesmod_smart_markersiqdisc","title":"modules.mod_smart_markers.iqdisc","text":"
  • Syntax: string, one of \"one_queue\", \"no_queue\", \"queues\", \"parallel\"
  • Default: \"no_queue\"

Strategy to handle incoming IQ requests. For details, please refer to IQ processing policies.

"},{"location":"modules/mod_smart_markers/#modulesmod_smart_markersbackend","title":"modules.mod_smart_markers.backend","text":"
  • Syntax: string, one of \"rdbms\", \"rdbms_async\"
  • Default: \"rdbms\"
  • Example: backend = \"rdbms_async\"

Only RDBMS storage is supported, but rdbms means flushes to DB are synchronous with each message, while rdbms_async is instead asynchronous.

Regular rdbms has worse performance characteristics, but it has better consistency properties, as events aren't lost nor reordered. rdbms_async processes events asynchronously, potentially unloading a lot of aggregation from the DB. Like the case of the asynchronous workers for MAM, it is the preferred method, with the risk of messages being lost on an ungraceful shutdown.

"},{"location":"modules/mod_smart_markers/#modulesmod_smart_markerskeep_private","title":"modules.mod_smart_markers.keep_private","text":"
  • Syntax: boolean
  • Default: false
  • Example: keep_private = true

This indicates if markers are meant to be private to the sender of the marker (setting keep_private as true), or if they can be public.

By default markers are public to the conversation where they are sent, so they'll be routed to all recipients, and anyone in the chat can see where its peers are at any time, i.e., the Facebook Messenger model; but they can be configured private, so markers won't be routed to anyone, and a user who fetches their status will only receive information for markers they have sent alone, i.e., the Slack model.

"},{"location":"modules/mod_smart_markers/#example-configuration","title":"Example configuration","text":"
[modules.mod_smart_markers]\n  backend = \"rdbms\"\n  iqdisc = \"parallel\"\n
"},{"location":"modules/mod_smart_markers/#implementation-details","title":"Implementation details","text":"

The current implementation has some limitations:

  • It does not verify that markers only move forwards, hence a user can, intentionally or accidentally, send a marker to an older message, and this would override newer ones.
  • It stores markers sent only for users served on a local domain. It does not store received markers, so if the peer is reached across federation, this module won't track markers for federated users. Therefore extensions that desire seeing not only the sender's markers but also the peer's markers, won't work with the current implementation across federated users.
"},{"location":"modules/mod_stream_management/","title":"mod_stream_management","text":""},{"location":"modules/mod_stream_management/#module-description","title":"Module Description","text":"

Enables XEP-0198: Stream Management. Implements logic regarding session resumption and acknowledgement as well as the management of the session tables and configuration.

"},{"location":"modules/mod_stream_management/#options","title":"Options","text":""},{"location":"modules/mod_stream_management/#modulesmod_stream_managementbackend","title":"modules.mod_stream_management.backend","text":"
  • Syntax: string: \"mnesia\" or \"cets\"
  • Default: \"mnesia\"
  • Example: backend = \"mnesia\"

Backend for in-memory session data stored by this module.

Warning

The corresponding internal database has to be enabled.

"},{"location":"modules/mod_stream_management/#modulesmod_stream_managementbuffer","title":"modules.mod_stream_management.buffer","text":"
  • Syntax: boolean
  • Default: true
  • Example: buffer = false

Enables buffer for messages to be acknowledged.

"},{"location":"modules/mod_stream_management/#modulesmod_stream_managementbuffer_max","title":"modules.mod_stream_management.buffer_max","text":"
  • Syntax: positive integer or string \"infinity\"
  • Default: 100
  • Example: buffer_max = 500

Buffer size for messages yet to be acknowledged.

"},{"location":"modules/mod_stream_management/#modulesmod_stream_managementack","title":"modules.mod_stream_management.ack","text":"
  • Syntax: boolean
  • Default: true
  • Example: ack = false

Enables ack requests to be sent from the server to the client.

"},{"location":"modules/mod_stream_management/#modulesmod_stream_managementack_freq","title":"modules.mod_stream_management.ack_freq","text":"
  • Syntax: positive integer
  • Default: 1
  • Example: ack_freq = 3

Frequency of ack requests sent from the server to the client, e.g. 1 means a request after each stanza, 3 means a request after each 3 stanzas.

"},{"location":"modules/mod_stream_management/#modulesmod_stream_managementresume_timeout","title":"modules.mod_stream_management.resume_timeout","text":"
  • Syntax: positive integer, value given in seconds
  • Default: 600
  • Example: resume_timeout = 600

Timeout for the session resumption. Sessions will be removed after the specified number of seconds.

"},{"location":"modules/mod_stream_management/#stale_h-options","title":"Stale_h options","text":"

Enables keeping old server's <h> values after the resumption timed out. Disabled by default. When enabled, parameters for the garbage collection of these tables should be provided.

"},{"location":"modules/mod_stream_management/#modulesmod_stream_managementstale_henabled","title":"modules.mod_stream_management.stale_h.enabled","text":"
  • Syntax: boolean
  • Default: false
  • Example: enabled = true

Enables the stale_h configuration.

"},{"location":"modules/mod_stream_management/#modulesmod_stream_managementstale_hrepeat_after","title":"modules.mod_stream_management.stale_h.repeat_after","text":"
  • Syntax: positive integer, value given in seconds
  • Default: 1800 (half an hour)
  • Example: repeat_after = 1800

How often the garbage collection will run in the background to clean this table.

"},{"location":"modules/mod_stream_management/#modulesmod_stream_managementstale_hgeriatric","title":"modules.mod_stream_management.stale_h.geriatric","text":"
  • Syntax: positive integer, value given in seconds
  • Default: 3600 (one hour)
  • Example: geriatric = 3600

The maximum lifespan of a record in memory. After this time, the record becomes eligible for cleanup.

"},{"location":"modules/mod_stream_management/#example-configuration","title":"Example Configuration","text":"
[modules.mod_stream_management]\n  buffer_max = 30\n  ack_freq = 1\n  resume_timeout = 600\n  stale_h.enabled = true\n  stale_h.repeat_after = 1800\n  stale_h.geriatric = 3600\n
"},{"location":"modules/mod_stream_management/#implementation-details","title":"Implementation details","text":"

Stream management state data is stored under the mod_stream_management key in the #c2s_data.state_mod map. The state data record, sm_state, has the following fields:

  • buffer - buffered stanzas not yet acked by the user
  • buffer_size - number of stanzas buffered for the user
  • counter_in - number of stanzas received by the server (server's <h>)
  • counter_out - number of stanzas delivered to the user and acked by the user (user's <h>)
  • buffer_max - server's capacity for buffering
  • ack_freq - how often the server requests acks
  • peer - in case of stream resumption, the ejabberd_sm:sid() identifying the old session, or gen_statem:from() identifying the new session.

mod_stream_management introduces a new resume_session state to the C2S state machine, that is used by a session being closed to allow stream resumption.

This module also has a Mnesia backend keeping a table defined as follows:

-record(sm_session,\n        {smid :: smid(),\n         sid :: ejabberd_sm:sid()\n        }).\n

where smid is a unique identifier \u2014 in this case a random binary, and sid is an opaque session identifier from ejabberd_sm, which is needed to find the previous session we want to resume from. This module implements hooks that run on connection removals and session cleanups, in order to clean records from a dying session; and it also implements registration callbacks, used when a session is registered for resumption.

XEP version 1.6 requires the server to attempt giving the user the value of the server's <h> when a session timed out and cannot be resumed anymore. To be compliant with it, there's a second optional table:

-record(stream_mgmt_stale_h,\n        {smid :: smid(),\n         h :: non_neg_integer(),\n         stamp :: non_neg_integer()\n        }).\n

This table is created, together with a gen_server responsible for cleaning up the tables, when stale_h is set to true with the proper garbage collection configuration. Then, when removing a record from the sm_session table (which happens when the state of the previous session is also dropped), a new record is added to this new table with the smid and h values of the dropped session, together with a timestamp. Next, when a new session attempting resumption queries mod_stream_management for the data behind a smid, mod_stream_management can answer with one of the following:

{sid, ejabberd_sm:sid()} | {stale_h, non_neg_integer()} | {error, smid_not_found}.\n

And mod_stream_management will pattern-match and act accordingly.

"},{"location":"modules/mod_time/","title":"mod_time","text":""},{"location":"modules/mod_time/#module-description","title":"Module Description","text":"

This module enables support for communicating the local time of an entity. It reports time in UTC according to the entity as well as the offset from UTC. Protocol is described under XEP-0202: Entity Time.

"},{"location":"modules/mod_time/#options","title":"Options","text":""},{"location":"modules/mod_time/#modulesmod_timeiqdisctype","title":"modules.mod_time.iqdisc.type","text":"
  • Syntax: string, one of \"one_queue\", \"no_queue\", \"queues\", \"parallel\"
  • Default: \"one_queue\"

Strategy to handle incoming stanzas. For details, please refer to IQ processing policies.

"},{"location":"modules/mod_time/#example-configuration","title":"Example Configuration","text":"
[modules.mod_time]\n
"},{"location":"modules/mod_vcard/","title":"mod_vcard","text":""},{"location":"modules/mod_vcard/#module-description","title":"Module Description","text":"

This module provides support for vCards, as specified in XEP-0054: vcard-temp and XEP-0055: Jabber Search.

"},{"location":"modules/mod_vcard/#options","title":"Options","text":""},{"location":"modules/mod_vcard/#modulesmod_vcardiqdisctype","title":"modules.mod_vcard.iqdisc.type","text":"
  • Syntax: string, one of \"one_queue\", \"no_queue\", \"queues\", \"parallel\"
  • Default: \"parallel\"

Strategy to handle incoming stanzas. For details, please refer to IQ processing policies.

"},{"location":"modules/mod_vcard/#modulesmod_vcardhost","title":"modules.mod_vcard.host","text":"
  • Syntax: string
  • Default: \"vjud.@HOST@\"
  • Example: host = \"vjud.@HOST@\"

Domain of the vCard User Directory, used for searching. @HOST@ is replaced with the domain(s) supported by the cluster.

"},{"location":"modules/mod_vcard/#modulesmod_vcardsearch","title":"modules.mod_vcard.search","text":"
  • Syntax: boolean
  • Default: true
  • Example: search = false

Enables/disables the domain set in the previous option. false makes searching for users impossible.

"},{"location":"modules/mod_vcard/#modulesmod_vcardbackend","title":"modules.mod_vcard.backend","text":"
  • Syntax: string, one of \"ldap\", \"rdbms\", \"mnesia\"
  • Default: \"mnesia\"
  • Example: backend = \"rdbms\"

vCard storage backend.

Warning

LDAP backend is read-only.

"},{"location":"modules/mod_vcard/#modulesmod_vcardmatches","title":"modules.mod_vcard.matches","text":"
  • Syntax: non-negative integer or the string \"infinity\"
  • Default: 30
  • Example: matches = 10

Maximum search results to be returned to the user.

"},{"location":"modules/mod_vcard/#ldap-specific-options","title":"LDAP-specific options","text":"

The following options are the same as for the LDAP authentication module:

"},{"location":"modules/mod_vcard/#modulesmod_vcardldappool_tag","title":"modules.mod_vcard.ldap.pool_tag","text":""},{"location":"modules/mod_vcard/#modulesmod_vcardldapbase","title":"modules.mod_vcard.ldap.base","text":""},{"location":"modules/mod_vcard/#modulesmod_vcardldapuids","title":"modules.mod_vcard.ldap.uids","text":""},{"location":"modules/mod_vcard/#modulesmod_vcardldapfilter","title":"modules.mod_vcard.ldap.filter","text":""},{"location":"modules/mod_vcard/#modulesmod_vcardldapderef","title":"modules.mod_vcard.ldap.deref","text":""},{"location":"modules/mod_vcard/#modulesmod_vcardldapvcard_map","title":"modules.mod_vcard.ldap.vcard_map","text":"
  • Syntax: Array of TOML tables with the following keys: \"vcard_field\", \"ldap_pattern\", \"ldap_field\" and string values.
  • Default: see description
  • Example: vcard_map = [{vcard_field = \"FN\", ldap_pattern = \"%s\", ldap_field = \"displayName\"}]

Mappings between VCard and LDAP fields. For the default settings, please see [MongooseIM root]/src/mod_vcard_ldap.erl.

"},{"location":"modules/mod_vcard/#modulesmod_vcardldapsearch_fields","title":"modules.mod_vcard.ldap.search_fields","text":"
  • Syntax: Array of TOML tables with the following keys: \"search_field\", \"ldap_field\" and string values.
  • Default: see description
  • Example: search_fields = [{search_field = \"User\", ldap_field = \"%u\"}]

Mappings between the human-readable search fields and LDAP fields. For the default settings, please see [MongooseIM root]/src/mod_vcard_ldap.erl.

"},{"location":"modules/mod_vcard/#modulesmod_vcardldapsearch_reported","title":"modules.mod_vcard.ldap.search_reported","text":"
  • Syntax: Array of TOML tables with the following keys: \"search_field\", \"vcard_field\" and string values.
  • Default: see description
  • Example: search_reported = [{search_field = \"Full Name\", vcard_field = \"FN\"}]

Mappings between the human-readable search fields and VCard fields. For the default settings, please see [MongooseIM root]/src/mod_vcard_ldap.erl.

"},{"location":"modules/mod_vcard/#modulesmod_vcardldapsearch_operator","title":"modules.mod_vcard.ldap.search_operator","text":"
  • Syntax: string, one of \"or\", \"and\"
  • Default: \"and\"
  • Example: search_operator = \"or\"

A default operator used for search query items.

"},{"location":"modules/mod_vcard/#modulesmod_vcardldapbinary_search_fields","title":"modules.mod_vcard.ldap.binary_search_fields","text":"
  • Syntax: array of strings
  • Default: []
  • Example: binary_search_fields = [\"User\", \"Full Name\"]

An array of search fields whose values should be Base64-encoded by MongooseIM before being sent to LDAP.

"},{"location":"modules/mod_vcard/#example-configuration","title":"Example Configuration","text":"
[modules.mod_vcard]\n  matches = 1\n  search = true\n  host = \"directory.example.com\"\n\n  [[modules.mod_vcard.ldap.vcard_map]]\n    vcard_field = \"FAMILY\"\n    ldap_pattern = \"%s\"\n    ldap_field = \"sn\"\n\n  [[modules.mod_vcard.ldap.vcard_map]]\n    vcard_field = \"FN\"\n    ldap_pattern = \"%s\"\n    ldap_field = \"displayName\"\n\n  [[modules.mod_vcard.ldap.search_fields]]\n    search_field = \"User\"\n    ldap_field = \"%u\"\n\n  [[modules.mod_vcard.ldap.search_fields]]\n    search_field = \"Full Name\"\n    ldap_field = \"displayName\"\n\n  [[modules.mod_vcard.ldap.search_reported]]\n    search_field = \"Full Name\"\n    vcard_field = \"FN\"\n\n  [[modules.mod_vcard.ldap.search_reported]]\n    search_field = \"Given Name\"\n    vcard_field = \"FIRST\"\n
"},{"location":"modules/mod_vcard/#metrics","title":"Metrics","text":"

If you'd like to learn more about metrics in MongooseIM, please visit MongooseIM metrics page.

Backend action Description (when it gets incremented) set_vcard A vCard is set in a DB. get_vcard A specific vCard is retrieved from a DB. search A vCard search is performed."},{"location":"modules/mod_version/","title":"mod_version","text":""},{"location":"modules/mod_version/#module-description","title":"Module description","text":"

This module provides the functionality specified in XEP-0092: Software Version.

"},{"location":"modules/mod_version/#options","title":"Options","text":""},{"location":"modules/mod_version/#modulesmod_versioniqdisctype","title":"modules.mod_version.iqdisc.type","text":"
  • Syntax: string, one of \"one_queue\", \"no_queue\", \"queues\", \"parallel\"
  • Default: \"no_queue\"

Strategy to handle incoming stanzas. For details, please refer to IQ processing policies.

"},{"location":"modules/mod_version/#modulesmod_versionos_info","title":"modules.mod_version.os_info","text":"
  • Syntax: boolean
  • Default: false
  • Example: os_info = true

Determines whether information about the operating system will be included.

"},{"location":"modules/mod_version/#example-configuration","title":"Example configuration","text":"
[modules.mod_version]\n  os_info = true\n
"},{"location":"open-extensions/inbox/","title":"Inbox","text":"

When a messaging client starts, it typically builds a UI showing a list of recent chats, with metadata attached to them like, whether any chat has new messages and how many, or if it is fully read, or if they are for example muted and until when. In MongooseIM this functionality is provided by mod_inbox.

"},{"location":"open-extensions/inbox/#terminology","title":"Terminology:","text":""},{"location":"open-extensions/inbox/#the-inbox","title":"The Inbox","text":"

It is personal to a given user and represents the current status of the conversations of that user. It's the front-page of the chat feature.

"},{"location":"open-extensions/inbox/#inbox-entry","title":"Inbox entry","text":"

It is a specific conversation, that the user can identify by the recipient jid, that is, the user jid in case of a one-to-one chat, or the room jid in case of a group-chat.

"},{"location":"open-extensions/inbox/#box-also-referred-to-as-folder","title":"Box (also referred to as \"folder\")","text":"

A category where entries can be classified. The default box is the active box, simply called inbox. There is a second box, called archive, where entries can be thrown into and not displayed by default. More boxes can be created through configuration.

"},{"location":"open-extensions/inbox/#entity-use-cases","title":"Entity Use Cases","text":""},{"location":"open-extensions/inbox/#discovering-inbox-services","title":"Discovering Inbox Services","text":"

An entity can discover the inbox service via a Features Discovery request:

<!-- Client -->\n<iq type='get' id='a96d4244760853af7b3ae84faa1a40fb' to='localhost'>\n    <query xmlns='http://jabber.org/protocol/disco#info'/>\n</iq>\n\n<!-- Server -->\n<iq from='localhost' to='alice@localhost/res1' id='a96d4244760853af7b3ae84faa1a40fb' type='result'>\n    <query xmlns='http://jabber.org/protocol/disco#info'>\n        <identity category='server' type='im' name='MongooseIM'/>\n        <feature var='erlang-solutions.com:xmpp:inbox:0'/>\n    </query>\n</iq>\n

"},{"location":"open-extensions/inbox/#fetching-the-inbox","title":"Fetching the inbox","text":""},{"location":"open-extensions/inbox/#querying","title":"Querying","text":"

The inbox is fetched using regular XMPP Data Forms. To request the supported form, the client should send:

<!-- Client -->\n<iq type='get' id='some_unique_id'>\n  <query xmlns='erlang-solutions.com:xmpp:inbox:0'/>\n</iq>\n\n<!-- Server -->\n<iq from='alice@localhost' to='alice@localhost/res1' id='some_unique_id' type='result'>\n  <query xmlns='erlang-solutions.com:xmpp:inbox:0'>\n    <x xmlns='jabber:x:data' type='form'>\n      <field type='hidden' var='FORM_TYPE'><value>erlang-solutions.com:xmpp:inbox:0</value></field>\n      <field var='start' type='text-single'/>\n      <field var='end' type='text-single'/>\n      <field var='order' type='list-single'>\n        <value>desc</value>\n        <option label='Ascending by timestamp'><value>asc</value></option>\n        <option label='Descending by timestamp'><value>desc</value></option>\n      </field>\n      <field var='hidden_read' type='text-single' value='false'/>\n      <field var='box' type='list-simple' value='all'>\n        <option label='all'><value>all</value></option>\n        <option label='inbox'><value>inbox</value></option>\n        <option label='archive'><value>archive</value></option>\n        <option label='bin'><value>bin</value></option>\n      </field>\n      <field var='archive' type='boolean'/>\n    </x>\n  </query>\n</iq>\n

To fetch the inbox, the client should send:

<iq type='set' id='10bca'>\n  <inbox xmlns='erlang-solutions.com:xmpp:inbox:0' queryid='b6'/>\n</iq>\n

Then the client should receive:

<message from=\"alice@localhost\" to=\"alice@localhost/res1\" id=\"9b759\">\n  <result xmlns=\"erlang-solutions.com:xmpp:inbox:0\" unread=\"0\" queryid=\"b6\">\n    <forwarded xmlns=\"urn:xmpp:forward:0\">\n      <delay xmlns=\"urn:xmpp:delay\" stamp=\"2018-07-10T23:08:25.123456Z\"/>\n      <message xml:lang=\"en\" type=\"chat\" to=\"bob@localhost/res1\" from=\"alice@localhost/res1\" id=\u201d123\u201d>\n        <body>Hello</body>\n      </message>\n    </forwarded>\n    <box>inbox</box>\n    <archive>false</archive>\n    <mute>0</mute>\n  </result>\n</message>\n\n<iq from=\"alice@localhost\" to=\"alice@localhost/res1\" id=\"b6\" type=\"result\">\n  <fin xmlns='erlang-solutions.com:xmpp:inbox:0'>\n    <count>1</count>\n    <unread-messages>0</unread-messages>\n    <active-conversations>0</active-conversations>\n  </fin>\n</iq>\n
where none-or-many message stanzas are sent to the requesting resource describing each inbox entry, and a final iq-fin stanza marks the end of the inbox query. The inbox query result IQ stanza returns the following values:

  • count: the total number of conversations (if hidden_read value was set to true, this value will be equal to active_conversations)
  • unread-messages: total number of unread messages from all conversations
  • active-conversations: the number of conversations with unread message(s)

Note that the queryid field is optional, and if not provided, the answers will fall back to the id field of the IQ query.

"},{"location":"open-extensions/inbox/#filtering-and-ordering","title":"Filtering and ordering","text":"

Inbox query results may be filtered by time range and box, and sorted by timestamp. By default, mod_inbox returns all conversations, listing the ones updated most recently first.

A client may specify the following parameters:

  • variable start: Start date for the result set (value: ISO timestamp)
  • variable end: End date for the result set (value: ISO timestamp)
  • variable order: Order by timestamp (values: asc, desc)
  • variable hidden_read: Show only conversations with unread messages (values: true, false)
  • variable box: Indicate which box is desired. Supported are all, inbox, archive and bin. More boxes can be implemented, see mod_inbox \u2013 Boxes. If not provided, all except the bin are returned.
  • variable archive [deprecated, prefer box]: whether to query the archive inbox. true means querying only the archive box, false means querying only the active box. If the flag is not set, it is assumed all entries are requested. This is kept for backwards compatibility reasons, use the box flag instead.

They are encoded inside a standard XMPP Data Forms format. Dates must be formatted according to XMPP Date and Time Profiles. It is not mandatory to add an empty data form if a client prefers to use default values (<inbox/> element may be empty). However, the IQ type must be \"set\", even when the data form is missing.

"},{"location":"open-extensions/inbox/#limiting-the-query","title":"Limiting the query","text":"

It can happen that the amount of inbox entries is too big for a given user, even after filtering by start and end as already available in mod_inbox. Hence, we need to set a fixed limit of the number of entries that are requested. For this, we can use a <max> attribute as defined in XEP-0059: #2.1 Limiting the Number of Items:

<iq type='set' id='10bca'>\n  <inbox xmlns='erlang-solutions.com:xmpp:inbox:0' queryid='b6'>\n    <x xmlns='jabber:x:data' type='form'>\n      <field type='hidden' var='FORM_TYPE'><value>erlang-solutions.com:xmpp:inbox:0</value></field>\n      <field type='list-single' var='order'><value>asc</value></field>\n      <field type='text-single' var='hidden_read'><value>true</value></field>\n      <field type='list-single' var='box'><value>inbox</value></field>\n    </x>\n    <set xmlns='http://jabber.org/protocol/rsm'>\n      <max>Max</max>\n    </set>\n  </inbox>\n</iq>\n
where Max is a non-negative integer.

Inbox also has partial support for pagination as described in XEP-0059. Note that therefore there are two ways to denote pages, the standard RSM mechanism and the custom inbox form. If both are used, the RSM marker will override the respective inbox form, as in, before will override start, and after will override end.

Note

Inbox pagination does not support total count nor indexes as described in XEP-0059: #2.6 Retrieving a Page Out of Order.

"},{"location":"open-extensions/inbox/#properties-of-an-entry","title":"Properties of an entry","text":"

Given an entry, certain properties are defined for such an entry:

"},{"location":"open-extensions/inbox/#box","title":"Box","text":"

Clients usually have two different boxes for the inbox: the regular one, simply called the inbox (or the active inbox), and an archive box, where clients can manually throw conversations they don't want displayed in the default UI. A third box is the trash bin, where deleted entries go and are cleaned up in regular intervals.

It is expected that entries will reside in the archive until they're either manually moved back to the active box, or they receive a new message: in such case the entry should jump back to the active box automatically.

More boxes can be implemented, see mod_inbox#boxes. Movement between boxes can be achieved through the right XMPP IQ, no more automatic movements are developed as in the case of inbox-archive.

"},{"location":"open-extensions/inbox/#read","title":"Read","text":"

Entries keep a count of unread messages that is incremented automatically upon receiving a new message, and (in the current implementation) set to zero upon receiving either a message by one-self, or an appropriate chat marker as defined in XEP-0333 (which markers reset the count is a matter of configuration, see doc).

This property can also be manually set to zero or to one using the appropriate requests as explained below.

"},{"location":"open-extensions/inbox/#muted","title":"Muted","text":"

Entries can be muted for given periods of time, and likewise, unmuted. This changes the UI representation, and also, means that the user won't get PNs (Push Notifications) for this entry, until the time set expires, or the user sets otherwise. Knowledge of this is necessary to help build the UI.

Expected times can be extended before the period has expired, without the need to unmute first. When muting a conversation, the final timestamp will be calculated by the server as the current time plus the requested period, in seconds, to centralise knowledge of UTC clocks. When muting an already muted conversation, the timestamp is simply overridden following the previous specification.

"},{"location":"open-extensions/inbox/#other-properties","title":"Other properties","text":"

No more properties are expected, but one could envisage notions of flagging conversations with different colours, for example according to their urgency, or a client-specific category (work, personal, fitness, and whatnot), or pins to denote an entry should be always displayed (possibly in a special format, like on top of the box). The design of the protocol, and the implementation, aims to leave room for future extensions.

"},{"location":"open-extensions/inbox/#getting-properties","title":"Getting properties","text":"

To fetch all supported properties, a classic Data Form is used. Upon the client sending an iq-get without a jid:

<iq id='some_unique_id' type='get'>\n  <query xmlns='erlang-solutions.com:xmpp:inbox:0#conversation'/>\n</iq>\n
The server would respond with:
<iq from='alice@localhost' to='alice@localhost/res1' id='some_unique_id' type='result'>\n  <query xmlns='erlang-solutions.com:xmpp:inbox:0#conversation'>\n    <x xmlns='jabber:x:data' type='form'>\n      <field type='hidden' var='FORM_TYPE'><value>erlang-solutions.com:xmpp:inbox:0</value></field>\n      <field var='archive' type='boolean' value='false'/>\n      <field var='read' type='boolean' value='false'/>\n      <field var='mute' type='text-single' value='0'/>\n      <field var='box' type='list-simple' value='all'>\n        <option label='all'><value>all</value></option>\n        <option label='inbox'><value>inbox</value></option>\n        <option label='archive'><value>archive</value></option>\n        <option label='bin'><value>bin</value></option>\n      </field>\n    </x>\n  </query>\n</iq>\n

If the properties of a certain entry were to be fetched, it can easily be done with:

<iq id='some_unique_id' type='get'>\n  <query xmlns='erlang-solutions.com:xmpp:inbox:0#conversation' jid='bob@localhost'/>\n</iq>\n
To which the server will reply, just like before, with:
<iq id='some_unique_id' to='alice@localhost/res1' type='result'>\n  <query xmlns='erlang-solutions.com:xmpp:inbox:0#conversation'>\n    <box>inbox</box>\n    <archive>false</archive>\n    <mute>0</mute>\n    <read>true</read>\n  </query>\n</iq>\n

If an entire entry wanted to be queried, and not only its attributes, a complete='true' can be provided:

<iq id='some_unique_id' type='get'>\n  <query xmlns='erlang-solutions.com:xmpp:inbox:0#conversation' jid='bob@localhost' complete='true'/>\n</iq>\n
To which the server will reply, just like before, with:
<iq id='some_unique_id' to='alice@localhost/res1' type='result'>\n  <query xmlns='erlang-solutions.com:xmpp:inbox:0#conversation'>\n    <forwarded xmlns=\"urn:xmpp:forward:0\">\n      <delay xmlns=\"urn:xmpp:delay\" stamp=\"2018-07-10T23:08:25.123456Z\"/>\n      <message xml:lang=\"en\" type=\"chat\" to=\"bob@localhost/res1\" from=\"alice@localhost/res1\" id=\u201d123\u201d>\n        <body>Hello</body>\n      </message>\n    </forwarded>\n    <archive>false</archive>\n    <mute>0</mute>\n    <read>true</read>\n  </query>\n</iq>\n

"},{"location":"open-extensions/inbox/#setting-properties","title":"Setting properties","text":"

Setting properties is done using the standard XMPP pattern of iq-query and iq-result, as below:

<iq id='some_unique_id' type='set'>\n  <query xmlns='erlang-solutions.com:xmpp:inbox:0#conversation' jid='bob@localhost'>\n    <Property>Value</Property>\n    <!-- Possibly other properties -->\n  </query>\n</iq>\n
where Property and Value are a list of key-value pairs as follows:

  • box: inbox, archive, or a custom value if this has been extended.
  • archive: true or false
  • mute: number of seconds to mute for. Choose 0 for unmuting.
  • read (adjective, not verb): true or false. Setting to true essentially sets the unread-count to 0, false sets the unread-count to 1 (if it was equal to 0, otherwise it leaves it unchanged). No other possibilities are offered, to reduce the risk of inconsistencies or problems induced by a faulty client.

Note that resetting the inbox count will not be forwarded. While a chat marker will be forwarded to the interlocutor(s), (including the case of a big groupchat with thousands of participants), this reset stanza will not.

If the query was successful, the server will answer with two stanzas, following the classic pattern of broadcasting state changes. First, it would send a message with an <x> child containing all new configuration, to the bare-jid of the user: this facilitates broadcasting to all online resources to successfully synchronise their interfaces.

<message from='alice@localhost' to='alice@localhost' id='some_unique_id'>\n  <x xmlns='erlang-solutions.com:xmpp:inbox:0#conversation' jid='bob@localhost'>\n    <box>inbox</box>\n    <archive>false</archive>\n    <mute>0</mute>\n    <read>true</read>\n  </x>\n</message>\n
where <mute> may contain either a zero, to denote unmuted, or a RFC3339 timestamp, as in 2021-02-25T08:44:14.323836Z.

To the requesting resource, a simple iq-result would be then sent to notify of success, as required by the iq directives of the XMPP RFCs:

<iq id='some_unique_id' to='alice@localhost/res1' type='result'/>\n

If the request was not successful, the server would then answer as in:

<iq to='alice@localhost/res1' type='error'>\n  <error type='Type'>\n    <Condition xmlns='urn:ietf:params:xml:ns:xmpp-stanzas'/>\n  </error>\n</iq>\n
Where Type will usually be modify or cancel, as explained in https://xmpp.org/rfcs/rfc6120.html#stanzas-error-syntax, and Condition is as explained in https://xmpp.org/rfcs/rfc6120.html#stanzas-error-conditions, bad-request being the most common.

This final syntax for the protocol has been chosen as it allows for better pipelining of requests, and it remains consistent with how, for example, rooms are configured for MUC-Light.

"},{"location":"open-extensions/inbox/#examples-archiving-an-entry","title":"Examples: archiving an entry","text":"

To put an entry into the archived box, the client can send:

<iq id='some_unique_id' type='set'>\n  <query xmlns='erlang-solutions.com:xmpp:inbox:0#conversation' jid='bob@localhost'>\n    <box>archive</box>\n  </query>\n</iq>\n
On success, the server would return (considering the entry has no unread messages and is not muted):
<iq id='some_unique_id' to='alice@localhost/res1' type='result'>\n  <query xmlns='erlang-solutions.com:xmpp:inbox:0#conversation' jid='bob@localhost'>\n    <box>archive</box>\n    <archive>true</archive>\n    <mute>0</mute>\n    <read>true</read>\n  </query>\n</iq>\n
If the client had sent an invalid value, the server would answer:
<iq to='alice@localhost/res1' type='error'>\n  <error type='modify'>\n    <bad-request xmlns='urn:ietf:params:xml:ns:xmpp-stanzas'/>\n  </error>\n</iq>\n

"},{"location":"open-extensions/inbox/#examples-emptying-the-trash-bin","title":"Examples: emptying the trash bin","text":"

A user can empty his trash bin, through the following request:

<iq id='some_unique_id' type='set'>\n  <empty-bin xmlns='erlang-solutions.com:xmpp:inbox:0'/>\n</iq>\n
On success, the server would return how many entries were dropped, as in:
<iq id='some_unique_id' to='alice@localhost/res1' type='result'>\n  <empty-bin xmlns='erlang-solutions.com:xmpp:inbox:0'>\n    <num>2</num>\n  </empty-bin>\n</iq>\n
The server might answer with a corresponding error message, should anything go wrong.

"},{"location":"open-extensions/inbox/#examples-muting-an-entry","title":"Examples: muting an entry","text":"

To mute an entry for a full day (86400 seconds in a day, 604800 in a week, for example), a client can send:

<iq id='some_unique_id' type='set'>\n  <query xmlns='erlang-solutions.com:xmpp:inbox:0#conversation' jid='bob@localhost'>\n    <mute>86400</mute>\n  </query>\n</iq>\n
On success, the server would return (considering the server receives the timestamp on \"2021-02-26T09:11:05.634232Z\", and the entry is on the active box and completely read):
<iq id='some_unique_id' to='alice@localhost/res1' type='result'>\n  <query xmlns='erlang-solutions.com:xmpp:inbox:0#conversation' jid='bob@localhost'>\n    <box>inbox</box>\n    <archive>false</archive>\n    <mute>2021-02-27T09:11:05.634232Z</mute>\n    <read>true</read>\n  </query>\n</iq>\n
If the client had sent an invalid number (negative, or NaN), the server would answer:
<iq to='alice@localhost/res1' type='error'>\n  <error type='modify'>\n    <bad-request xmlns='urn:ietf:params:xml:ns:xmpp-stanzas'/>\n  </error>\n</iq>\n
To unmute, similarly, the client can send:
<iq id='some_unique_id' type='set'>\n  <query xmlns='erlang-solutions.com:xmpp:inbox:0#conversation' jid='bob@localhost'>\n    <mute>0</mute>\n  </query>\n</iq>\n
And server responses will be similar.

"},{"location":"open-extensions/inbox/#examples-reading-an-entry","title":"Examples: reading an entry","text":"

To set an entry as read, the client can send:

<iq id='some_unique_id' type='set'>\n  <query xmlns='erlang-solutions.com:xmpp:inbox:0#conversation' jid='bob@localhost'>\n    <read>true</read>\n  </query>\n</iq>\n
On success, the server would return (considering the entry is not archived and not muted):
<iq id='some_unique_id' to='alice@localhost/res1' type='result'>\n  <query xmlns='erlang-solutions.com:xmpp:inbox:0#conversation' jid='bob@localhost'>\n    <box>inbox</box>\n    <archive>false</archive>\n    <mute>0</mute>\n    <read>true</read>\n  </query>\n</iq>\n
On error, as usual, the client would get:
<iq to='alice@localhost/res1' type='error'>\n  <error type='modify'>\n    <bad-request xmlns='urn:ietf:params:xml:ns:xmpp-stanzas'/>\n  </error>\n</iq>\n
And similarly, to set a conversation as unread:
<iq id='some_unique_id' type='set'>\n  <query xmlns='erlang-solutions.com:xmpp:inbox:0#conversation' jid='bob@localhost'>\n    <read>false</read>\n  </query>\n</iq>\n

"},{"location":"open-extensions/inbox/#deprecated-reset-entry-stanza","title":"Deprecated reset entry stanza:","text":"

You can reset the inbox with the following stanza:

<iq type='set'>\n    <reset xmlns='erlang-solutions.com:xmpp:inbox:0#conversation' jid='interlocutor_bare_jid'/>\n</iq>\n
Here jid is the bare jid of the user whose inbox we want to reset. This action does not change the last message stored in inbox; meaning that neither this stanza nor anything given within will be stored; the only change is the inbox unread_count is set to zero.

"},{"location":"open-extensions/inbox/#example-request","title":"Example request","text":"
<!-- Alice sends: -->\n<message type=\"chat\" to=\"bob@localhost/res1\" id=\u201d123\u201d>\n  <body>Hello</body>\n</message>\n\n<!-- Bob receives: -->\n<message from=\"alice@localhost/res1\" to=\"bob@localhost/res1\" id=\u201c123\u201d xml:lang=\"en\" type=\"chat\">\n  <body>Hello</body>\n</message>\n\n<!-- Alice sends: -->\n<iq type=\"set\" id=\"10bca\">\n  <inbox xmlns=\"erlang-solutions.com:xmpp:inbox:0\" queryid=\"b6\">\n    <x xmlns='jabber:x:data' type='form'>\n      <field type='hidden' var='FORM_TYPE'><value>erlang-solutions.com:xmpp:inbox:0</value></field>\n      <field type='text-single' var='start'><value>2018-07-10T12:00:00Z</value></field>\n      <field type='text-single' var='end'><value>2018-07-11T12:00:00Z</value></field>\n      <field type='list-single' var='order'><value>asc</value></field>\n    </x>\n  </inbox>\n</iq>\n\n<!-- Alice receives: -->\n<message from=\"alice@localhost\" to=\"alice@localhost\" id=\"9b759\">\n  <result xmlns=\"erlang-solutions.com:xmpp:inbox:0\" unread=\"0\" queryid=\"b6\">\n    <forwarded xmlns=\"urn:xmpp:forward:0\">\n      <delay xmlns=\"urn:xmpp:delay\" stamp=\"2018-07-10T23:08:25.123456Z\"/>\n      <message xml:lang=\"en\" type=\"chat\" to=\"bob@localhost/res1\" from=\"alice@localhost/res1\" id=\u201d123\u201d>\n        <body>Hello</body>\n      </message>\n    </forwarded>\n    <box>inbox</box>\n    <archive>false</archive>\n    <mute>0</mute>\n  </result>\n</message>\n\n<iq from=\"alice@localhost\" to=\"alice@localhost/res1\" id=\"10bca\" type=\"result\">\n  <fin xmlns='erlang-solutions.com:xmpp:inbox:0'>\n    <count>1</count>\n    <unread-messages>0</unread-messages>\n    <active-conversations>0</active-conversations>\n  </fin>\n</iq>\n
"},{"location":"open-extensions/inbox/#example-error-response","title":"Example error response","text":"
<!--Alice sends request with invalid value of start field: -->\n<iq type='set' id='a78478f20103ff8354d7834d0ba2fdb2'>\n  <inbox xmlns='erlang-solutions.com:xmpp:inbox:0'>\n    <x xmlns='jabber:x:data' type='submit'>\n      <field type='text-single' var='start'>\n        <value>invalid</value>\n      </field>\n    </x>\n  </inbox>\n</iq>\n\n<!--Alice receives an error with description of the first encountered invalid value: -->\n<iq from='alice@localhost' to='alice@localhost/res1'\n    id='a78478f20103ff8354d7834d0ba2fdb2' type='error'>\n  <error code='400' type='modify'>\n    <bad-request xmlns='urn:ietf:params:xml:ns:xmpp-stanzas'/>\n    <text xmlns='urn:ietf:params:xml:ns:xmpp-stanzas'>\n      Invalid inbox form field value, field=start, value=invalid\n    </text>\n  </error>\n</iq>\n
"},{"location":"open-extensions/mam/","title":"Message Archive Management extensions","text":""},{"location":"open-extensions/mam/#new-mam-filtering-fields","title":"New MAM filtering fields","text":"

The new fields make it possible to improve the performance of the counting queries for very big archives by changing how the count and index functions work.

  • from-id - returns and counts messages with ids id >= from-id only (from-id is included into the set).
  • to-id - returns and counts messages with ids id <= to-id only (to-id is included into the set).
  • after-id - returns and counts messages with ids id > after-id only (after-id is not included into the set).
  • before-id - returns and counts messages with ids id < before-id only (before-id is not included into the set).
  • simple - do not return count and offset fields in the result.

The fields could be combined together. If two filters are provided, both would be applied to the result.

"},{"location":"open-extensions/mam/#get-new-messages-oldest-first","title":"Get new messages, oldest first","text":"

Example from pagination_first_page_after_id4 testcase:

The client has downloaded his archive and got disconnected. He knows that the last message he has on his device has id=BO7CH1JOF801. He wants to receive the new messages that were sent while he was disconnected, using a page size of 5.

In this mode, the client would get the oldest messages first.

Testcase: the client has messages 1-15 in his archive.

<!-- Client sends -->\n<iq type='set' id='req1'>\n    <query xmlns='urn:xmpp:mam:1' queryid='first_page_after_id4'>\n        <x xmlns='jabber:x:data'>\n            <field var='after-id'>\n                <value>BO7CH1JOF801</value> <!-- id of the Message #4 -->\n            </field>\n        </x>\n        <set>\n            <max>5</max>\n        </set>\n    </query>\n</iq>\n\n<!-- Server sends -->\n<message from='alice@localhost' to='alice@localhost/res1' id='323372af-7d69-4f36-803d-110272066373'>\n    <result queryid='first_page_after_id4' xmlns='urn:xmpp:mam:1' id='BO7CH1JQR9O1'>\n        <forwarded xmlns='urn:xmpp:forward:0'>\n            <delay xmlns='urn:xmpp:delay' stamp='2022-06-08T09:43:08.952999Z' from='alice@localhost/res1'/>\n            <message from='alice@localhost/res1' xmlns='jabber:client' xml:lang='en' to='bob@localhost/res1' type='chat'>\n                <body>Message #5</body>\n            </message>\n        </forwarded>\n    </result>\n</message>\n<!-- ... Messages 6, 7, 8  ... 
-->\n<message from='alice@localhost' to='alice@localhost/res1' id='a44d83f3-de47-4e71-a1e6-62100437fe2c'>\n    <result queryid='first_page_after_id4' xmlns='urn:xmpp:mam:1' id='BO7CH1K3TU01'>\n        <forwarded xmlns='urn:xmpp:forward:0'>\n            <delay xmlns='urn:xmpp:delay' stamp='2022-06-08T09:43:08.990200Z' from='alice@localhost/res1'/>\n            <message from='alice@localhost/res1' xmlns='jabber:client' xml:lang='en' to='bob@localhost/res1' type='chat'>\n                <body>Message #9</body>\n            </message>\n        </forwarded>\n    </result>\n</message>\n\n<iq from='alice@localhost' to='alice@localhost/res1' id='req1' type='result'>\n    <fin xmlns='urn:xmpp:mam:1'>\n        <set xmlns='http://jabber.org/protocol/rsm'>\n            <first index='0'>BO7CH1JQR9O1</first> <!-- Id of the message #5 -->\n            <last>BO7CH1K3TU01</last> <!-- Id of the message #9 -->\n            <count>11</count> <!-- messages 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 -->\n        </set>\n    </fin>\n</iq>\n

Messages 1-4 are completely ignored in the count and in the index fields. If the client asked for 5 messages, but count is 11, he should ask for more messages.

<!-- Client sends -->\n<iq type='set' id='req2'>\n    <query xmlns='urn:xmpp:mam:1' queryid='first_page_after_id9'>\n        <x xmlns='jabber:x:data'>\n            <field var='after-id'>\n                <value>BO7CH1K3TU01</value> <!-- id of the Message #9 -->\n            </field>\n        </x>\n        <set>\n            <max>5</max>\n        </set>\n    </query>\n</iq>\n...\n
"},{"location":"open-extensions/mam/#get-new-messages-newest-first","title":"Get new messages, newest first","text":"

Sometimes we want to render the newest messages as fast as possible.

Though, if the client caches messages, he has to track which pages still need to be requested when using this method.

Example pagination_last_page_after_id4.

<!-- Client sends -->\n<iq type='set' id='req3'>\n    <query xmlns='urn:xmpp:mam:1' queryid='last_page_after_id4'>\n        <x xmlns='jabber:x:data'>\n            <field var='after-id'>\n                <value>BO7CUCVVS6O1</value>\n            </field>\n        </x>\n        <set>\n            <max>5</max>\n            <before/>\n        </set>\n    </query>\n</iq>\n\n<!-- Server sends -->\n<message from='alice@localhost' to='alice@localhost/res1' id='4917656e-a5cb-4f4a-9718-ed525a1202ee'>\n    <result queryid='last_page_after_id4' xmlns='urn:xmpp:mam:1' id='BO7CUD0L8B81'>\n        <forwarded xmlns='urn:xmpp:forward:0'>\n            <delay xmlns='urn:xmpp:delay' stamp='2022-06-08T10:13:01.601837Z' from='alice@localhost/res1'/>\n            <message from='alice@localhost/res1' xmlns='jabber:client' xml:lang='en' to='bob@localhost/res1' type='chat'>\n                <body>Message #11</body>\n            </message>\n        </forwarded>\n    </result>\n</message>\n\n...\n\n<message from='alice@localhost' to='alice@localhost/res1' id='09987901-d53d-4b57-8b3c-5f3aaa2de99b'>\n    <result queryid='last_page_after_id4' xmlns='urn:xmpp:mam:1' id='BO7CUD0U4301'>\n        <forwarded xmlns='urn:xmpp:forward:0'>\n            <delay xmlns='urn:xmpp:delay' stamp='2022-06-08T10:13:01.638156Z' from='alice@localhost/res1'/>\n            <message from='alice@localhost/res1' xmlns='jabber:client' xml:lang='en' to='bob@localhost/res1' type='chat'>\n                <body>Message #15</body>\n            </message>\n        </forwarded>\n    </result>\n</message>\n<iq from='alice@localhost' to='alice@localhost/res1' id='req3' type='result'>\n    <fin xmlns='urn:xmpp:mam:1'>\n        <set xmlns='http://jabber.org/protocol/rsm'>\n            <first index='6'>BO7CUD0L8B81</first> <!-- id of the message 11 -->\n            <last>BO7CUD0U4301</last> <!-- id of the message 15 -->\n            <count>11</count> <!-- messages 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 -->\n        </set>\n    
</fin>\n</iq>\n

Because index is not zero, the client would have to send more queries to get all missing messages.

"},{"location":"open-extensions/mam/#disable-message-counting","title":"Disable message counting","text":"

Sometimes, we don't want to count messages at all. It would improve performance.

For example, if we want to request another page of the result set, we already would know the total number of messages from the first query.

Sometimes, total and offset values are not visible in the UI.

<!-- Client sends -->\n<iq type='set' id='req5'>\n    <query xmlns='urn:xmpp:mam:1' queryid='before10'>\n        <x xmlns='jabber:x:data'>\n            <field var='simple'>\n                <value>true</value>\n            </field>\n        </x>\n        <set>\n            <max>5</max>\n            <before>BO7DD6KDP0O1</before>\n        </set>\n    </query>\n</iq>\n\n...skip messages...\n<!-- Server returns messages and the final IQ -->\n<iq from='alice@localhost' to='alice@localhost/res1' id='req5' type='result'>\n    <fin xmlns='urn:xmpp:mam:1'>\n        <set xmlns='http://jabber.org/protocol/rsm'>\n            <first>BO7DD6K1E8G1</first>\n            <last>BO7DD6KBAAG1</last>\n        </set>\n    </fin>\n</iq>\n
"},{"location":"open-extensions/muc_light/","title":"MUC light","text":""},{"location":"open-extensions/muc_light/#1-introduction","title":"1. Introduction","text":"

Classic Multi-User chat, as described in XEP-0045, adds an IRC-like functionality to XMPP. It distinguishes between the affiliation list and the occupant list, where the latter is based on presences routed to the room from the client resource. While perfectly sufficient for desktop applications and relatively stable network connections, it does not exactly meet the challenges the mobile world is facing. Modern mobile applications do not rely on presence information, as it can frequently change. The expected user experience not only differs from the IRC model, but also uses only a small subset of XEP-0045 features. The service described in this specification attempts to provide a complete solution for all common use cases of mobile group chats.

"},{"location":"open-extensions/muc_light/#2-requirements","title":"2. Requirements","text":"

Here are some high-level features required from a new variant of MUC

  1. The service allows any user to create a room for group communication.
  2. Users cannot join rooms on their own. They have to be added by the room owner or (if configured by service administrator) any other occupant.
  3. Only the owner can remove other occupants from the room.
  4. Every occupant can leave the room.
  5. A user may block attempts to add them to a specific room, or attempts made by a specific user.
  6. The message sent in the room is always broadcasted to every occupant.
  7. The full occupant list is always available to all occupants.
  8. The occupant is always visible on the list, even if they do not have any resources online.
  9. Occupants can only have two affiliations: owner and member.
  10. There MUST be at most one owner in the room (the service can choose to treat all users equally).
  11. If the room becomes empty, it is destroyed.
  12. Occupants cannot hide behind nicks. Their real bare JID is always visible to everyone
  13. No exchange of any <presence/> stanza inside the room.
  14. The user MUST be able to retrieve the list of rooms they occupy.
  15. The owner can modify the room configuration at any time; members may also be allowed to set configuration.
  16. All occupants can get the full room configuration at any time.
  17. Room history is available only in Message Archive Management.
"},{"location":"open-extensions/muc_light/#3-entity-use-cases","title":"3. Entity Use Cases","text":""},{"location":"open-extensions/muc_light/#31-discovering-a-muc-light-service","title":"3.1. Discovering a MUC Light Service","text":"

An entity often discovers a MUC service by sending a Service Discovery items (\"disco#items\") request to its own server.

Entity Queries the Server for Associated Services

<iq from='hag66@shakespeare.lit/pda'\n    id='h7ns81g'\n    to='shakespeare.lit'\n    type='get'>\n    <query xmlns='http://jabber.org/protocol/disco#items'/>\n</iq>\n

The server then returns the services that are associated with it.

Server Returns a Disco Items Result

<iq from='shakespeare.lit'\n    id='h7ns81g'\n    to='hag66@shakespeare.lit/pda'\n    type='result'>\n    <query xmlns='http://jabber.org/protocol/disco#items'>\n        <item jid='muclight.shakespeare.lit' name='MUC Light Service'/>\n    </query>\n</iq>\n
"},{"location":"open-extensions/muc_light/#32-discovering-the-features-supported-by-a-muc-light-service","title":"3.2. Discovering the Features Supported by a MUC Light Service","text":"

An entity may wish to discover if a service implements the Multi-User Chat protocol; in order to do so, it sends a service discovery information (\"disco#info\") query to the MUC service's JID.

Entity Queries Chat Service for MUC Light Support via Disco

<iq from='hag66@shakespeare.lit/pda'\n    id='lx09df27'\n    to='muclight.shakespeare.lit' type='get'>\n    <query xmlns='http://jabber.org/protocol/disco#info'/>\n</iq>\n

The service MUST return its identity and the features it supports.

Service Returns a Disco Info Result

<iq from='muclight.shakespeare.lit'\n    id='lx09df27'\n    to='hag66@shakespeare.lit/pda'\n    type='result'>\n    <query xmlns='http://jabber.org/protocol/disco#info'>\n        <identity category='conference' name='Shakespearean Chat Service' type='text'/>\n        <feature var='urn:xmpp:muclight:0'/>\n    </query>\n</iq>\n
"},{"location":"open-extensions/muc_light/#33-discovering-occupied-rooms","title":"3.3. Discovering Occupied Rooms","text":"

The service discovery items (\"disco#items\") protocol enables an entity to query a service for a list of associated items, which in the case of a chat service would consist of the specific chat rooms the entity occupies.

Entity Queries Chat Service for Rooms

<iq from='hag66@shakespeare.lit/pda'\n    id='zb8q41f4'\n    to='muclight.shakespeare.lit'\n    type='get'>\n    <query xmlns='http://jabber.org/protocol/disco#items'/>\n</iq>\n

The service MUST return a full list of the rooms the entity occupies. The server SHOULD include room name and version in each item.

Service Returns a Disco Items Result

<iq from='muclight.shakespeare.lit'\n    id='zb8q41f4'\n    to='hag66@shakespeare.lit/pda'\n    type='result'>\n    <query xmlns='http://jabber.org/protocol/disco#items'>\n        <item jid='heath@muclight.shakespeare.lit' name='A Lonely Heath' version='1'/>\n        <item jid='coven@muclight.shakespeare.lit' name='A Dark Cave' version='2'/>\n        <item jid='forres@muclight.shakespeare.lit' name='The Palace' version='3'/>\n        <item jid='inverness@muclight.shakespeare.lit'\n              name='Macbeth&apos;s Castle'\n              version='4'/>\n    </query>\n</iq>\n

If the full list of rooms is large (see XEP-0030 for details), the service MAY return only a partial list of rooms. If it does, it MUST include a <set/> element qualified by the 'http://jabber.org/protocol/rsm' namespace (as defined in Result Set Management (XEP-0059) [1]) to indicate that the list is not the full result set.

Service Returns a Limited List of Disco Items Result

<iq from='muclight.shakespeare.lit'\n    id='hx51v49s'\n    to='hag66@shakespeare.lit/pda'\n    type='result'>\n    <query xmlns='http://jabber.org/protocol/disco#items'>\n        <item jid='alls-well-that-ends-well@muclight.shakespeare.lit'\n              name='Everybody dies'\n              version='1'/>\n        <item jid='as-you-like-it@muclight.shakespeare.lit'\n              name='As you like it'\n              version='2'/>\n        <item jid='cleopatra@muclight.shakespeare.lit' name='Cleo fans' version='3'/>\n        <item jid='comedy-of-errors@muclight.shakespeare.lit'\n              name='404 Comedy not found'\n              version='4'/>\n        <item jid='coriolanus@muclight.shakespeare.lit'\n              name='What is Coriolanus?'\n              version='5'/>\n        <item jid='cymbeline@muclight.shakespeare.lit' name='Music room' version='6'/>\n        <item jid='hamlet@muclight.shakespeare.lit'\n              name='To chat or not to chat?'\n              version='7'/>\n        <item jid='henry-the-fourth-one@muclight.shakespeare.lit'\n              name='Royal Room 1'\n              version='8'/>\n        <item jid='henry-the-fourth-two@muclight.shakespeare.lit'\n              name='Royal Room 2'\n              version='9'/>\n        <item jid='henry-the-fifth@muclight.shakespeare.lit'\n              name='Royal Room Prime'\n              version='10'/>\n        <set xmlns='http://jabber.org/protocol/rsm'>\n            <first index='0'>alls-well-that-ends-well@muclight.shakespeare.lit</first>\n            <last>henry-the-fifth@muclight.shakespeare.lit</last>\n            <count>37</count>\n        </set>\n    </query>\n</iq>\n
"},{"location":"open-extensions/muc_light/#4-occupant-use-cases","title":"4. Occupant Use Cases","text":""},{"location":"open-extensions/muc_light/#41-sending-a-message-to-a-room","title":"4.1. Sending a message to a room","text":"

Every occupant in the room MAY broadcast messages to other occupants. In order to do so, the client MUST send a groupchat message to the room bare JID.

The room automatically assumes that occupants' nicks are equal to their bare JIDs. MUC light is designed for applications where it is not important to hide behind nicknames. On the contrary - it is up to the client to replace pure JIDs with user-friendly names like phone numbers or full names if necessary.

The room MUST route all messages of the 'groupchat' type.

Client sends a message to the room

<message from='hag66@shakespeare.lit/pda'\n         id='msg111'\n         to='coven@muclight.shakespeare.lit'\n         type='groupchat'>\n    <body>Harpier cries: 'tis time, 'tis time.</body>\n</message>\n

Server broadcasts a groupchat message

<message id='msg111' type='groupchat'\n    from='coven@muclight.shakespeare.lit/hag66@shakespeare.lit'\n    to='crone1@shakespeare.lit'>\n    <body>Harpier cries: 'tis time, 'tis time.</body>\n</message>\n
<message id='msg111' type='groupchat'\n    from='coven@muclight.shakespeare.lit/hag66@shakespeare.lit'\n    to='crone2@shakespeare.lit'>\n    <body>Harpier cries: 'tis time, 'tis time.</body>\n</message>\n

Note the message is sent to all the room occupants including the original sender.

<message id='msg111' type='groupchat'\n    from='coven@muclight.shakespeare.lit/hag66@shakespeare.lit'\n    to='hag66@shakespeare.lit'>\n    <body>Harpier cries: 'tis time, 'tis time.</body>\n</message>\n
"},{"location":"open-extensions/muc_light/#42-changing-a-room-subject","title":"4.2. Changing a room subject","text":"

The service MAY allow room occupants to set the room subject by changing the \"subject\" configuration field. A standard configuration stanza is used in this case. Subject change is announced like an ordinary configuration change.

Client changes the room subject

<iq from='hag66@shakespeare.lit/pda'\n    id='subject1'\n    to='coven@muclight.shakespeare.lit'\n    type='set'>\n    <query xmlns='urn:xmpp:muclight:0#configuration'>\n        <subject>To be or not to be?</subject>\n    </query>\n</iq>\n
<message from='coven@muclight.shakespeare.lit'\n         to='crone1@shakespeare.lit'\n         type='groupchat'\n         id='newsubject'>\n    <x xmlns='urn:xmpp:muclight:0#configuration'>\n        <prev-version>asdfghj000</prev-version>\n        <version>asdfghj</version>\n        <subject>To be or not to be?</subject>\n    </x>\n    <body />\n</message>\n
<message from='coven@muclight.shakespeare.lit'\n         to='hag66@shakespeare.lit'\n         type='groupchat'\n         id='newsubject'>\n    <x xmlns='urn:xmpp:muclight:0#configuration'>\n        <prev-version>asdfghj000</prev-version>\n        <version>asdfghj</version>\n        <subject>To be or not to be?</subject>\n    </x>\n    <body />\n</message>\n
<iq to='hag66@shakespeare.lit/pda'\n    id='subject1'\n    from='coven@muclight.shakespeare.lit'\n    type='result' />\n
"},{"location":"open-extensions/muc_light/#43-requesting-room-information","title":"4.3. Requesting room information","text":"

Room occupants may request room information (configuration and/or occupants list) by an information version. It is up to the service to define the version string; the only requirement is that it is unique per room. Please note there are no separate versions for configuration and occupant list alone.

If the server side version does not match the one provided by the client (or if the client does not provide one, i.e. the 'version' element is empty), the service MUST respond with a current version string and full configuration and/or occupant list.

If the version strings match, server MUST reply with an empty result.

Only room occupants can get room information.

Matching versions

<iq from='crone1@shakespeare.lit/desktop'\n    id='config0'\n    to='coven@muclight.shakespeare.lit'\n    type='get'>\n    <query xmlns='urn:xmpp:muclight:0#configuration'>\n        <version>abcdefg</version>\n    </query>\n</iq>\n
<iq from='coven@muclight.shakespeare.lit'\n    id='config0'\n    to='crone1@shakespeare.lit/desktop'\n    type='result' />\n
"},{"location":"open-extensions/muc_light/#431-getting-the-room-configuration","title":"4.3.1. Getting the room configuration","text":"

Client gets configuration from the server

<iq from='crone1@shakespeare.lit/desktop'\n    id='getconfig1'\n    to='coven@muclight.shakespeare.lit'\n    type='get'>\n    <query xmlns='urn:xmpp:muclight:0#configuration'>\n        <version>abcdefg</version>\n    </query>\n</iq>\n
<iq from='coven@muclight.shakespeare.lit'\n    id='getconfig1'\n    to='crone1@shakespeare.lit/desktop'\n    type='result'>\n    <query xmlns='urn:xmpp:muclight:0#configuration'>\n        <version>123456</version>\n        <roomname>A Dark Cave</roomname>\n    </query>\n</iq>\n
"},{"location":"open-extensions/muc_light/#432-requesting-a-user-list","title":"4.3.2. Requesting a user list","text":"

Client requests a user list

<iq from='crone1@shakespeare.lit/desktop'\n    id='getmembers'\n    to='coven@muclight.shakespeare.lit'\n    type='get'>\n    <query xmlns='urn:xmpp:muclight:0#affiliations'>\n        <version>abcdefg</version>\n    </query>\n</iq>\n
<iq from='coven@muclight.shakespeare.lit'\n    id='getmembers'\n    to='crone1@shakespeare.lit/desktop'\n    type='result'>\n    <query xmlns='urn:xmpp:muclight:0#affiliations'>\n        <version>123456</version>\n        <user affiliation='owner'>user1@shakespeare.lit</user>\n        <user affiliation='member'>user2@shakespeare.lit</user>\n        <user affiliation='member'>user3@shakespeare.lit</user>\n    </query>\n</iq>\n
"},{"location":"open-extensions/muc_light/#433-requesting-full-room-information","title":"4.3.3. Requesting full room information","text":"

Room occupants may request both lists (configuration + occupants) with a single request.

Client requests room information

<iq from='crone1@shakespeare.lit/desktop'\n    id='getinfo1'\n    to='coven@muclight.shakespeare.lit'\n    type='get'>\n    <query xmlns='urn:xmpp:muclight:0#info'>\n        <version>abcdefg</version>\n    </query>\n</iq>\n
<iq from='coven@muclight.shakespeare.lit'\n    id='getinfo1'\n    to='crone1@shakespeare.lit/desktop'\n    type='result'>\n    <query xmlns='urn:xmpp:muclight:0#info'>\n        <version>123456</version>\n        <configuration>\n            <roomname>A Dark Cave</roomname>\n        </configuration>\n        <occupants>\n            <user affiliation='owner'>user1@shakespeare.lit</user>\n            <user affiliation='member'>user2@shakespeare.lit</user>\n            <user affiliation='member'>user3@shakespeare.lit</user>\n        </occupants>\n    </query>\n</iq>\n
"},{"location":"open-extensions/muc_light/#44-leaving-the-room","title":"4.4. Leaving the room","text":"

Every occupant is allowed to leave the room at any time. It is done by modifying their own affiliation.

Occupant leaves the room

<iq from='crone1@shakespeare.lit/desktop'\n    id='leave1'\n    to='coven@muclight.shakespeare.lit'\n    type='set'>\n    <query xmlns='urn:xmpp:muclight:0#affiliations'>\n        <user affiliation='none'>crone1@shakespeare.lit</user>\n    </query>\n</iq>\n
<message from='coven@muclight.shakespeare.lit'\n         to='crone1@shakespeare.lit'\n         type='groupchat'\n         id='leave1'>\n    <x xmlns='urn:xmpp:muclight:0#affiliations'>\n        <user affiliation='none'>crone1@shakespeare.lit</user>\n    </x>\n    <body />\n</message>\n
<message from='coven@muclight.shakespeare.lit'\n         to='hag77@shakespeare.lit'\n         type='groupchat'\n         id='leave1'>\n    <x xmlns='urn:xmpp:muclight:0#affiliations'>\n        <prev-version>1111111</prev-version>\n        <version>aaaaaaa</version>\n        <user affiliation='none'>crone1@shakespeare.lit</user>\n    </x>\n    <body />\n</message>\n
<message from='coven@muclight.shakespeare.lit'\n         to='hag88@shakespeare.lit'\n         type='groupchat'\n         id='leave1'>\n    <x xmlns='urn:xmpp:muclight:0#affiliations'>\n        <prev-version>1111111</prev-version>\n        <version>aaaaaaa</version>\n        <user affiliation='none'>crone1@shakespeare.lit</user>\n    </x>\n    <body />\n</message>\n
<iq to='crone1@shakespeare.lit/desktop'\n    id='leave1'\n    from='coven@muclight.shakespeare.lit'\n    type='result' />\n
"},{"location":"open-extensions/muc_light/#45-blocking-functionality","title":"4.5. Blocking functionality","text":"

A user MAY choose to automatically deny being added to the room. All stanzas must be directed to MUC Light service. User MAY send more than one item in a single request and mix both 'user' and 'room' elements.

If the occupant tries to add another user to the room, and this user has set a blocking policy, the server MUST ignore the attempt. No error is returned, this user is simply skipped when processing affiliation change query.

Service denies adding blocking user

<iq from='crone2@shakespeare.lit/desktop'\n    id='blocked1'\n    to='coven@muclight.shakespeare.lit'\n    type='set'>\n    <query xmlns='urn:xmpp:muclight:0#affiliations'>\n        <user affiliation='member'>crone1@shakespeare.lit</user>\n        <user affiliation='member'>crone3@shakespeare.lit</user>\n    </query>\n</iq>\n
<message from='coven@muclight.shakespeare.lit'\n         to='crone2@shakespeare.lit'\n         type='groupchat'\n         id='blockedadd1'>\n    <x xmlns='urn:xmpp:muclight:0#affiliations'>\n        <user affiliation='member'>crone3@shakespeare.lit</user>\n    </x>\n    <body />\n</message>\n
<message from='coven@muclight.shakespeare.lit'\n         to='hag88@shakespeare.lit'\n         type='groupchat'\n         id='blockedadd1'>\n    <x xmlns='urn:xmpp:muclight:0#affiliations'>\n        <user affiliation='member'>crone3@shakespeare.lit</user>\n    </x>\n    <body />\n</message>\n
<iq to='crone2@shakespeare.lit/desktop'\n    id='blocked1'\n    from='coven@muclight.shakespeare.lit'\n    type='result' />\n
"},{"location":"open-extensions/muc_light/#451-requesting-a-blocking-list","title":"4.5.1. Requesting a blocking list","text":"

In order to get the current blocking list in the MUC Light service, the client sends an empty IQ get query with a proper namespace.

The list includes only items with a 'deny' action, since the 'allow' behaviour is default for MUC Light and is only used for the list modification.

User retrieves a blocking list

<iq from='crone1@shakespeare.lit/desktop'\n    id='getblock1'\n    to='muclight.shakespeare.lit'\n    type='get'>\n    <query xmlns='urn:xmpp:muclight:0#blocking'>\n    </query>\n</iq>\n
<iq type='result'\n    id='getblock1'\n    to='crone1@shakespeare.lit/desktop'\n    from='muclight.shakespeare.lit'>\n    <query xmlns='urn:xmpp:muclight:0#blocking'>\n        <room action='deny'>coven@muclight.shakespeare.lit</room>\n        <user action='deny'>hag77@shakespeare.lit</user>\n    </query>\n</iq>\n
"},{"location":"open-extensions/muc_light/#452-blocking-a-room","title":"4.5.2. Blocking a room","text":"

In order to block a room, a query must contain at least one 'room' item with a 'deny' action and a room bare JID in the content.

User blocks a room

<iq from='crone1@shakespeare.lit/desktop'\n    id='block1'\n    to='muclight.shakespeare.lit'\n    type='set'>\n    <query xmlns='urn:xmpp:muclight:0#blocking'>\n        <room action='deny'>coven@muclight.shakespeare.lit</room>\n        <room action='deny'>chapel@shakespeare.lit</room>\n    </query>\n</iq>\n
<iq type='result'\n    id='block1'\n    to='crone1@shakespeare.lit/desktop'\n    from='muclight.shakespeare.lit' />\n
"},{"location":"open-extensions/muc_light/#453-blocking-a-user","title":"4.5.3. Blocking a user","text":"

In order to block a user, a query must contain at least one 'user' item with a 'deny' action and a user bare JID in the content.

User blocks another user

<iq from='crone1@shakespeare.lit/desktop'\n    id='block2'\n    to='muclight.shakespeare.lit'\n    type='set'>\n    <query xmlns='urn:xmpp:muclight:0#blocking'>\n        <user action='deny'>hag66@shakespeare.lit</user>\n        <user action='deny'>hag77@shakespeare.lit</user>\n    </query>\n</iq>\n
<iq type='result'\n    id='block2'\n    to='crone1@shakespeare.lit/desktop'\n    from='muclight.shakespeare.lit' />\n
"},{"location":"open-extensions/muc_light/#454-unblocking","title":"4.5.4. Unblocking","text":"

In order to cancel a blocking, a query must contain at least one 'room' or 'user' item with an 'allow' action and an appropriate bare JID in the content.

Unblocking a JID that is not blocked does not trigger any error. The server MUST return an empty IQ result in such case.

User cancels blocking

<iq from='crone1@shakespeare.lit/desktop'\n    id='unblock1'\n    to='muclight.shakespeare.lit'\n    type='set'>\n    <query xmlns='urn:xmpp:muclight:0#blocking'>\n        <room action='allow'>coven@muclight.shakespeare.lit</room>\n        <user action='allow'>hag66@shakespeare.lit</user>\n    </query>\n</iq>\n
<iq type='result'\n    id='unblock1'\n    to='crone1@shakespeare.lit/desktop'\n    from='muclight.shakespeare.lit' />\n
"},{"location":"open-extensions/muc_light/#5-owner-use-cases","title":"5. Owner Use Cases","text":""},{"location":"open-extensions/muc_light/#51-creating-a-new-room","title":"5.1. Creating a new room","text":"

A room is created by submitting a dedicated stanza. The client application should pick a random room node name, since a human-readable room name is in configuration.

For rules that apply to the configuration options, please see \"Setting room configuration\" chapter.

The client MAY include initial configuration and occupant list (the list MUST NOT include the creator). The server MAY allow sending an incomplete configuration form. In such case the server MUST use the default values for missing fields. The server MAY enforce a minimal occupant list length.

The service MAY either give the creator the 'owner' or 'member' status. In the latter case all users are equal.

Upon room creation success, the service MUST reply with an empty IQ result.

The following rules (similar to the ones relevant to the affiliation change request) apply to the occupant list:

  • 'none' affiliation cannot be used.
  • All user bare JIDs must be unique
  • At most one owner can be chosen. If none is chosen, the room creator will become \"just\" a 'member'.

After the room is created (but before receiving IQ result), new occupants (including the creator) receive <message/> from the room with their affiliations (the stanza MUST include only recipient's affiliation) and the initial room version. <prev-version/> element MUST NOT be included.

Client requests room creation

<iq from='crone1@shakespeare.lit/desktop'\n    id='create1'\n    to='coven@muclight.shakespeare.lit'\n    type='set'>\n    <query xmlns='urn:xmpp:muclight:0#create'>\n        <configuration>\n            <roomname>A Dark Cave</roomname>\n        </configuration>\n        <occupants>\n            <user affiliation='member'>user1@shakespeare.lit</user>\n            <user affiliation='member'>user2@shakespeare.lit</user>\n        </occupants>\n    </query>\n</iq>\n
<message from='coven@muclight.shakespeare.lit'\n         to='crone1@shakespeare.lit'\n         type='groupchat'\n         id='createnotif'>\n    <x xmlns='urn:xmpp:muclight:0#affiliations'>\n        <version>aaaaaaa</version>\n        <user affiliation='owner'>crone1@shakespeare.lit</user>\n    </x>\n    <body />\n</message>\n
<message from='coven@muclight.shakespeare.lit'\n         to='user1@shakespeare.lit'\n         type='groupchat'\n         id='createnotif'>\n    <x xmlns='urn:xmpp:muclight:0#affiliations'>\n        <version>aaaaaaa</version>\n        <user affiliation='member'>user1@shakespeare.lit</user>\n    </x>\n    <body />\n</message>\n
<message from='coven@muclight.shakespeare.lit'\n         to='user2@shakespeare.lit'\n         type='groupchat'\n         id='createnotif'>\n    <x xmlns='urn:xmpp:muclight:0#affiliations'>\n        <version>aaaaaaa</version>\n        <user affiliation='member'>user2@shakespeare.lit</user>\n    </x>\n    <body />\n</message>\n
<iq to='crone1@shakespeare.lit/desktop'\n    id='create1'\n    from='coven@muclight.shakespeare.lit'\n    type='result' />\n
"},{"location":"open-extensions/muc_light/#511-requesting-a-new-room-with-a-unique-name","title":"5.1.1. Requesting a new room with a unique name","text":"

If a client would like to avoid a room JID conflict, it MAY request creating a new room with a server-side generated name, that is verified to be unique. In order to do so, the client MUST send a creation request to service JID, not room bare JID. The IQ result will originate from the new room bare JID.

The messages with affiliation change notifications MUST have the same ID as IQ set and result.

Client requests room creation

<iq from='crone1@shakespeare.lit/desktop'\n    id='createrandom'\n    to='muclight.shakespeare.lit'\n    type='set'>\n    <query xmlns='urn:xmpp:muclight:0#create'>\n        <configuration>\n            <roomname>Random Cave</roomname>\n        </configuration>\n    </query>\n</iq>\n
<message from='randomcave@muclight.shakespeare.lit'\n         to='crone1@shakespeare.lit'\n         type='groupchat'\n         id='createrandom'>\n    <x xmlns='urn:xmpp:muclight:0#affiliations'>\n        <version>aaaaaaa</version>\n        <user affiliation='owner'>crone1@shakespeare.lit</user>\n    </x>\n    <body />\n</message>\n
<iq to='crone1@shakespeare.lit/desktop'\n    id='createrandom'\n    from='muclight.shakespeare.lit'\n    type='result' />\n
"},{"location":"open-extensions/muc_light/#512-room-already-exists","title":"5.1.2. Room already exists","text":"

If the chosen room name already exists, the service MUST return a 'conflict' error.

Client requests room creation with existing name

<iq from='crone1@shakespeare.lit/desktop'\n    id='conflict1'\n    to='castle@muclight.shakespeare.lit'\n    type='set'>\n    <query xmlns='urn:xmpp:muclight:0#create'>\n        <configuration>\n            <roomname>A Dark Cave</roomname>\n        </configuration>\n    </query>\n</iq>\n
<iq to='crone1@shakespeare.lit/desktop'\n    id='conflict1'\n    from='castle@muclight.shakespeare.lit'\n    type='error'>\n    <error type='cancel'>\n        <conflict xmlns='urn:ietf:params:xml:ns:xmpp-stanzas'/>\n    </error>\n</iq>\n
"},{"location":"open-extensions/muc_light/#52-destroying-a-room","title":"5.2. Destroying a room","text":"

A room is automatically destroyed when its occupant list becomes empty or the room owner explicitly sends an IQ with a room destroy request.

Before sending an IQ result, every occupant is notified that its affiliation has changed to 'none'. These notifications include an <x/> element qualified with a \"urn:xmpp:muclight:0#destroy\" namespace.

Only the room owner is allowed to destroy it.

Room destruction notification SHOULD NOT contain version (or \"prev-version\") information.

Client requests room destruction

<iq from='crone1@shakespeare.lit/desktop'\n    id='destroy1'\n    to='coven@muclight.shakespeare.lit'\n    type='set'>\n    <query xmlns='urn:xmpp:muclight:0#destroy' />\n</iq>\n
<message from='coven@muclight.shakespeare.lit'\n         to='crone1@shakespeare.lit'\n         type='groupchat'\n         id='destroynotif'>\n    <x xmlns='urn:xmpp:muclight:0#affiliations'>\n        <user affiliation='none'>crone1@shakespeare.lit</user>\n    </x>\n    <x xmlns='urn:xmpp:muclight:0#destroy' />\n    <body />\n</message>\n
<message from='coven@muclight.shakespeare.lit'\n         to='hag77@shakespeare.lit'\n         type='groupchat'\n         id='destroynotif'>\n    <x xmlns='urn:xmpp:muclight:0#affiliations'>\n        <user affiliation='none'>hag77@shakespeare.lit</user>\n    </x>\n    <x xmlns='urn:xmpp:muclight:0#destroy' />\n    <body />\n</message>\n
<message from='coven@muclight.shakespeare.lit'\n         to='hag88@shakespeare.lit'\n         type='groupchat'\n         id='destroynotif'>\n    <x xmlns='urn:xmpp:muclight:0#affiliations'>\n        <user affiliation='none'>hag88@shakespeare.lit</user>\n    </x>\n    <x xmlns='urn:xmpp:muclight:0#destroy' />\n    <body />\n</message>\n
<iq to='crone1@shakespeare.lit/desktop'\n    id='destroy1'\n    from='coven@muclight.shakespeare.lit'\n    type='result' />\n
"},{"location":"open-extensions/muc_light/#53-setting-room-configuration","title":"5.3. Setting room configuration","text":"

Only room owners can modify the room configuration but the service MAY allow members to change it too.

All room occupants MUST be notified about a configuration change and both the new and old room version string (<version /> and <prev-version /> respectively).

\"version\" and \"prev-version\" configuration field names are NOT ALLOWED - they are reserved for room versioning.

The service MAY allow the client to set the configuration fields with any name but it is NOT RECOMMENDED.

The Data Forms are not used for the configuration. Instead, the config fields are encoded in XML elements with names equal to the key and content equal to the value.

Client configuration request to the server

<iq from='crone1@shakespeare.lit/desktop'\n    id='conf2'\n    to='coven@muclight.shakespeare.lit'\n    type='set'>\n    <query xmlns='urn:xmpp:muclight:0#configuration'>\n        <roomname>A Darker Cave</roomname>\n    </query>\n</iq>\n
<message from='coven@muclight.shakespeare.lit'\n         to='crone1@shakespeare.lit'\n         type='groupchat'\n         id='configchange'>\n    <x xmlns='urn:xmpp:muclight:0#configuration'>\n        <prev-version>zaqwsx</prev-version>\n        <version>zxcvbnm</version>\n        <roomname>A Darker Cave</roomname>\n    </x>\n    <body />\n</message>\n
<message from='coven@muclight.shakespeare.lit'\n         to='hag66@shakespeare.lit'\n         type='groupchat'\n         id='configchange'>\n    <x xmlns='urn:xmpp:muclight:0#configuration'>\n        <prev-version>zaqwsx</prev-version>\n        <version>zxcvbnm</version>\n        <roomname>A Darker Cave</roomname>\n    </x>\n    <body />\n</message>\n
<iq to='crone1@shakespeare.lit/desktop'\n    id='conf2'\n    from='coven@muclight.shakespeare.lit'\n    type='result' />\n

The server SHOULD accept incomplete (i.e. delta) configuration forms. In such case, values of the missing fields SHOULD be preserved.

"},{"location":"open-extensions/muc_light/#54-changing-the-occupant-list","title":"5.4. Changing the occupant list","text":"

The occupant list is modified by a direct affiliation change. The following rules apply:

  1. There are only 3 affiliations.
    • owner - can do everything in the room
    • member - can send messages to the room and if the service allows it, can also change configuration or change others' affiliations
    • none - not in the room; it's a keyword for marking a user for removal from a room
  2. Every occupant can change its own affiliation to none in order to leave the room.
  3. The only way to join the room is to be added by another occupant.
  4. The owner can change affiliations at will.
  5. If the owner leaves, the server MAY use any strategy to choose a new one.
  6. The room can have at most one owner. Giving someone else the 'owner' status effectively causes the current one to lose it.
  7. The owner can choose a new owner when leaving by including both 'none' and 'owner' items in affiliation change request.
  8. Every user JID can be used in the request at most once.
  9. A single request MAY change multiple affiliations.
  10. All changes must be meaningful, e.g. setting member's affiliation to 'member' is considered a bad request.
  11. Server MAY allow members to add new members but they still cannot make anyone an 'owner' or remove other users from the room.
  12. On success the server will reply with a result IQ with all the changed items. BEFORE returning the IQ result, the service MUST route a message with the affiliation change to all relevant users.

Newcomers, i.e. users that were not occupants before the change, SHOULD receive only their own affiliation and SHOULD NOT receive a <prev-version /> element.

The notifications must include both the new and old room version (<version /> and <prev-version /> respectively) string (except for the ones directed to users that have been removed from the room).

The notifications contain a list of items. The item list may be different from the list in the IQ set, because some of the changes may require additional operations, e.g. choosing new owner when the old one leaves. Users, that are still in the room after the change, will receive the full change list. Users, that have been removed from the room with the request, will get only one item: themselves with affiliation 'none'.

Affiliations change request

Let's consider a room coven with following members:

  • crone1 - owner
  • hag77 - member
  • hag88 - member

hag66 is not in the room yet.

User crone1 wants to add hag66 to the room, kick hag88 out and make hag77 the room owner.

<iq from='crone1@shakespeare.lit/desktop'\n    id='member1'\n    to='coven@muclight.shakespeare.lit'\n    type='set'>\n    <query xmlns='urn:xmpp:muclight:0#affiliations'>\n        <user affiliation='member'>hag66@shakespeare.lit</user>\n        <user affiliation='owner'>hag77@shakespeare.lit</user>\n        <user affiliation='none'>hag88@shakespeare.lit</user>\n    </query>\n</iq>\n

Now each user will receive an update. As you can see, affiliations have changed accordingly to crone1 request. However, this request implies one more update. Since hag77 has been promoted to a new owner, crone1 is automatically degraded to member.

<message from='coven@muclight.shakespeare.lit'\n         to='crone1@shakespeare.lit'\n         type='groupchat'\n         id='memberchange'>\n    <x xmlns='urn:xmpp:muclight:0#affiliations'>\n        <prev-version>njiokm</prev-version>\n        <version>qwerty</version>\n        <user affiliation='member'>crone1@shakespeare.lit</user>\n        <user affiliation='member'>hag66@shakespeare.lit</user>\n        <user affiliation='owner'>hag77@shakespeare.lit</user>\n        <user affiliation='none'>hag88@shakespeare.lit</user>\n    </x>\n    <body></body>\n</message>\n

Because hag66 was not a member of this room before, they only receive their own affiliation and no prev-version element.

<message from='coven@muclight.shakespeare.lit'\n         to='hag66@shakespeare.lit'\n         type='groupchat'\n         id='memberchange'>\n    <x xmlns='urn:xmpp:muclight:0#affiliations'>\n        <version>qwerty</version>\n        <user affiliation='member'>hag66@shakespeare.lit</user>\n    </x>\n    <body></body>\n</message>\n

hag77 receives an ordinary update, just like crone1.

<message from='coven@muclight.shakespeare.lit'\n         to='hag77@shakespeare.lit'\n         type='groupchat'\n         id='memberchange'>\n    <x xmlns='urn:xmpp:muclight:0#affiliations'>\n        <prev-version>njiokm</prev-version>\n        <version>qwerty</version>\n        <user affiliation='member'>crone1@shakespeare.lit</user>\n        <user affiliation='member'>hag66@shakespeare.lit</user>\n        <user affiliation='owner'>hag77@shakespeare.lit</user>\n        <user affiliation='none'>hag88@shakespeare.lit</user>\n    </x>\n    <body></body>\n</message>\n

hag88 has been kicked out of the room and therefore gets only their own affiliation change of type 'none'.

<message from='coven@muclight.shakespeare.lit'\n         to='hag88@shakespeare.lit'\n         type='groupchat'\n         id='memberchange'>\n    <x xmlns='urn:xmpp:muclight:0#affiliations'>\n        <user affiliation='none'>hag88@shakespeare.lit</user>\n    </x>\n    <body></body>\n</message>\n

crone1 gets the result IQ after the change.

<iq to='crone1@shakespeare.lit/desktop'\n    id='member1'\n    from='coven@muclight.shakespeare.lit'\n    type='result' />\n

"},{"location":"open-extensions/muc_light/#6-interactions-with-rfcs-and-other-xeps","title":"6. Interactions with RFCs and other XEPs","text":""},{"location":"open-extensions/muc_light/#61-user-rosters","title":"6.1. User rosters","text":"

The service MAY add user's rooms to its roster. It allows the client to skip the separate Disco request to the service. Roster items with rooms MUST belong to the group \"urn:xmpp:muclight:0\" (MUC Light namespace) and include the <version/> element. Their subscription type MUST be 'to'.

Entity requests the roster and receives a reply that includes a room item

<iq type='get' id='roster1' to='shakespeare.lit'>\n    <query xmlns='jabber:iq:roster'/>\n</iq>\n
<iq id='roster1' to='hag66@shakespeare.lit/tablet' type='result'>\n    <query xmlns='jabber:iq:roster' ver='ver7'>\n        <item jid='hag77@shakespeare.lit' subscription='both'/>\n        <item jid='hag88@shakespeare.lit' subscription='both'/>\n        <item jid='coven@muclight.shakespeare.lit' name='The Coven' subscription='to'>\n            <group>urn:xmpp:muclight:0</group>\n            <version>1234345</version>\n        </item>\n    </query>\n</iq>\n
"},{"location":"open-extensions/muc_light/#62-xep-0313-message-archive-management","title":"6.2. XEP-0313 Message Archive Management","text":"

This section defines the rules for archiving MUC Light events and messages. Stanzas described in the subsections below MUST be archived by the server. The stanzas not included here MUST NOT be archived.

The <message/> element inside <forwarded/> MUST include a \"from\" attribute and MUST NOT include a \"to\" attribute. \"id\" SHOULD be archived as well.

In case of regular groupchat messages, the \"from\" attribute MUST consist of a room full JID with a sender bare JID in the resource part. As for room notification, e.g. create event, \"from\" MUST be equal to room bare JID.

Examples below use MAM v0.4 protocol. The archive can be fetched only from a specific room, the client MUST NOT query MUC Light service directly.

"},{"location":"open-extensions/muc_light/#621-groupchat-message-from-occupant","title":"6.2.1. Groupchat message from occupant","text":"

Message from a user MUST be archived with all child elements.

Occupant queries MAM and receives regular groupchat message

<iq type='set' id='mamget1' to='coven@muclight.shakespeare.lit'>\n    <query xmlns='urn:xmpp:mam:1' queryid='f27' />\n</iq>\n
<message id='aeb213' to='hag66@shakespeare.lit/pda'>\n    <result xmlns='urn:xmpp:mam:1' queryid='f27' id='28482-98726-73623'>\n        <forwarded xmlns='urn:xmpp:forward:0'>\n            <delay xmlns='urn:xmpp:delay' stamp='2010-07-10T23:08:25Z'/>\n            <message from=\"coven@muclight.shakespeare.lit/hag77@shakespeare.lit\"\n                     id=\"msgid11\">\n                <body>Welcome!</body>\n                <x xmlns=\"elixir:ingredient\">bat-wing</x>\n            </message>\n        </forwarded>\n    </result>\n</message>\n
<iq type='result' id='mamget1' from='coven@muclight.shakespeare.lit'/>\n
"},{"location":"open-extensions/muc_light/#622-affiliation-change","title":"6.2.2. Affiliation change","text":"

Every archived affiliation change notification MUST include the <version/> element and MUST NOT contain the <prev-version/> element.

Occupant queries MAM and receives an affiliation change notification

<iq type='set' id='mamget2' to='muclight.shakespeare.lit'>\n    <query xmlns='urn:xmpp:mam:1' queryid='f37' />\n</iq>\n
<message id='aef2133' to='hag66@shakespeare.lit/pda'>\n    <result xmlns='urn:xmpp:mam:1' queryid='f37' id='21482-98726-71623'>\n        <forwarded xmlns='urn:xmpp:forward:0'>\n            <delay xmlns='urn:xmpp:delay' stamp='2013-07-10T21:08:25Z'/>\n            <message from=\"coven@muclight.shakespeare.lit\" id=\"notifid11\">\n                <x xmlns='urn:xmpp:muclight:0#affiliations'>\n                    <version>b9uf13h98f13</version>\n                    <user affiliation='owner'>hag66@shakespeare.lit</user>\n                    <user affiliation='member'>user1@shakespeare.lit</user>\n                    <user affiliation='member'>user2@shakespeare.lit</user>\n                </x>\n            </message>\n        </forwarded>\n    </result>\n</message>\n
<iq type='result' id='mamget2'/>\n
"},{"location":"open-extensions/muc_light/#623-room-creation","title":"6.2.3. Room creation","text":"

Room creation is archived as an affiliation change that includes ALL initial occupants (including the room creator).

"},{"location":"open-extensions/muc_light/#7-general-error-cases","title":"7. General Error Cases","text":""},{"location":"open-extensions/muc_light/#71-client-sends-an-unauthorized-stanza-to-a-room","title":"7.1. Client sends an unauthorized stanza to a room","text":"

If a client sends a stanza to a room that it does not occupy, the service MUST reply with the 'item-not-found' error.

Unauthorized IQ

<iq from='crone1@shakespeare.lit/desktop'\n    id='member1'\n    to='coven@muclight.shakespeare.lit'\n    type='set'>\n    <query xmlns='urn:xmpp:muclight:0#affiliations'>\n        <user affiliation='member'>hag66@shakespeare.lit</user>\n    </query>\n</iq>\n
<iq to='crone1@shakespeare.lit/desktop'\n    id='member1'\n    from='coven@muclight.shakespeare.lit'\n    type='error'>\n    <error type='cancel'>\n        <item-not-found xmlns='urn:ietf:params:xml:ns:xmpp-stanzas'/>\n    </error>\n</iq>\n

Unauthorized message

<message from='hag66@shakespeare.lit/pda'\n         id='unauth2'\n         to='coven@muclight.shakespeare.lit'\n         type='groupchat'>\n    <body>Harpier cries: 'tis time, 'tis time.</body>\n</message>\n
<message to='hag66@shakespeare.lit/pda'\n         id='unauth2'\n         from='coven@muclight.shakespeare.lit'\n         type='error'>\n    <error type='cancel'>\n        <item-not-found xmlns='urn:ietf:params:xml:ns:xmpp-stanzas'/>\n    </error>\n</message>\n
"},{"location":"open-extensions/muc_light/#72-client-sends-a-presence-stanza-to-the-service","title":"7.2. Client sends a <presence/> stanza to the service","text":"

The service MUST ignore all <presence/> stanzas sent by the client.

"},{"location":"open-extensions/muc_light/#73-client-sends-an-invalid-stanza-to-the-service","title":"7.3. Client sends an invalid stanza to the service","text":"

If the service receives an invalid stanza, it MUST reply with a 'bad-request' error.

Invalid IQ

<iq from='crone1@shakespeare.lit/desktop'\n    id='bad1'\n    to='coven@muclight.shakespeare.lit'\n    type='set'>\n    <query xmlns='urn:xmpp:muclight:0#affiliations'>\n        <item role='participant'>hag66@shakespeare.lit</item>\n    </query>\n</iq>\n
<iq to='crone1@shakespeare.lit/desktop'\n    id='bad1'\n    from='coven@muclight.shakespeare.lit'\n    type='error'>\n    <error type='modify'>\n        <bad-request xmlns='urn:ietf:params:xml:ns:xmpp-stanzas'/>\n    </error>\n</iq>\n

Invalid message

<message from='hag66@shakespeare.lit/pda'\n         id='bad2'\n         to='coven@muclight.shakespeare.lit'\n         type='chat'>\n    <body>Harpier cries: 'tis time, 'tis time.</body>\n</message>\n
<message to='hag66@shakespeare.lit/pda'\n         id='bad2'\n         from='coven@muclight.shakespeare.lit'\n         type='error'>\n    <error type='modify'>\n        <bad-request xmlns='urn:ietf:params:xml:ns:xmpp-stanzas'/>\n    </error>\n</message>\n
"},{"location":"open-extensions/muc_light/#74-request-sender-has-insufficient-privileges","title":"7.4. Request sender has insufficient privileges","text":"

If the request sender does not have sufficient privileges (but is a room occupant), the service MUST reply with a 'not-allowed' error.

It occurs in the following cases:

  • A member tries to change the configuration but the service is not configured to allow it. It does not apply to the subject change, although it has to be performed by sending <message/> with <subject/>, not configuration <iq/>.
  • A member tries to change anyone's affiliation to 'none' or 'owner'.
  • A member tries to change someone's affiliation to 'member' but the service is not configured to allow it.

Prohibited IQ

<iq from='minion@shakespeare.lit/desktop'\n    id='privileges1'\n    to='coven@muclight.shakespeare.lit'\n    type='set'>\n    <query xmlns='urn:xmpp:muclight:0#affiliations'>\n        <user role='owner'>minion@shakespeare.lit</user>\n    </query>\n</iq>\n
<iq to='minion@shakespeare.lit/desktop'\n    id='privileges1'\n    from='coven@muclight.shakespeare.lit'\n    type='error'>\n    <error type='cancel'>\n        <not-allowed xmlns='urn:ietf:params:xml:ns:xmpp-stanzas'/>\n    </error>\n</iq>\n
"},{"location":"open-extensions/muc_light/#8-implementation-notes","title":"8. Implementation Notes","text":""},{"location":"open-extensions/muc_light/#81-xep-0045-mappings-aka-legacy-mode","title":"8.1. XEP-0045 mappings a.k.a. legacy mode","text":"

Some client-side developers might choose to use existing XEP-0045 Multi-User Chat implementations to interface with the new MUC Light. There may be various reasons to do so: using a familiar protocol, avoiding additional implementation, quick prototyping etc. This section provides suggestions of mappings between XEP-0045 stanzas and the new ones described in this document. These mappings are ONLY available to use in the legacy mode, which allows using a subset of classic MUC stanzas but comes with the drawback that some of the functions are limited.

Operations not described here SHOULD remain unmodified.

"},{"location":"open-extensions/muc_light/#811-discovering-the-features-supported-by-a-muc-service","title":"8.1.1. Discovering the Features Supported by a MUC Service","text":"

A Disco result MAY either include a new <feature/> element with an \"http://jabber.org/protocol/muc\" namespace next to MUC Light one, or completely replace it, which is the RECOMMENDED behaviour.

Returning a MUC namespace in Disco

<iq from='hag66@shakespeare.lit/pda'\n    id='lx09df27'\n    to='muclight.shakespeare.lit'\n    type='get'>\n    <query xmlns='http://jabber.org/protocol/disco#info'/>\n</iq>\n
<iq from='muclight.shakespeare.lit'\n    id='lx09df27'\n    to='hag66@shakespeare.lit/pda'\n    type='result'>\n    <query xmlns='http://jabber.org/protocol/disco#info'>\n        <identity category='conference'\n                  name='Shakespearean Chat Service'\n                  type='text'/>\n        <feature var='http://jabber.org/protocol/muc'/>\n    </query>\n</iq>\n
"},{"location":"open-extensions/muc_light/#812-discovering-occupied-rooms","title":"8.1.2. Discovering Occupied Rooms","text":"

The room list MUST NOT include room versions.

Service Returns Disco Items Result

<iq from='muclight.shakespeare.lit'\n    id='zb8q41f4'\n    to='hag66@shakespeare.lit/pda'\n    type='result'>\n    <query xmlns='http://jabber.org/protocol/disco#items'>\n        <item jid='heath@muclight.shakespeare.lit'\n              name='A Lonely Heath'/>\n        <item jid='coven@muclight.shakespeare.lit'\n              name='A Dark Cave'/>\n        <item jid='forres@muclight.shakespeare.lit'\n              name='The Palace'/>\n        <item jid='inverness@muclight.shakespeare.lit'\n              name='Macbeth&apos;s Castle'/>\n    </query>\n</iq>\n
"},{"location":"open-extensions/muc_light/#813-changing-a-room-subject","title":"8.1.3. Changing a room subject","text":"

Instead of distributing the configuration change notifications, the room MUST route <message/> with a <subject/> like a classic MUC would. The client MUST send a classic message <subject/> as well. The room SHOULD save a new subject in the room configuration.

New subject is routed as an ordinary message

<message from='hag66@shakespeare.lit/pda'\n         id='compsubject'\n         to='coven@muclight.shakespeare.lit'\n         type='groupchat'>\n    <subject>To be or not to be?</subject>\n</message>\n
<message from='coven@muclight.shakespeare.lit'\n         to='crone1@shakespeare.lit'\n         type='groupchat'\n         id='compsubject'>\n    <subject>To be or not to be?</subject>\n</message>\n
<message from='coven@muclight.shakespeare.lit'\n         to='hag66@shakespeare.lit'\n         type='groupchat'\n         id='compsubject'>\n    <subject>To be or not to be?</subject>\n</message>\n
"},{"location":"open-extensions/muc_light/#814-getting-a-room-configuration","title":"8.1.4. Getting a room configuration","text":"

Room configuration is encoded in a Data Form, that simulates the XEP-0045 config form.

Getting the room configuration does not benefit from room versioning.

Requesting room configuration

<iq from='crone1@shakespeare.lit/desktop'\n    id='comp-config'\n    to='coven@muclight.shakespeare.lit'\n    type='get'>\n    <query xmlns='http://jabber.org/protocol/muc#owner'/>\n</iq>\n
<iq from='coven@muclight.shakespeare.lit'\n    id='comp-config'\n    to='crone1@shakespeare.lit/desktop'\n    type='result'>\n    <query xmlns='http://jabber.org/protocol/muc#owner'>\n        <x xmlns='jabber:x:data' type='form'>\n            <title>Configuration for \"coven\" Room</title>\n            <field type='hidden' var='FORM_TYPE'>\n                <value>http://jabber.org/protocol/muc#roomconfig</value>\n            </field>\n            <field label='Natural-Language Room Name'\n                   type='text-single'\n                   var='muc#roomconfig_roomname'>\n                <value>A Dark Cave</value>\n            </field>\n            <field label='Room subject'\n                   type='text-single'\n                   var='muc#roomconfig_subject'>\n                <value>To be or not to be?</value>\n            </field>\n        </x>\n    </query>\n</iq>\n
"},{"location":"open-extensions/muc_light/#815-requesting-a-user-list","title":"8.1.5. Requesting a user list","text":"

A user list is retrieved with an affiliation IQ get.

Requesting affiliation list

<iq from='crone1@shakespeare.lit/desktop'\n    id='comp-getaff'\n    to='coven@muclight.shakespeare.lit' type='get'>\n    <query xmlns='http://jabber.org/protocol/muc#admin'>\n        <item affiliation='owner'/>\n        <item affiliation='member'/>\n    </query>\n</iq>\n
<iq from='coven@muclight.shakespeare.lit'\n    id='comp-getaff'\n    to='crone1@shakespeare.lit/desktop'\n    type='result'>\n    <query xmlns='http://jabber.org/protocol/muc#admin'>\n        <item affiliation='owner'\n              jid='crone1@shakespeare.lit'\n              nick='crone1@shakespeare.lit'\n              role='moderator'/>\n        <item affiliation='member'\n              jid='hag66@shakespeare.lit'\n              nick='hag66@shakespeare.lit'\n              role='participant'/>\n    </query>\n</iq>\n
"},{"location":"open-extensions/muc_light/#816-requesting-room-information","title":"8.1.6. Requesting room information","text":"

There is no XEP-0045 equivalent for getting full room information.

"},{"location":"open-extensions/muc_light/#817-leaving-the-room","title":"8.1.7. Leaving the room","text":"

Leaving the room is performed by setting the own affiliation to 'none'. The service uses <presence/> to notify all occupants (and former occupant) about the change. <presence/> to the leaving occupant MUST be of the type \"unavailable\" and MUST include a status code 321 (i.e. user leaving due to affiliation change).

Leaving the room

<iq from='crone1@shakespeare.lit/desktop'\n    id='comp-leave'\n    to='coven@muclight.shakespeare.lit'\n    type='set'>\n    <query xmlns='http://jabber.org/protocol/muc#admin'>\n        <item affiliation='none' jid='crone1@shakespeare.lit'/>\n    </query>\n</iq>\n
<presence from='coven@muclight.shakespeare.lit/crone1@shakespeare.lit'\n          to='crone1@shakespeare.lit'\n          type='unavailable'>\n    <x xmlns='http://jabber.org/protocol/muc#user'>\n        <item affiliation='none' jid='crone1@shakespeare.lit/pda' role='none'/>\n        <status code='321'/>\n    </x>\n</presence>\n
<presence from='coven@muclight.shakespeare.lit/crone1@shakespeare.lit'\n          to='hag66@shakespeare.lit/desktop'>\n    <x xmlns='http://jabber.org/protocol/muc#user'>\n        <item affiliation='none' jid='crone1@shakespeare.lit/pda' role='none'/>\n        <status code='321'/>\n    </x>\n</presence>\n
<iq from='coven@muclight.shakespeare.lit'\n    id='comp-leave'\n    to='crone1@shakespeare.lit/desktop'\n    type='result'/>\n
"},{"location":"open-extensions/muc_light/#818-blocking-functionality","title":"8.1.8. Blocking functionality","text":"

The blocking functionality uses a small subset of the Privacy Lists protocol. Stanzas MUST be addressed to the sender's bare JID (the to attribute may be skipped). The privacy list name MUST be equal to \"urn:xmpp:muclight:0\". Obviously, this method won't work properly in XMPP Server Federation, because privacy stanzas are handled by sender's server and the MUC Light Blocking functionality is handled by a MUC Light service server. As opposed to XEP-0016, it is allowed to send \"delta\" privacy lists.

"},{"location":"open-extensions/muc_light/#8181-request-blocking-list","title":"8.1.8.1. Request blocking list","text":"

Retrieving blocking list

<iq from='crone1@shakespeare.lit/desktop' type='get' id='comp-getlist'>\n    <query xmlns='jabber:iq:privacy'>\n        <list name='urn:xmpp:muclight:0'/>\n    </query>\n</iq>\n
<iq type='result' id='comp-getlist' to='crone1@shakespeare.lit/desktop'>\n    <query xmlns='jabber:iq:privacy'>\n        <list name='urn:xmpp:muclight:0'>\n            <item type='jid'\n                  value='coven@muclight.shakespeare.lit'\n                  action='deny'\n                  order='1'/>\n            <item type='jid'\n                  value='muclight.shakespeare.lit/hag66@shakespeare.lit'\n                  action='deny'\n                  order='1'/>\n        </list>\n    </query>\n</iq>\n
"},{"location":"open-extensions/muc_light/#8182-blocking-a-room","title":"8.1.8.2. Blocking a room","text":"

In order to block a room, the client MUST deny a room bare JID in privacy list.

Blocking a room

<iq from='crone1@shakespeare.lit/desktop' type='set' id='comp-blockroom'>\n    <query xmlns='jabber:iq:privacy'>\n        <list name='urn:xmpp:muclight:0'>\n            <item type='jid'\n                  value='coven@muclight.shakespeare.lit'\n                  action='deny'\n                  order='1'/>\n        </list>\n    </query>\n</iq>\n
<iq type='result' id='comp-blockroom' to='crone1@shakespeare.lit/desktop' />\n
"},{"location":"open-extensions/muc_light/#8183-blocking-a-user","title":"8.1.8.3. Blocking a user","text":"

In order to block a user, the client MUST deny a service JID with the user's bare JID in the resource.

Blocking a user

<iq from='crone1@shakespeare.lit/desktop' type='set' id='comp-blockuser'>\n    <query xmlns='jabber:iq:privacy'>\n        <list name='urn:xmpp:muclight:0'>\n            <item type='jid'\n                  value='muclight.shakespeare.lit/hag66@shakespeare.lit'\n                  action='deny'\n                  order='1'/>\n        </list>\n    </query>\n</iq>\n
<iq type='result' id='comp-blockuser' to='crone1@shakespeare.lit/desktop' />\n
"},{"location":"open-extensions/muc_light/#8184-unblocking","title":"8.1.8.4. Unblocking","text":"

Unblocking

<iq from='crone1@shakespeare.lit/desktop' type='get' id='comp-getlist'>\n    <query xmlns='jabber:iq:privacy'>\n        <list name='urn:xmpp:muclight:0'>\n            <item type='jid'\n                  value='coven@muclight.shakespeare.lit'\n                  action='allow'\n                  order='1'/>\n            <item type='jid'\n                  value='muclight.shakespeare.lit/hag66@shakespeare.lit'\n                  action='allow'\n                  order='1'/>\n        </list>\n    </query>\n</iq>\n
<iq type='result' id='comp-getlist' to='crone1@shakespeare.lit/desktop' />\n
"},{"location":"open-extensions/muc_light/#819-creating-a-room","title":"8.1.9. Creating a room","text":"

The room is created in a standard XEP-0045 way. Client MUST use a nick equal to their own bare JID.

Compatibility mode MUST NOT support a unique room name generation.

Creating a room

<presence from='crone1@shakespeare.lit/desktop'\n          to='coven@muclight.shakespeare.lit/crone1@shakespeare.lit'>\n    <x xmlns='http://jabber.org/protocol/muc'/>\n</presence>\n
<presence from='coven@chat.shakespeare.lit/crone1@shakespeare.lit'\n          to='crone1@shakespeare.lit/desktop'>\n    <x xmlns='http://jabber.org/protocol/muc#user'>\n        <item affiliation='owner' role='moderator'/>\n        <status code='110'/>\n        <status code='201'/>\n    </x>\n</presence>\n
"},{"location":"open-extensions/muc_light/#8191-room-already-exists","title":"8.1.9.1. Room already exists","text":"

If the client attempts to create a room that is already used, it will receive an error <presence/> informing that registration is required (like in the case of members-only rooms in XEP-0045).

Creating a room

<presence from='coven@muclight.shakespeare.lit/crone1@shakespeare.lit'\n          to='crone1@shakespeare.lit/desktop'\n          type='error'>\n    <x xmlns='http://jabber.org/protocol/muc'/>\n    <error by='coven@muclight.shakespeare.lit' type='auth'>\n        <registration-required xmlns='urn:ietf:params:xml:ns:xmpp-stanzas'/>\n    </error>\n</presence>\n
"},{"location":"open-extensions/muc_light/#8110-destroying-the-room","title":"8.1.10. Destroying the room","text":"

A classic XEP-0045 method is used but the service SHOULD NOT forward reason and alternate venue JID.

Destroying the room

<iq from='crone1@shakespeare.lit/desktop'\n    id='begone'\n    to='heath@muclight.shakespeare.lit'\n    type='set'>\n    <query xmlns='http://jabber.org/protocol/muc#owner'>\n        <destroy jid='coven@muclight.shakespare.lit'>\n            <reason>Some reason.</reason>\n        </destroy>\n    </query>\n</iq>\n
<presence from='heath@chat.shakespeare.lit/crone1@shakespeare.lit'\n    to='crone1@shakespeare.lit/desktop' type='unavailable'>\n    <x xmlns='http://jabber.org/protocol/muc#user'>\n        <item affiliation='none' role='none'/>\n        <destroy />\n    </x>\n</presence>\n
<presence\n    from='heath@chat.shakespeare.lit/wiccarocks@shakespeare.lit'\n    to='wiccarocks@shakespeare.lit/laptop' type='unavailable'>\n    <x xmlns='http://jabber.org/protocol/muc#user'>\n        <item affiliation='none' role='none'/>\n        <destroy />\n    </x>\n</presence>\n
<presence\n    from='heath@chat.shakespeare.lit/hag66@shakespeare.lit'\n    to='hag66@shakespeare.lit/pda'\n    type='unavailable'>\n    <x xmlns='http://jabber.org/protocol/muc#user'>\n        <item affiliation='none' role='none'/>\n        <destroy />\n    </x>\n</presence>\n
<iq from='heath@chat.shakespeare.lit'\n    id='begone'\n    to='crone1@shakespeare.lit/desktop'\n    type='result'/>\n
"},{"location":"open-extensions/muc_light/#8111-setting-room-configuration","title":"8.1.11. Setting room configuration","text":"

Room occupants can use a standard XEP-0045 configuration modification method. The service MUST broadcast only the notification about the configuration change with a status code 104, so every occupant can retrieve the new room configuration in a separate request. The client is allowed to send a config delta in a form.

Setting room configuration

<iq to='coven@muclight.shakespeare.lit'\n    id='comp-setconfig'\n    from='crone1@shakespeare.lit/desktop'\n    type='set'>\n    <query xmlns='http://jabber.org/protocol/muc#owner'>\n        <x xmlns='jabber:x:data' type='form'>\n            <field type='hidden' var='FORM_TYPE'>\n                <value>http://jabber.org/protocol/muc#roomconfig</value>\n            </field>\n            <field label='Natural-Language Room Name'\n                   type='text-single'\n                   var='muc#roomconfig_roomname'>\n                <value>A Darker Cave</value>\n            </field>\n            <field label='Room subject'\n                   type='text-single'\n                   var='muc#roomconfig_subject'>\n                <value>To be!</value>\n            </field>\n        </x>\n    </query>\n</iq>\n
<message from='coven@muclight.shakespeare.lit'\n         id='comp-confchange'\n         to='crone1@shakespeare.lit/desktop'\n         type='groupchat'>\n    <x xmlns='http://jabber.org/protocol/muc#user'>\n        <status code='104'/>\n    </x>\n</message>\n
<message from='coven@muclight.shakespeare.lit'\n         id='comp-confchange'\n         to='crone2@shakespeare.lit/desktop'\n         type='groupchat'>\n    <x xmlns='http://jabber.org/protocol/muc#user'>\n        <status code='104'/>\n    </x>\n</message>\n
<iq from='coven@muclight.shakespeare.lit'\n    id='comp-setconfig'\n    to='crone1@shakespeare.lit/desktop'\n    type='result'/>\n
"},{"location":"open-extensions/muc_light/#8112-changing-occupant-list","title":"8.1.12. Changing occupant list","text":"

The service MUST send an affiliation change notification to all participants. Leaving users MUST NOT receive any information except for their own \"none\" affiliation. New users MUST receive an invitation message.

Changing occupant list

<iq from='crone1@shakespeare.lit/desktop'\n    id='comp-setaff'\n    to='coven@muclight.shakespeare.lit'\n    type='set'>\n    <query xmlns='http://jabber.org/protocol/muc#admin'>\n        <item affiliation='none' jid='hag66@shakespeare.lit'/>\n        <item affiliation='member' jid='hecate@shakespeare.lit'/>\n    </query>\n</iq>\n
<presence from='coven@chat.shakespeare.lit/hag66@shakespeare.lit'\n          to='hag66@shakespeare.lit'\n          type='unavailable'>\n    <x xmlns='http://jabber.org/protocol/muc#user'>\n        <item affiliation='none' jid='hag66@shakespeare.lit' role='none'/>\n        <status code='321'/>\n    </x>\n</presence>\n
<message from='coven@muclight.shakespeare.lit'\n         id='comp-invite0'\n         to='hecate@shakespeare.lit'>\n    <x xmlns='http://jabber.org/protocol/muc#user'>\n        <invite from='crone1@shakespeare.lit'/>\n    </x>\n</message>\n
<presence from='coven@chat.shakespeare.lit/hag66@shakespeare.lit'\n          to='crone1@shakespeare.lit'\n          type='unavailable'>\n    <x xmlns='http://jabber.org/protocol/muc#user'>\n        <item affiliation='none' jid='hag66@shakespeare.lit' role='none'/>\n        <status code='321'/>\n    </x>\n</presence>\n
<presence from='coven@chat.shakespeare.lit/hecate@shakespeare.lit'\n          to='crone1@shakespeare.lit'>\n    <x xmlns='http://jabber.org/protocol/muc#user'>\n        <item affiliation='member'\n              jid='hecate@shakespeare.lit'\n              role='participant'\n              nick='hecate@shakespeare.lit'/>\n    </x>\n</presence>\n
<iq from='coven@muclight.shakespeare.lit'\n    id='comp-setaff'\n    to='crone1@shakespeare.lit/desktop'\n    type='result'/>\n
"},{"location":"open-extensions/muc_light/#82-service-limits-and-configuration","title":"8.2. Service limits and configuration","text":"

The MUC Light service may be abused by malicious users, e.g. due to replicating a single message for every room occupant. The list below contains suggested configurable limits that SHOULD be implemented.

The service features that might vary depending on a specific application are included as well.

  • Maximum number of rooms the user occupies.
  • Blocking feature enabled/disabled.
  • XEP-0045 compatibility mode enabled/disabled.
  • Room creator's initial affiliation: owner/member.
  • Room configuration may be changed by owner/occupants.
  • New members can be invited by owner/occupants.
  • Maximal room size.
"},{"location":"open-extensions/smart-markers/","title":"Smart Markers","text":"

This module allows the client to query for the most recent chat markers.

When a client enters a conversation after being offline for a while, such a client might want to know the last message-id that was marked according to the rules defined in XEP-0333 - Chat Markers, in order to know where they left off, and build an enhanced UI.

MongooseIM provides such functionality, using mod_smart_markers

"},{"location":"open-extensions/smart-markers/#namespace","title":"Namespace","text":"
esl:xmpp:smart-markers:0\n
"},{"location":"open-extensions/smart-markers/#fetching-a-conversations-latest-markers","title":"Fetching a conversation's latest markers","text":"

Given a peer, i.e., another user or a muc room, we can fetch the marker we last sent, to the main thread or any other sub-thread, with an IQ like the following:

<iq id='iq-unique-id' type='get'>\n  <query xmlns='esl:xmpp:smart-markers:0' peer='<peer-bare-jid>' [thread='<thread-id>' after='<RFC3339-timestamp>'] />\n</iq>\n
where:

  • <peer-bare-jid> MUST be the bare jid of the peer whose last marker is to be checked. It can be the bare jid of a user, or of a MUC room.
  • <thread> is an optional attribute that indicates if the check refers to a specific thread in the conversation. If not provided, defaults to the main conversation thread.
  • <after> is an optional attribute indicating whether markers sent only after a certain timestamp are desired. This most often makes sense for big groupchats, as a potential filter to reduce the amount of markers that will be returned.

Then the following would be received, was there to be any marker:

<iq from='user-bare-jid' to='user-jid' id='iq-unique-id' type='result'>\n  <query xmlns='esl:xmpp:smart-markers:0' peer='peer-bare-jid'>\n    <marker from='<sender-bare-jid>' id='<message-id>' type='<type>' timestamp='<RFC3339>' [thread='<thread-id>']/>\n  </query>\n</iq>\n
where peer-bare-jid matches the requested bare jid and the subelements are marker xml payloads with the following attributes:

  • <id> is the message id associated to this marker.
  • <type> is a marker as described in XEP-0333.
  • <timestamp> contains an RFC3339 timestamp indicating when the marker was sent
  • <thread> is an optional attribute that indicates if the marker refers to a specific thread in the conversation, or the main conversation if absent.
  • <sender-bare-jid> is the bare jid of the peer who sent the marker, which can be the requester itself, or any peer in the conversation, for both 1:1 chats or groupchats.
"},{"location":"open-extensions/smart-markers/#example-11","title":"Example: 1:1","text":"
<!-- Alice fetches markers in her conversation with Bob -->\n<iq id='iq-unique-id' type='get'>\n  <query xmlns='esl:xmpp:smart-markers:0' peer='bob@localhost' />\n</iq>\n\n<!-- She receives as an answer -->\n<iq from='alice@localhost' to='alice@localhost/res1' id='iq-unique-id' type='result'>\n  <query xmlns='esl:xmpp:smart-markers:0' peer='bob@localhost'>\n    <marker from='alice@localhost' id='ABCDEFGHIJ' type='displayed' timestamp='2022-02-26T09:11:05.634232Z'/>\n    <marker from='bob@localhost' id='KLMNOPQRST' type='displayed' timestamp='2022-02-26T09:11:07.382923Z'/>\n  </query>\n</iq>\n
"},{"location":"open-extensions/smart-markers/#example-groupchats","title":"Example: groupchats","text":"
<!-- Alice fetches markers in a groupchat -->\n<iq id='iq-unique-id' type='get'>\n  <query xmlns='esl:xmpp:smart-markers:0' peer='room@muc.localhost' />\n</iq>\n\n<!-- She receives as an answer -->\n<iq from='alice@localhost' to='alice@localhost/res1' id='iq-unique-id' type='result'>\n  <query xmlns='esl:xmpp:smart-markers:0' peer='room@muc.localhost'>\n    <marker from='alice@localhost' id='XOLWEMUNTO' type='displayed' timestamp='2022-02-26T09:11:05.634232Z'/>\n    <marker from='bob@localhost' id='NNTMWMKSOE' type='displayed' timestamp='2022-02-26T09:11:07.382923Z'/>\n    <marker from='mike@localhost' id='OSNTETNHUR' type='displayed' timestamp='2022-02-26T09:13:07.382923Z'/>\n    <marker from='kate@localhost' id='SNWMENSTUH' type='displayed' timestamp='2022-02-26T09:12:07.382923Z'/>\n  </query>\n</iq>\n
"},{"location":"open-extensions/token-reconnection/","title":"Token-based reconnection","text":""},{"location":"open-extensions/token-reconnection/#introduction","title":"Introduction","text":"

Automatic reconnection after spurious disconnection is a must-have feature in modern IM applications. One way of providing this feature is storing the user login information on the disk. Here you need to balance two values - security and convenience for the end-user. To put it simply: storing passwords in plaintext is inherently insecure while protecting the XMPP password with a master-password damages the user experience. With a token-based authentication mechanism, the user has to provide login information only once, for the initial connection to the XMPP server, and can later rely on the application's automatic use of tokens for subsequent reconnections.

Reconnecting to the XMPP server usually means that the client has to go through the same long process of SASL challenge-response exchange which may cause noticeable lags, especially while using SCRAM-based mechanisms. Providing a token to the XMPP server is secure and doesn't require multiple challenge-response roundtrips, therefore might significantly speed up reconnection times.

"},{"location":"open-extensions/token-reconnection/#requirements","title":"Requirements","text":"

This extension requires the client application to authenticate to the XMPP server using a regular XMPP authentication mechanism like SCRAM-SHA-1 at least once.

After that, the following authentications may be done using X-OAUTH SASL mechanism with a token obtained from the server.

To enable the feature, modules mod_auth_token and mod_keystore have to be enabled on the server. For more details regarding the configuration see mod_auth_token documentation and mod_keystore.

"},{"location":"open-extensions/token-reconnection/#token-types","title":"Token types","text":"Token Type Description Access token These are short lived tokens whose grants aren't tracked by the server (i.e. there's no need to store anything in a database). Access tokens can be used as a payload for the X-OAUTH authentication mechanism and grant access to the system. Access tokens can't be revoked. An access token is valid only until its expiry date is reached. Refresh token These are longer lived tokens which are tracked by the server, and therefore require persistent storage. Refresh tokens can be used as a payload for the X-OAUTH authentication mechanism and grant access to the system, as well as result in a new set of tokens being returned upon successful authentication. Refresh tokens can be revoked. A refresh token is valid until it has expired, unless it has been revoked. On revocation, it immediately becomes invalid. As the server stores information about granted tokens, it can also persistently mark them as revoked.

While only two token types have been described above, implementations might use other token types for specific purposes. For example, a particular token type could limit the access privileges of a user logged into the system or denote an affiliation with a Multi User Chat room. None of such capability grants are a subject of this specification though.

"},{"location":"open-extensions/token-reconnection/#use-cases","title":"Use cases","text":""},{"location":"open-extensions/token-reconnection/#obtaining-a-token","title":"Obtaining a token","text":"

After authenticating with some other mechanism like SCRAM-SHA-1, a client may request a token from the server by sending the following iq get to its own bare JID:

Client requests tokens

<iq type='get' to='alice@wonderland.com' id='123'>\n    <query xmlns='erlang-solutions.com:xmpp:token-auth:0'/>\n</iq>\n

Server responds with tokens

<iq from=\"alice@wonderland.com\" type=\"result\" to=\"alice@wonderland.com/resource\" id=\"123\">\n  <items xmlns=\"erlang-solutions.com:xmpp:token-auth:0\">\n    <access_token>YWNjZXNzAGFsaWNlQHdvbmRlcmxhbmQuY29tL01pY2hhbC1QaW90cm93c2tpcy1NYWNCb29rLVBybwA2MzYyMTg4Mzc2NAA4M2QwNzNiZjBkOGJlYzVjZmNkODgyY2ZlMzkyZWM5NGIzZjA4ODNlNDI4ZjQzYjc5MGYxOWViM2I2ZWJlNDc0ODc3MDkxZTIyN2RhOGMwYTk2ZTc5ODBhNjM5NjE1Zjk=</access_token>\n    <refresh_token>cmVmcmVzaABhbGljZUB3b25kZXJsYW5kLmNvbS9NaWNoYWwtUGlvdHJvd3NraXMtTWFjQm9vay1Qcm8ANjM2MjMwMDYxODQAMQAwZGQxOGJjODhkMGQ0N2MzNTBkYzAwYjcxZjMyZDVmOWIwOTljMmI1ODU5MmNhN2QxZGFmNWFkNGM0NDQ2ZGU2MWYxYzdhNTJjNDUyMGI5YmIxNGIxNTMwMTE4YTM1NTc=</refresh_token>\n  </items>\n</iq>\n
"},{"location":"open-extensions/token-reconnection/#authentication-with-an-access-token","title":"Authentication with an access token","text":"

Client authenticates with an access token

<auth xmlns=\"urn:ietf:params:xml:ns:xmpp-sasl\" mechanism=\"X-OAUTH\">\nYWNjZXNzAGFsaWNlQHdvbmRlcmxhbmQuY29tL01pY2hhbC1QaW90cm93c2tpcy1NYWNCb29rLVBybwA2MzYyMTg4Mzc2NAA4M2QwNzNiZjBkOGJlYzVjZmNkODgyY2ZlMzkyZWM5NGIzZjA4ODNlNDI4ZjQzYjc5MGYxOWViM2I2ZWJlNDc0ODc3MDkxZTIyN2RhOGMwYTk2ZTc5ODBhNjM5NjE1Zjk=\n</auth>\n
Server responds with a success

<success xmlns=\"urn:ietf:params:xml:ns:xmpp-sasl\"/>\n
"},{"location":"open-extensions/token-reconnection/#authentication-with-a-refresh-token","title":"Authentication with a refresh token","text":"

In this situation server will respond with a new refresh token which SHOULD be used in future authentication.

Client authenticates with a refresh token

<auth xmlns=\"urn:ietf:params:xml:ns:xmpp-sasl\" mechanism=\"X-OAUTH\">\ncmVmcmVzaABhbGljZUB3b25kZXJsYW5kLmNvbS9NaWNoYWwtUGlvdHJvd3NraXMtTWFjQm9vay1Qcm8ANjM2MjMwMDYxODQAMQAwZGQxOGJjODhkMGQ0N2MzNTBkYzAwYjcxZjMyZDVmOWIwOTljMmI1ODU5MmNhN2QxZGFmNWFkNGM0NDQ2ZGU2MWYxYzdhNTJjNDUyMGI5YmIxNGIxNTMwMTE4YTM1NTc=\n</auth>\n

Server responds with a success and a new refresh token

<success xmlns=\"urn:ietf:params:xml:ns:xmpp-sasl\">\ncmVmcmVzaABhbGljZUB3b25kZXJsYW5kLmNvbS9NaWNoYWwtUGlvdHJvd3NraXMtTWFjQm9vay1Qcm8ANjM2MjMwMDYxODQAMgAwZGQxOGJjODhkMGQ0N2MzNTBkYzAwYjcxZjMyZDVmOWIwOTljMmI1ODU5MmNhN2QxZGFmNWFkNGM0NDQ2ZGU2MWYxYzdhNTJjNDUyMGI5YmIxNGIxNTMwMTE4YTM1NTc=\n</success>\n
"},{"location":"open-extensions/token-reconnection/#token-format","title":"Token format","text":"

All tokens are exchanged as Base64 encoded binary data. Serialization format of the token before encoding with Base64 is dependent on its type. Common parts in every token are BARE_JID and EXPIRES_AT. EXPIRES_AT is a timestamp saying when a given token will expire. \\0 stands for the ASCII null character (i.e. byte 0). Text in single quotes ('example') is literal. ALL_CAPS denote parameters.

"},{"location":"open-extensions/token-reconnection/#access-token-format","title":"Access token format","text":"
BASE64_encode\n        ('access', \\0, BARE_JID, \\0, EXPIRES_AT, \\0, DATA)\n

Example (please note the line break was added only for readability):

'access' \\0 Q8@wonderland.com \\0 64875466454\n    \\0 0acd0a66d06934791d046060cf9f1ad3c2abb3274cc7e7d7b2bc7e2ac4453ed774b6c6813b40ebec2bbc3774d59d4087\n
"},{"location":"open-extensions/token-reconnection/#refresh-token-format","title":"Refresh token format","text":"
BASE64_encode\n        ('refresh', \\0, BARE_JID, \\0, EXPIRES_AT, \\0, SEQUENCE_NO, \\0, DATA)\n

Example (please note the line break was added only for readability):

'refresh' \\0 qp@wonderland.com \\0 64875466457 \\0 6\n    \\0 8f57cb019cd6dc6e7779be165b9558611baf71ee4a40d03e77b78b069f482f96c9d23b1ac1ef69f64c1a1db3d36a96ad\n
"},{"location":"operation-and-maintenance/Cluster-configuration-and-node-management/","title":"Cluster configuration and node management","text":""},{"location":"operation-and-maintenance/Cluster-configuration-and-node-management/#environment-configuration","title":"Environment configuration","text":""},{"location":"operation-and-maintenance/Cluster-configuration-and-node-management/#file-descriptors","title":"File descriptors","text":"

To handle large traffic, some of the system variables need to be tuned. Number one on that list is the maximum number of file descriptors which often is set to 1024. Each MongooseIM connection consumes ~1 file descriptor, so the default value will not suffice for larger installations - when it is exceeded, emfile errors will appear in logs.

To check the current limit execute: ulimit -n.

To list all limits execute: ulimit -a.

In the example below we set limits for a mongooseim user. To increase the limit the following entries should be added in /etc/security/limits.conf:

mongooseim   soft   nofile   1000000\nmongooseim   hard   nofile   1000000\n

If you are using Ubuntu, all /etc/pam.d/common-session* files should include session required pam_limits.so.

"},{"location":"operation-and-maintenance/Cluster-configuration-and-node-management/#vmargs-file","title":"vm.args file","text":"

This file contains Erlang options used when starting the VM. It is located in REL_ROOT/etc/vm.args where REL_ROOT is the path to a MongooseIM release (i.e. _build/prod/rel/mongooseim if you build MongooseIM from source).

When using an SSL/TLS connection we advise to increase ERL_MAX_PORTS to 350000. This value specifies how many ports (files, drivers, sockets etc) can be used by the Erlang VM. Be cautious - it preallocates some structures inside the VM and will have impact on the memory usage. We suggest 350000 for 100\u00a0k users when using an SSL/TLS connection or 250000 in other cases.

To check how memory consumption changes depending on ERL_MAX_PORTS, use the following command:

env ERL_MAX_PORTS=[given value] erl -noinput -eval 'io:format(\"~p~n\",[erlang:memory(system)]).' -s erlang halt\n

Another change you need to make when building a MongooseIM cluster is setting the -sname. To do it, just set the -sname option in vm.args with node's hostname, which must be resolvable on other nodes in the cluster.

"},{"location":"operation-and-maintenance/Cluster-configuration-and-node-management/#port-range","title":"Port range","text":"

To connect to other nodes, a freshly started node uses a port from the range inet_dist_listen_min to inet_dist_listen_max.

To enable this, add the following line to the vm.args file:

-kernel inet_dist_listen_min 50000 inet_dist_listen_max 50010\n

Make sure that the range you set provides enough ports for all the nodes in the cluster.

Remember to keep an epmd port open (port 4369) if any firewall restrictions are required. Epmd keeps track of which Erlang node is using which ports on the local machine.

"},{"location":"operation-and-maintenance/Cluster-configuration-and-node-management/#connecting-nodes","title":"Connecting nodes","text":"

Checklist:

  • working directory rel/mongooseim (root of a MongooseIM release or installation)
  • the same cookie across all nodes (vm.args -setcookie parameter)
  • each node should be able to ping other nodes using its sname (ex. net_adm:ping('mongoose@localhost'))
  • RDBMS backend is configured, so CETS could discover nodes
"},{"location":"operation-and-maintenance/Cluster-configuration-and-node-management/#initial-node","title":"Initial node","text":"CETSMnesia

Clustering is automatic. There is no difference between nodes.

There is no action required on the initial node.

Just start MongooseIM using mongooseim start or mongooseim live.

"},{"location":"operation-and-maintenance/Cluster-configuration-and-node-management/#new-node-joining-cluster","title":"New node - joining cluster","text":"CETSMnesia

Clustering is automatic.

mongooseimctl start\nmongooseimctl started #waits until MongooseIM starts\nmongooseimctl join_cluster ClusterMember\n

ClusterMember is the name of a running node set in vm.args file, for example mongooseim@localhost. This node has to be part of the cluster we'd like to join.

First, MongooseIM will display a warning and a question if the operation should proceed:

Warning. This will drop all current connections and will discard all persistent data from Mnesia. Do you want to continue? (yes/no)\n

If you type yes MongooseIM will start joining the cluster. Successful output may look like the following:

You have successfully joined the node mongooseim2@localhost to the cluster with node member mongooseim@localhost\n

In order to skip the question you can add option -f which will perform the action without displaying the warning and waiting for the confirmation.

"},{"location":"operation-and-maintenance/Cluster-configuration-and-node-management/#leaving-cluster","title":"Leaving cluster","text":"CETSMnesia

Stopping the node is enough to leave the cluster. If you want to avoid the node joining the cluster again, you have to specify a different cluster_name option in the CETS backend configuration. A different Erlang cookie is a good idea too.

To leave a running node from the cluster, call:

mongooseimctl leave_cluster\n

It only makes sense to use it if the node is part of a cluster, e.g. join_cluster was called on that node before.

Similarly to join_cluster a warning and a question will be displayed unless the option -f is added to the command.

The successful output from the above command may look like the following:

The node mongooseim2@localhost has successfully left the cluster\n
"},{"location":"operation-and-maintenance/Cluster-configuration-and-node-management/#removing-a-node-from-the-cluster","title":"Removing a node from the cluster","text":"CETSMnesia

A stopped node would be automatically removed from the node discovery table in RDBMS database after some time. It is needed so other nodes would not try to connect to the stopped node.

To remove another node from the cluster, call the following command from one of the cluster members:

mongooseimctl remove_from_cluster RemoteNodeName\n

where RemoteNodeName is the name of the node that we'd like to remove from our cluster. This command could be useful when the node is dead and not responding and we'd like to remove it remotely. The successful output from the above command may look like the following:

The node mongooseim2@localhost has been removed from the cluster\n
"},{"location":"operation-and-maintenance/Cluster-configuration-and-node-management/#cluster-status","title":"Cluster status","text":"CETSMnesia

Run the command:

mongooseimctl cets systemInfo\n

joinedNodes should contain a list of properly joined nodes:

\"joinedNodes\" : [\n  \"mongooseim@node1\",\n  \"mongooseim@node2\"\n]\n

It should generally be equal to the list of discoveredNodes.

If it is not equal, you could have some configuration or networking issues. You can check the unavailableNodes, remoteNodesWithUnknownTables, and remoteNodesWithMissingTables lists for more information (generally, these lists should be empty). You can read the description for other fields of systemInfo in the GraphQL API reference.

For a properly configured 2 nodes cluster the metrics would show something like that:

mongooseimctl metric getMetrics --name '[\"global\", \"cets\", \"system\"]'\n{\n  \"data\" : {\n    \"metric\" : {\n      \"getMetrics\" : [\n        {\n          \"unavailable_nodes\" : 0,\n          \"type\" : \"cets_system\",\n          \"remote_unknown_tables\" : 0,\n          \"remote_nodes_without_disco\" : 0,\n          \"remote_nodes_with_unknown_tables\" : 0,\n          \"remote_nodes_with_missing_tables\" : 0,\n          \"remote_missing_tables\" : 0,\n          \"name\" : [\n            \"global\",\n            \"cets\",\n            \"system\"\n          ],\n          \"joined_nodes\" : 2,\n          \"discovery_works\" : 1,\n          \"discovered_nodes\" : 2,\n          \"conflict_tables\" : 0,\n          \"conflict_nodes\" : 0,\n          \"available_nodes\" : 2\n        }\n      ]\n    }\n  }\n}\n

You can use the following commands on any of the running nodes to examine the cluster or to see if a newly added node is properly clustered:

mongooseimctl mnesia info | grep \"running db nodes\"\n

This command shows all running nodes. A healthy cluster should contain all nodes here. For example:

running db nodes = [mongooseim@node1, mongooseim@node2]\n
To see stopped or misbehaving nodes the following command can be useful:

mongooseimctl mnesia info | grep \"stopped db nodes\"\n

This command shows which nodes are considered stopped. This does not necessarily indicate that they are down but might be a symptom of a communication problem.

"},{"location":"operation-and-maintenance/Cluster-configuration-and-node-management/#load-balancing","title":"Load Balancing","text":""},{"location":"operation-and-maintenance/Cluster-configuration-and-node-management/#elastic-load-balancer-elb","title":"Elastic Load Balancer (ELB)","text":"

When using ELB please be advised that some warm-up time may be needed before the load balancer works efficiently for a big load.

"},{"location":"operation-and-maintenance/Cluster-configuration-and-node-management/#software-load-balancer","title":"Software load balancer","text":"

Good examples of load balancing on the application layer are HAProxy and Nginx.

"},{"location":"operation-and-maintenance/Cluster-configuration-and-node-management/#dns-based-load-balancing","title":"DNS-based load balancing","text":"

Load balancing can be performed on a DNS level. A DNS response can have a number of IP addresses that can be returned to the client side in a random order.

On the AWS stack this type of balancing is provided by Route53. The description of their service can be found in the Route53 Developer's Guide.

"},{"location":"operation-and-maintenance/Cluster-configuration-and-node-management/#other","title":"Other","text":"

The approaches described above can be mixed - we can use DNS load balancing to pick a software load balancer which will select one of the nodes.

"},{"location":"operation-and-maintenance/Cluster-management-considerations/","title":"Cluster management considerations","text":"

These apply to bare metal, virtualization, hypervisor, containers and other technologies.

"},{"location":"operation-and-maintenance/Cluster-management-considerations/#single-node-mongooseim","title":"Single-node MongooseIM","text":"

With a single-node MongooseIM, one can set up a vertically scalable system, that is a function of the server resources. MongooseIM can scale from hundreds to tens of thousands of concurrent users.

Note that in a single-node MongooseIM, there is no load distribution, and no fallback or failover in case of a failure.

This architecture is suitable for low-scale deployments, such as testing and development environments on embedded devices, personal computers, or servers.

"},{"location":"operation-and-maintenance/Cluster-management-considerations/#dual-node-mongooseim","title":"Dual-node MongooseIM","text":"

With a dual-node MongooseIM, one can set up a vertically scalable system, that is a function of the servers' resources. We recommend that servers with the same power are used. Both nodes can handle different sets of services, given that these non-MongooseIM services consume roughly the same resources on both servers. In this setup, MongooseIM can scale up to hundreds of thousands of concurrent users.

In a dual-node MongooseIM, there is a 50-50 load distribution - there is a possible fallback or failover in case of a node failure. Please keep in mind that to avoid degrading the service the remaining node should be able to handle the full load when necessary.

This setup is applicable to low to mid-scale deployments used e.g. for functional and load testing. We recommend using real dedicated servers, although MongooseIM could run in a cluster mode with low-power machines, such as embedded devices.

This setup provides better fault tolerance and robustness than the single-node but it's not recommended for production environments. The minimum recommended production setup is 3 nodes.

"},{"location":"operation-and-maintenance/Cluster-management-considerations/#multi-node-mongooseim","title":"Multi-node MongooseIM","text":"

With a multi-node MongooseIM, one can set up a system that is highly scalable both vertically and horizontally and that is still a function of the servers' resources. We recommend that servers with the same power are used. We also recommend that all the nodes handle the same set of services. In this setup, MongooseIM can scale up to tens of millions of concurrent users.

In a multi-node MongooseIM, with n nodes, there is a 1/n load distribution - there is a possible fallback or failover in case of a node failure. To avoid degrading the service the remaining nodes should be able to handle 1/(n-1) load when necessary.

This setup fits mid and large-scale deployments, such as production environments. We recommend using real dedicated, powerful servers.

"},{"location":"operation-and-maintenance/Cluster-management-considerations/#multi-datacenter-mongooseim","title":"Multi-datacenter MongooseIM","text":"

With a multi-datacenter MongooseIM, one can set up a system that is highly scalable both vertically and horizontally. The MongooseIM clusters are simply distributed across continents for local, low-lag client connections, and the clusters are interconnected via high-speed links. In this setup, MongooseIM can scale up to hundreds of millions of concurrent users.

This applies to large and very large-scale deployments.

We advise contacting us in case of such a big deployment.

"},{"location":"operation-and-maintenance/Cluster-management-considerations/#summary-table","title":"Summary table","text":"

Setup: reflects the number of nodes in your cluster. Purpose: is the goal and use of this cluster. Low-end: number of concurrent users on low-power machines, such as laptops, embedded devices, entry-level cloud or bare metal. High-end: number of concurrent users on powerful machines, with lots of memory, multi-core CPU, whether in cloud or bare metal.

Setup Purpose Low-end High-end Single-node Functional testing, development 100 to 10k 100k to 500k Dual-node Low-end system, load testing 1k to 100k 1M to 3M Multi-node High-end production system 10k to 1M 2M to 10M Multi-datacenter Very large scale production system 100k to 10M 10M to 100M"},{"location":"operation-and-maintenance/Cluster-management-considerations/#important-notes","title":"Important notes","text":"

Scalability highly depends on variables such as:

  • machine's power, such as memory, CPU, I/O
  • the type of concurrent users:
    • most iOS apps are not connected in the background, they use APNS to push info to the device
    • web clients use websockets, with fallback on BOSH (HTTP long-polling)
    • client-side and backend-side REST API
  • how much archiving is needed and the latency for storage and querying, which depends a lot on storage backend architecture
  • message throughput:
    • one-to-one
    • MUC
    • MUC light
    • PubSub
    • Presences
    • HTTP notifications (may include queuing systems such as RabbitMQ or Kafka)
  • latency of messaging, both real-time and archived messages
"},{"location":"operation-and-maintenance/Cluster-management-considerations/#os-configuration","title":"OS configuration","text":"

To achieve high scalability you have to adjust the configuration of your operating system.

First, set some network related parameters - this is what we use for load testing:

Parameter Value net.ipv4.ip_local_port_range 1024 65535 net.ipv4.tcp_mem 16777216 16777216 16777216 net.ipv4.tcp_wmem 4096 87380 16777216 net.ipv4.tcp_rmem 4096 87380 16777216

Then, you have to increase the number of file descriptors allowed for the user running your MongooseIM server process. In Linux, this is most commonly done in /etc/security/limits.conf. You should remember, though, that there is a limit to it \u2014 you can't increase it above an upper bound which is set by the fs.file-max kernel parameter. And there is a limit to a possible increase in fs.file-max as well \u2014 you can't increase it beyond 1048576, which is 2^20 and is set by another kernel parameter, fs.nr_open. Once you increase that one, you are good to go.

"},{"location":"operation-and-maintenance/Cluster-restart/","title":"Cluster restart","text":"

When you are using a MongooseIM cluster that uses the Mnesia backend for any extensions, an issue related to the distributed Mnesia nodes may occur.

"},{"location":"operation-and-maintenance/Cluster-restart/#how-to-restart-a-cluster","title":"How to restart a cluster:","text":"

Having Node A and Node B, the cluster restart procedure should occur in the following way:

Start the nodes in the opposite order to the one in which they were stopped. The first node you restart should be the last one to go down. For cluster with 3 nodes, after stopping the nodes ABC, they should be started in CBA order.

"},{"location":"operation-and-maintenance/Cluster-restart/#how-not-to-restart-a-cluster","title":"How NOT to restart a cluster:","text":"

Having Node A and Node B.

When the nodes are stopped in AB order, starting the node A first can result in issues related to the distributed Mnesia nodes and not bring up a node that is fully operational.

Changing the order of the restarted nodes can cause issues with distributed Mnesia. Make sure to follow the recommendations if you are using Mnesia backend for any of the extensions. Please note that for some of the extensions, the Mnesia backend is set by default without having that configured explicitly in the configuration file.

For more information related to the cluster configuration and maintenance, please see Cluster configuration and node management section.

"},{"location":"operation-and-maintenance/Humio/","title":"Humio and MongooseIM","text":""},{"location":"operation-and-maintenance/Humio/#getting-humios-ingest-token","title":"Getting Humio's ingest token","text":"

Visit this url to create a new sandbox's ingest token.

The URL is:

https://cloud.humio.com/YOUR_REPOSITORY_NAME_HERE/settings/ingest-tokens\n
"},{"location":"operation-and-maintenance/Humio/#configure-filebeat","title":"Configure Filebeat","text":"

Configure Filebeat, using this config file priv/filebeat.mongooseim.humio.yml.

We recommend using the Filebeat docker container. You have to use an open-source version of Filebeat, which has the oss suffix.

This example mounts a log directory $(pwd)/_build/mim1/rel/mongooseim/log as a volume for Filebeat. It also mounts a configuration file $(pwd)/priv/filebeat.mongooseim.humio.yml. Most likely these paths would be different on your machine.

Pass your Humio ingest token as a password argument. Or uncomment and change it inside the filebeat.mongooseim.humio.yml file.

docker run -d \\\n    --name mongooseim-filebeat \\\n    -v \"$(pwd)/_build/mim1/rel/mongooseim/log:/usr/lib/mongooseim/log\" \\\n    -v \"$(pwd)/priv/filebeat.mongooseim.humio.yml:/usr/share/filebeat/filebeat.yml:ro\" \\\n    docker.elastic.co/beats/filebeat-oss:7.9.2 \\\n    filebeat -e -E output.elasticsearch.password=\"abc12345-xxxx-yyyy-zzzz-123456789abc\"\n

Argument -e enables debugging information for Filebeat that can be visible using the docker logs mongooseim-filebeat command.

"},{"location":"operation-and-maintenance/Humio/#viewing-logs","title":"Viewing logs","text":"

Navigate to https://cloud.humio.com/sandbox/search to see the Sandbox's dashboard.

A list of log messages:

Structured log message:

"},{"location":"operation-and-maintenance/Logging-%26-monitoring/","title":"Logging & monitoring","text":""},{"location":"operation-and-maintenance/Logging-%26-monitoring/#logs","title":"Logs","text":"

We strongly recommend storing logs in one centralized place when working in a clustered environment. MongooseIM uses the standard OTP logging framework: Logger. Its handlers can be replaced and customised, according to Logger's documentation.

"},{"location":"operation-and-maintenance/Logging-%26-monitoring/#syslog-integration","title":"Syslog integration","text":"

MongooseIM uses syslogger as a Logger handler for syslog. To activate it you have to add syslogger to the applications section in src/mongooseim/app.src:

%% syslogger, % uncomment to enable a logger handler for syslog\n

You also need to edit rel/files/app.config and uncomment the lines:

 % Uncomment these lines to enable logging to syslog.\n % Remember to add syslogger as a dependency in mongooseim.app.src.\n%% {syslogger, [\n%%     {ident, \"mongooseim\"},\n%%     {logger, [\n%%         {handler, sys_log, syslogger,\n%%          #{formatter => {logger_formatter, #{single_line => true}}}}]}]\n%% },\n

You can provide different parameters to change the handler's behaviour as described in the syslogger's GitHub page:

  • ident - a string to tag all the syslog messages with. The default is mongooseim.
  • facility - the facility to log to (see the syslog documentation).
  • log_opts - see the syslog documentation for the description.

Depending on the system you use, remember to also add the appropriate line in the syslog config file. For example, if the facility local0 is set:

local0.info                     /var/log/mongooseim.log\n

All the logs of level info should be passed to the /var/log/mongooseim.log file.

Example log (e.g. tail -f /var/log/mongooseim.log):

Apr  1 12:36:49 User.local mongooseim[6068]: [info] <0.7.0> Application mnesia started on node mongooseim@localhost\n
"},{"location":"operation-and-maintenance/Logging-%26-monitoring/#further-multiserver-integration","title":"Further / multiserver integration","text":"

For more advanced processing and analysis of logs, including gathering logs from multiple machines, you can use one of the many available systems (e.g. logstash/elasticsearch/kibana, graylog, splunk), by redirecting mongoose logs to such service with an appropriate Logger's handler.

Check Logging for more information.

"},{"location":"operation-and-maintenance/Logging-%26-monitoring/#monitoring","title":"Monitoring","text":""},{"location":"operation-and-maintenance/Logging-%26-monitoring/#wombatoam","title":"WombatOAM","text":"

WombatOAM is an operations and maintenance framework for Erlang based systems. Its Web Dashboard displays this data in an aggregated manner. Additionally, WombatOAM provides interfaces to feed the data to other OAM tools such as Graphite, Nagios or Zabbix.

For more information see: WombatOAM.

"},{"location":"operation-and-maintenance/Logging-%26-monitoring/#graphite-collectd","title":"graphite-collectd","text":"

To monitor MongooseIM during load testing, we recommend the following open source applications:

  • Grafana is used for data presentation.
  • Graphite is a server used for metrics storage.
  • collectd is a daemon running on the monitored nodes capturing data related to CPU and Memory usage, IO etc.
"},{"location":"operation-and-maintenance/Logging-%26-monitoring/#plug-in-exometer-reporters","title":"Plug-in Exometer reporters","text":"

MongooseIM uses a fork of Exometer library for collecting metrics. Exometer has many plug-in reporters that can send metrics to external services. We maintain exometer_report_graphite and exometer_report_statsd for Graphite and StatsD respectively. It is possible to enable them in MongooseIM via the app.config file. The file sits next to the mongooseim.toml file in the rel/files and _REL_DIR_/etc directories.

Below you can find a sample configuration. It shows setting up a reporter connecting to graphite running on localhost.

You can see an additional option not listed in the Exometer docs - mongooseim_report_interval, which sets the metrics' resolution, i.e. how often Exometer gathers and sends metrics through reporters. By default, the resolution is set to 60 seconds.

...\n{exometer_core, [\n    {mongooseim_report_interval, 60000}, %% 60 seconds\n    {report, [\n        {reporters, [\n                     {exometer_report_graphite, [\n                                                 {prefix, \"mongooseim\"},\n                                                 {connect_timeout, 5000},\n                                                 {host, \"127.0.0.1\"},\n                                                 {port, 2003},\n                                                 {api_key, \"\"}\n                                                ]}\n                    ]}\n    ]}\n  ]}\n...\n
"},{"location":"operation-and-maintenance/Logging-%26-monitoring/#run-graphite-grafana-in-docker-quick-start","title":"Run Graphite & Grafana in Docker - quick start","text":"

The following commands will download the latest version of kamon/grafana_graphite docker image that contains both Grafana and Graphite, and start them while mounting the local directory ./docker-grafana-graphite-master/data for metric persistence:

curl -SL https://github.com/kamon-io/docker-grafana-graphite/archive/master.tar.gz | tar -xzf -\nmake -C docker-grafana-graphite-master up\n

Go to http://localhost:80 to view the Grafana dashboard that's already set up to use metrics from Graphite.

"},{"location":"operation-and-maintenance/Logging-%26-monitoring/#add-metrics-to-grafana-dashboard","title":"Add metrics to Grafana dashboard","text":"

We recommend the following metrics as a baseline for tracking your MongooseIM installation. For time-based metrics, you can choose to display multiple calculated values for a reporting period - we recommend tracking at least max, median and mean.

Session count:                   <prefix>.global.totalSessionCount.value\nXMPP messages received:          <prefix>.<domain>.xmppMessageReceived.one\nXMPP messages sent:              <prefix>.<domain>.xmppMessageSent.one\nSuccessful logins:               <prefix>.<domain>.sessionSuccessfulLogins.one\nLogouts:                         <prefix>.<domain>.sessionLogouts.one\nAuthorization time:              <prefix>.<domain>.backends.auth.authorize.<value-type>\nRDBMS \"simple\" query time:       <prefix>.<domain>.backends.mongoose_rdbms.query.<value-type>\nRDBMS prepared query time:       <prefix>.<domain>.backends.mongoose_rdbms.execute.<value-type>\nMAM lookups:                     <prefix>.<domain>.mam_lookup_messages.one\nMAM archivization time:          <prefix>.<domain>.backends.mod_mam_pm.archive.<value-type>\nMAM lookup time:                 <prefix>.<domain>.backends.mod_mam_pm.lookup.<value-type>\nMAM private messages flush time: <prefix>.<domain>.mod_mam_rdbms_async_pool_writer.flush_time.<value-type>\nMAM MUC messages flush time:     <prefix>.<domain>.mod_mam_muc_rdbms_async_pool_writer.flush_time.<value-type>\n

Note that RDBMS metrics are only relevant if MongooseIM is configured with an RDBMS backend, MAM metrics when mod_mam is enabled and MAM flush times when MAM is configured with an RDBMS backend with async_writer option (default).

"},{"location":"operation-and-maintenance/Logging-%26-monitoring/#example-graph-in-grafana","title":"Example graph in Grafana","text":"

This screenshot shows a graph plotting the RDBMS simple query time metric mentioned above. The graph is plotted for three nodes with each node having a different prefix: mongoose.node1, mongoose.node2 and mongoose.node3.

The queries take metrics for all nodes and all domains (** is a wildcard for multiple parts of the metric name) and group them per-node and per-value-type (respectively 1st and -1st part of the metric's name). Parts of the names are indexed from 0.

Time-based metrics in MongooseIM are given in microseconds, so to display human-readable values in graph's legend, the Y-axis unit has to be edited on the Axes tab.

"},{"location":"operation-and-maintenance/Logging-fields/","title":"Fields","text":"
  • reason, class, stacktrace: standard error catching fields.
  • module, function, line, timestamp, node, when, pid: reserved fields (could be used by logger itself).
  • When logging IQs, adding the acc field should be enough. If acc not available, iq can be used. If iq is not available, sub_el could be logged as a last option.
  • what: why we are logging. We often use the function name as the what field.
    • Suffixes: If something goes wrong, use a _failed suffix (instead of unable_to and _error). The most common suffixes are _starting, _started, _stopping, _stopped, and _result.
    • Prefixes: We sometimes add prefixes to what to signal where we are logging from. Such prefixes should be short. Please, don't prefix with the complete module name. Some examples for prefixes are: mam_, sm_, muc_, auth_, s2s_, pool_.

When checking the final event name, remove duplicates from it.

Bad event names Good event names Why s2s_dns_error s2s_dns_lookup_failed Not _failed prefix s2s_dns_error s2s_dns_lookup_timeout More specific failure reason mod_mam_starting mam_starting Use mam_ prefix for MAM modules mongoose_wpool_mgr_pool_starting pool_starting Too long and repetitive"},{"location":"operation-and-maintenance/Logging-fields/#logger-defaults","title":"Logger defaults","text":"

Timestamp should be ordered first when possible, so that sorting is automatic.

Name Type Description Examples timestamp atom The timestamp (with timezone information) 2018-07-11T13:41:10+00:00 at string Where in code the call or log line was emitted module:function:line level enum log level according to RFC 5424 warning"},{"location":"operation-and-maintenance/Logging-fields/#generally-required","title":"Generally required","text":"Name Type Description Examples Notes what atom Event (or issue) name remove_user_failed text binary Human readable description <<\"MAM failed to contact MySQL\">> result binary Explanation of the what key failed Optional tags [atom] The subcomponent taking action and logging data. [c2s, presence], [mam, rdbms] This category should be chosen based on filtering needs, and may represent the domain of concern for some operations"},{"location":"operation-and-maintenance/Logging-fields/#http-requests","title":"HTTP requests","text":"Name Type Description Examples Notes path binary HTTP path <<\"/api/add_user\">> code integer HTTP code 200 ip tuple IP address inet:ip_address() port integer TCP/UDP port number 5222 peer tuple peer() :: {inet:ip_address(), inet:port_number()} {{127,0,0,1},5222} req map Cowboy request Provide when available reply_body binary Body reply <<\"ok\">>"},{"location":"operation-and-maintenance/Logging-fields/#xmpp","title":"XMPP","text":"Name Type Description Examples Notes acc map mongoose_acc, used to extract fields #{...} user binary Local Username <<\"alice\">> Use #jid.luser when available server binary Local Server (host) name <<\"localhost\">> Use #jid.lserver when available sub_host binary Subhost when MUC or pubsub are used <<\"muc.localhost\">> It's not the same as server remote_user binary Remote Username (usually who makes IQ requests) <<\"alice\">> Use #jid.luser when available remote_server binary Remote Server (usually who makes IQ requests) <<\"otherhost\">> Use #jid.lserver when available iq record MongooseIM IQ record #iq{} Provide when available (but it could be acc instead) 
sub_el record IQ sub element #xmlel{} Provide ONLY if iq not available c2s_state record C2S process state, that would be used by formatter #state{} from_jid binary Accumulator's from_jid <<\"alice@localhost\">> to_jid binary Accumulator's to_jid <<\"to@localhost\">> packet binary Accumulator's element <<\"<message>...\">> Encoded as XML, not erlang records exml_packet record Same as packet, but in #xmlel{} format #xmlel{} Record, formatted in formatter"},{"location":"operation-and-maintenance/Logging-fields/#other-requests","title":"Other requests","text":"Name Type Description Examples Notes duration integer Duration of some operation in milliseconds 5000 Don't use it for microseconds state_name atom State name in gen_fsm wait_for_stream state term gen_server state #state{} Consider adding a formatter call_from tuple From argument in gen_server's handle_call {Pid, Tag}"},{"location":"operation-and-maintenance/Logging-fields/#when-logging-exceptions","title":"When logging exceptions","text":"

The what key should contain an _exception suffix. The following keys should be present:

Name Type Description Examples Notes class enum catch Class:Reason:Stacktrace error reason term catch Class:Reason:Stacktrace http_timeout stacktrace term catch Class:Reason:Stacktrace [...] Formatted by formatter"},{"location":"operation-and-maintenance/Logging-fields/#macros-for-logging-unexpected-requests","title":"Macros for logging unexpected requests","text":"

gen_server processes sometimes receive messages they cannot process. We use macros to log such events (otherwise the same logging code would have to be repeated in each gen_server module).

We don't need to log state or state names for such events.

%% We don't always handle unexpected calls.\nhandle_call(Request, From, State) ->\n    ?UNEXPECTED_CALL(Request, From),\n    {reply, {error, unexpected_call}, State}.\n\n%% We don't always handle unexpected casts.\nhandle_cast(Msg, State) ->\n    ?UNEXPECTED_CAST(Msg),\n    {noreply, State}.\n\n%% We SHOULD ignore all unexpected messages, because they could arrive in case\n%% of gen_server call timeouts.\nhandle_info(Msg, State) ->\n    ?UNEXPECTED_INFO(Msg),\n    {noreply, State}.\n

These macros translate into warning logs with the following keys, respectively:

#{what => unexpected_cast, msg => Msg}.\n#{what => unexpected_info, msg => Msg}.\n#{what => unexpected_call, msg => Msg, call_from => From}.\n
"},{"location":"operation-and-maintenance/Logging/","title":"Configuring logging","text":"

The main configuration for logging is in the Application Config file. You can find it in mongooseim/etc/app.config in the release directory.

"},{"location":"operation-and-maintenance/Logging/#primary-log-level","title":"Primary log level","text":"

Primary log level sets maximum log level in the system. This check is applied for any event in the system before the event is passed to any handler.

Primary log level, that is used before MongooseIM config is loaded:

[\n    {kernel, [\n         {logger_level, notice}\n    ]}\n].\n

Once MongooseIM config is loaded, the loglevel option from mongooseim.toml is used instead.

"},{"location":"operation-and-maintenance/Logging/#primary-filters","title":"Primary filters","text":"

Functions from the filters section are applied for any message once it passes the primary log level check.

Keep that configuration block as it is, unless you are planning to extend the filtering logic.

[{kernel, [\n  {logger, [\n    %% Default filters applied to all events before passing them to handlers:\n    {filters, log, [\n           %% If we want to see complete accumulator in logs\n        %  {preserve_acc_filter, {fun mongoose_log_filter:preserve_acc_filter/2, no_state}},\n           {format_packet_filter, {fun mongoose_log_filter:format_packet_filter/2, no_state}},\n           {format_acc_filter, {fun mongoose_log_filter:format_acc_filter/2, no_state}},\n           {format_c2s_state_filter, {fun mongoose_log_filter:format_c2s_state_filter/2, no_state}},\n           {format_stacktrace_filter, {fun mongoose_log_filter:format_stacktrace_filter/2, no_state}}\n        ]},\n....\n}}].\n

preserve_acc_filter filter is disabled by default, but could be enabled, if you are interested in debugging the accumulator logic (see the mongoose_acc module).

"},{"location":"operation-and-maintenance/Logging/#shell-log-handler","title":"Shell log handler","text":"
  • Controls what MongooseIM prints to the standard output.
  • Erlang OTP docs for logger_std_h
    {handler, shell_log, logger_std_h, #{\n         %% Default log level for handlers is to log everything, that\n         %% passes primary log level and module log levels\n         level => all,\n         formatter => {mongoose_flatlog_formatter, #{\n           map_depth => 3,\n           term_depth => 50\n         }}\n    }},\n
"},{"location":"operation-and-maintenance/Logging/#file-log-handler","title":"File log handler","text":"
  • Controls what and how MongooseIM prints into files.
  • Erlang OTP docs for logger_disk_log_h
  • You can have several file handlers.
  • File handlers should have different handler IDs (i.e. disk_log, disk_json_log)
  • There are two file log handlers defined by default: one that formats in JSON and one that formats in Logfmt format (key=value pairs).
  • Both JSON and Logfmt handlers are enabled by default. We recommend to disable handlers, that you are not using. This could improve performance greatly. To disable them, just remove them from app.config.
  • Check information below about log formatters.
    {handler, disk_log, logger_disk_log_h, #{\n         level => all,\n         config => #{\n           file => \"{{mongooseim_log_dir}}/mongooseim.log\",\n           type => wrap,\n           max_no_files => 5,\n           max_no_bytes => 2097152,\n           sync_mode_qlen => 2000, % If sync_mode_qlen is set to the same value as drop_mode_qlen,\n           drop_mode_qlen => 2000, % synchronous mode is disabled. That is, the handler always runs\n           flush_qlen => 5000,     % in asynchronous mode, unless dropping or flushing is invoked.\n           overload_kill_enable => true\n           % Documentation about Overload protection, together with default values, can be found here:\n           % http://erlang.org/doc/apps/kernel/logger_chapter.html#protecting-the-handler-from-overload\n         },\n         formatter => ...\n    }},\n
"},{"location":"operation-and-maintenance/Logging/#logfmt-file-log-handler","title":"Logfmt file log handler","text":"

Wrapper around the flatlog library with custom template options configured by default.

Options:

  • map_depth - the maximum depth to format maps. map_depth => 3 means that the map #{one => #{two => #{three => #{four => key}}}} would be printed as one_two_three_four=.... While the map #{one => #{two => #{three => key}}} would be still printed as one_two_three=key.
  • term_depth - the maximum depth to which terms are printed. Anything below this depth is replaced with .... unlimited by default.
formatter => {mongoose_flatlog_formatter, #{\n  map_depth => 3,\n  term_depth => 50\n}}\n
"},{"location":"operation-and-maintenance/Logging/#json-file-log-handler","title":"JSON file log handler","text":"

JSON formatted file. It could be used to store messages in ELK, in Humio or in Splunk.

Check this tutorial to configure MongooseIM with Humio. Check below information to configure MongooseIM with ELK.

You can use Filebeat to send messages from the file into ELK.

Options:

  • format_depth - the maximum depth to which terms are printed. Anything below this depth is replaced with .... unlimited by default.
  • format_chars_limit - A soft limit on the number of characters when printing terms. When the number of characters is reached, remaining structures are replaced by \"...\". format_chars_limit defaults to unlimited, which means no limit on the number of characters returned.
  • depth - the maximum depth for json properties. Default is unlimited. Options deeper than the depth are replaced with the ... string.
formatter => {mongoose_json_formatter, #{\n  format_depth => 10,\n  format_chars_limit => 3000,\n  depth => 10\n}}\n
"},{"location":"operation-and-maintenance/Logging/#different-log-level-for-a-specific-module","title":"Different log level for a specific module","text":"

Motivation:

  • Sometimes we are interested in debug messages from a particular module.
  • Useful to debug new or experimental modules.

This example:

  • Changes log level for one particular module.
  • Forwards the log messages to any enabled handler.

Changes:

  • Enable module log level for mongoose_c2s.
%% Module log level\n{module_level, debug, [mongoose_c2s]},\n
"},{"location":"operation-and-maintenance/Logging/#separate-log-for-module-debugging","title":"Separate log for module debugging","text":"

Motivation:

  • Sometimes we are only interested in log messages from one particular module.
  • Useful for debugging and development.
  • Does not affect overload protection in other handlers.

This example:

  • Forwards all logging from a module mongoose_c2s to a separate file.
  • Keeps the other handlers intact.

Changes:

  • Modify any existing handler to explicitly set log level.
  • Enable module log level for mongoose_c2s.
  • Add a new custom handler into kernel.logger options.

Issues:

  • This would also disable module log level logic for other handlers.
%% Existing handlers\n{handler, shell_log, logger_std_h, #{\n     level => notice, %% was level => all\n     ...\n},\n{handler, disk_log, logger_disk_log_h, #{\n      level => notice,\n      ...\n},\n...\n%% Module log level\n{module_level, debug, [mongoose_c2s]},\n%% New handler\n{handler, disk_log_c2s, logger_disk_log_h, #{\n     level => debug,\n     config => #{\n       %% Choose destination:\n       file => \"{{mongooseim_log_dir}}/mongoose_c2s.log\",\n       %% Common options:\n       type => wrap,\n       max_no_files => 5,\n       max_no_bytes => 2097152,\n       sync_mode_qlen => 2000,\n       drop_mode_qlen => 2000,\n       flush_qlen => 5000,\n       overload_kill_enable => true\n     },\n     formatter => {mongoose_flatlog_formatter, #{\n       map_depth => 3,\n       term_depth => 50\n     }},\n     filters => [\n       %% That filter matches messages from mongoose_c2s module\n       {module_filter, {fun mongoose_log_filter:filter_module/2, [mongoose_c2s]}}\n     ]\n}}\n
"},{"location":"operation-and-maintenance/Logging/#setting-up-kibana","title":"Setting up Kibana","text":"

This example sets up ElasticSearch and Kibana for development purposes.

Create a network, so filebeat can find ELK:

docker network create logging\n

Run ELK (consult with the container docs for more options):

docker run -d -p 5601:5601 -p 9200:9200 -p 5044:5044 --network logging --name elk sebp/elk:oss-792\n

Create a volume for logs:

docker volume create mongooseim-logs\n

Run MongooseIM daemon:

docker run -d -t -h mongooseim -v mongooseim-logs:/usr/lib/mongooseim/log \\\n    --network logging --name mongooseim -p 5222:5222 mongooseim/mongooseim:latest\n

The next part is based on Filebeat's docs.

Setup filebeat (should be called once, that creates indexes in Elasticsearch):

docker run --network logging --rm \\\n    docker.elastic.co/beats/filebeat-oss:7.9.2 \\\n    setup -E setup.kibana.host=elk:5601 \\\n          -E output.elasticsearch.hosts='[\"elk:9200\"]'\n

Create filebeat.mongooseim.yml config file:

filebeat.inputs:\n- paths:\n   - /usr/lib/mongooseim/log/mongooseim.json.1\n  input_type: log\n  json.keys_under_root: true\n  json.add_error_key: true\n  json.overwrite_keys: true\n\nprocessors:\n  # Keep the original \"when\" field too, because of microseconds precision\n  - timestamp:\n      field: when\n      layouts:\n        # Date '2006-01-02T15:04:05.999Z' in mongoose format\n        - '2006-01-02T15:04:05.999+00:00'\n      test:\n        - '2020-09-29T11:25:51.925316+00:00'\n

Create a volume for persistent Filebeat data (so, it would not insert log duplicates, if mongooseim-filebeat container is recreated):

docker volume create filebeat-data\n

Actually run the Filebeat daemon:

docker run -d \\\n    --network logging \\\n    --name mongooseim-filebeat \\\n    -v mongooseim-logs:/usr/lib/mongooseim/log \\\n    -v filebeat-data:/usr/share/filebeat/data \\\n    -v=\"$(pwd)/filebeat.mongooseim.yml:/usr/share/filebeat/filebeat.yml:ro\" \\\n    docker.elastic.co/beats/filebeat-oss:7.9.2 \\\n    filebeat -e -E output.elasticsearch.hosts='[\"elk:9200\"]'\n

In case you want to store and view logs from a dev server in Elasticsearch:

docker run -d \\\n    --network logging \\\n    --name mongooseim-filebeat \\\n    -v \"$(pwd)/_build/mim1/rel/mongooseim/log:/usr/lib/mongooseim/log\" \\\n    -v=\"$(pwd)/priv/filebeat.mongooseim.yml:/usr/share/filebeat/filebeat.yml:ro\" \\\n    docker.elastic.co/beats/filebeat-oss:7.9.2 \\\n    filebeat -e -E output.elasticsearch.hosts='[\"elk:9200\"]'\n
"},{"location":"operation-and-maintenance/MongooseIM-metrics/","title":"MongooseIM metrics","text":"

MongooseIM by default collects many metrics showing the user behaviour and general system statistics. They are managed by exometer. MongooseIM uses ESL's fork of this project.

All metrics are divided into the following groups:

  • Per host type metrics: Gathered separately for every host type supported by the cluster.

    Warning

    If a cluster supports many (thousands or more) host types, performance issues might occur. To avoid this, use global equivalents of the metrics with all_metrics_are_global config option.

    • Hook metrics. They are created for every hook and incremented on every call to it.
  • Global metrics: Metrics common for all host types.

    • Data metrics. These are misc. metrics related to data transfers (e.g. sent and received stanza size statistics).
    • VM metrics. Basic Erlang VM statistics.
  • Backend metrics: Histograms with timings of calls to various backends.
"},{"location":"operation-and-maintenance/MongooseIM-metrics/#metrics-types","title":"Metrics types","text":""},{"location":"operation-and-maintenance/MongooseIM-metrics/#spiral","title":"spiral","text":"

This kind of metric provides two values: the total event count (e.g. stanzas processed) and a value in a 60s window (the one value). Dividing the one value by 60 provides an average per-second value over the last minute.

Example: [{total, 1000}, {one, 20}]

"},{"location":"operation-and-maintenance/MongooseIM-metrics/#value","title":"value","text":"

A simple value. It is actually a one-element proplist: [{value, N}].

Example: [{value, 256}]

"},{"location":"operation-and-maintenance/MongooseIM-metrics/#gauge","title":"gauge","text":"

It is similar to a value type but consists of two properties:

  • value
  • ms_since_reset - Time in milliseconds elapsed from the last metric update.

Example: [{value, 12}, {ms_since_reset, 91761}]

"},{"location":"operation-and-maintenance/MongooseIM-metrics/#proplist","title":"proplist","text":"

A metric which is a nonstandard proplist. You can find the lists of keys in metrics descriptions.

Example: [{total,295941736}, {processes_used,263766824}, {atom_used,640435}, {binary,1513152}, {ets,3942592}, {system,32182072}]

"},{"location":"operation-and-maintenance/MongooseIM-metrics/#histogram","title":"histogram","text":"

A histogram collects values over a sliding window of 60s and exposes the following stats:

  • n - A number of samples.
  • mean - An arithmetic mean.
  • min
  • max
  • median
  • 50, 75, 90, 95, 99, 999 - 50th, 75th, 90th, 95th, 99th and 99.9th percentile
"},{"location":"operation-and-maintenance/MongooseIM-metrics/#per-host-type-metrics","title":"Per host type metrics","text":""},{"location":"operation-and-maintenance/MongooseIM-metrics/#hook-metrics","title":"Hook metrics","text":"

There are more hook metrics than what is listed in this table, because they are automatically created for every new hook. As a result it makes more sense to maintain a list of the most relevant or useful items, rather than keeping this table fully in sync with the code.

Name Type Description (when it gets incremented) [HostType, anonymous_purge] spiral An anonymous user disconnects. [HostType, disco_info] spiral An information about the server has been requested via Disco protocol. [HostType, disco_local_features] spiral A list of server features is gathered. [HostType, disco_local_identity] spiral A list of server identities is gathered. [HostType, disco_local_items] spiral A list of server's items (e.g. services) is gathered. [HostType, disco_sm_features] spiral A list of user's features is gathered. [HostType, disco_sm_identity] spiral A list of user's identities is gathered. [HostType, disco_sm_items] spiral A list of user's items is gathered. [HostType, mam_lookup_messages] spiral An archive lookup is performed. [HostType, offline_message] spiral A message was sent to an offline user. (Except for \"error\", \"headline\" and \"groupchat\" message types.) [HostType, offline_groupchat_message] spiral A groupchat message was sent to an offline user. [HostType, privacy_updated_list] spiral User's privacy list is updated. [HostType, resend_offline_messages] spiral A list of offline messages is gathered for delivery to a user's new connection. [HostType, roster_get_subscription_lists] spiral Presence subscription lists (based on which presence updates are broadcasted) are gathered. [HostType, roster_in_subscription] spiral A presence with subscription update is processed. [HostType, roster_out_subscription] spiral A presence with subscription update is received from a client. [HostType, sm_broadcast] spiral A stanza is broadcasted to all of user's resources. [HostType, unset_presence] spiral A user disconnects or sends an unavailable presence."},{"location":"operation-and-maintenance/MongooseIM-metrics/#presences-rosters","title":"Presences & rosters","text":"Name Type Description (when it gets incremented) [HostType, modPresenceSubscriptions] spiral Presence subscription is processed. 
[HostType, modPresenceUnsubscriptions] spiral Presence unsubscription is processed. [HostType, modRosterGets] spiral User's roster is fetched. [HostType, modRosterPush] spiral A roster update is pushed to a single session. [HostType, modRosterSets] spiral User's roster is updated."},{"location":"operation-and-maintenance/MongooseIM-metrics/#privacy-lists","title":"Privacy lists","text":"Name Type Description (when it gets incremented) [HostType, modPrivacyGets] spiral IQ privacy get is processed. [HostType, modPrivacyPush] spiral Privacy list update is sent to a single session. [HostType, modPrivacySets] spiral IQ privacy set is processed. [HostType, modPrivacySetsActive] spiral Active privacy list is changed. [HostType, modPrivacySetsDefault] spiral Default privacy list is changed. [HostType, modPrivacyStanzaAll] spiral A packet is checked against the privacy list. [HostType, modPrivacyStanzaDenied] spiral Privacy list check resulted in deny. [HostType, modPrivacyStanzaBlocked] spiral Privacy list check resulted in block."},{"location":"operation-and-maintenance/MongooseIM-metrics/#other","title":"Other","text":"Name Type Description (when it gets incremented) [HostType, sessionAuthFails] spiral A client failed to authenticate. [HostType, sessionCount] counter Number of active sessions. [HostType, sessionLogouts] spiral A client session is closed. [HostType, sessionSuccessfulLogins] spiral A client session is opened. [HostType, xmppErrorIq] spiral An error IQ is sent to a client. [HostType, xmppErrorMessage] spiral An error message is sent to a client. [HostType, xmppErrorPresence] spiral An error presence is sent to a client. [HostType, xmppErrorTotal] spiral A stanza with error type is routed. [HostType, xmppMessageBounced] spiral A service-unavailable error is sent, because the message recipient if offline. [HostType, xmppIqSent] spiral An IQ is sent by a client. 
[HostType, xmppMessageSent] spiral A message is sent by a client [HostType, xmppPresenceSent] spiral A presence is sent by a client. [HostType, xmppStanzaSent] spiral A stanza is sent by a client. [HostType, xmppIqReceived] spiral An IQ is sent to a client. [HostType, xmppMessageReceived] spiral A message is sent to a client. [HostType, xmppPresenceReceived] spiral A presence is sent to a client. [HostType, xmppStanzaReceived] spiral A stanza is sent to a client. [HostType, xmppStanzaCount] spiral A stanza is sent to and by a client. [HostType, xmppStanzaDropped] spiral A stanza is dropped due to an AMP rule or a filter_packet processing flow."},{"location":"operation-and-maintenance/MongooseIM-metrics/#extension-specific-metrics","title":"Extension-specific metrics","text":"

Metrics specific to an extension, e.g. Message Archive Management, are described in respective module documentation pages.

"},{"location":"operation-and-maintenance/MongooseIM-metrics/#global-metrics","title":"Global metrics","text":"Name Type Description (when it gets incremented) [global, routingErrors] spiral It is not possible to route a stanza (all routing handlers failed). [global, nodeSessionCount] value A number of sessions connected to a given MongooseIM node. [global, totalSessionCount] value A number of sessions connected to a MongooseIM cluster. [global, uniqueSessionCount] value A number of unique users connected to a MongooseIM cluster (e.g. 3 sessions of the same user will be counted as 1 in this metric). [global, cache, unique_sessions_number] gauge A cached value of uniqueSessionCount. It is automatically updated when a unique session count is calculated. [global, nodeUpTime] value Node uptime. [global, clusterSize] value A number of nodes in a MongooseIM cluster seen by a given MongooseIM node (based on Mnesia). For CETS use global.cets.system.joined_nodes instead. [global, tcpPortsUsed] value A number of open tcp connections. This should relate to the number of connected sessions and databases, as well as federations and http requests, in order to detect connection leaks. [global, processQueueLengths] probe The number of queued messages in the internal message queue of every erlang process, and the internal queue of every fsm (ejabberd_s2s). This is sampled every 30 seconds asynchronously. It is a good indicator of an overloaded system: if too many messages are queued at the same time, the system is not able to process the data at the rate it was designed for."},{"location":"operation-and-maintenance/MongooseIM-metrics/#data-metrics","title":"Data metrics","text":"Metric name Type Description [global, data, xmpp, received, xml_stanza_size] histogram A size (in bytes) of a received stanza after decryption. [global, data, xmpp, sent, xml_stanza_size] histogram A size (in bytes) of a sent stanza before encryption. 
[global, data, xmpp, received, c2s, tcp] spiral A size (in bytes) of unencrypted data received from a client via TCP channel. [global, data, xmpp, sent, c2s, tcp] spiral A size (in bytes) of unencrypted data sent to a client via TCP channel. [global, data, xmpp, received, c2s, tls] spiral A size (in bytes) of a data received from a client via TLS channel after decryption. [global, data, xmpp, sent, c2s, tls] spiral A size (in bytes) of a data sent to a client via TLS channel before encryption. [global, data, xmpp, received, c2s, bosh] spiral A size (in bytes) of a data received from a client via BOSH connection. [global, data, xmpp, sent, c2s, bosh] spiral A size (in bytes) of a data sent to a client via BOSH connection. [global, data, xmpp, received, c2s, websocket] spiral A size (in bytes) of a data received from a client via WebSocket connection. [global, data, xmpp, sent, c2s, websocket] spiral A size (in bytes) of a data sent to a client via WebSocket connection. [global, data, xmpp, received, s2s] spiral A size (in bytes) of a data received via TCP and TLS (after decryption) Server-to-Server connections. [global, data, xmpp, sent, s2s] spiral A size (in bytes) of a data sent via TCP and TLS (before encryption) Server-to-Server connections. [global, data, xmpp, received, component] spiral A size (in bytes) of a data received from XMPP component. [global, data, xmpp, sent, component] spiral A size (in bytes) of a data sent to XMPP component. [HostType, data, xmpp, c2s, message, processing_time] histogram Processing time for incomming c2s stanzas. [global, data, dist] proplist Network stats for an Erlang distributed communication. A proplist with values: recv_oct, recv_cnt, recv_max, send_oct, send_max, send_cnt, send_pend, connections. [global, data, rdbms, PoolName] proplist For every RDBMS pool defined, an instance of this metric is available. 
It is a proplist with values workers, recv_oct, recv_cnt, recv_max, send_oct, send_max, send_cnt, send_pend."},{"location":"operation-and-maintenance/MongooseIM-metrics/#cets-system-metrics","title":"CETS system metrics","text":"Metric name Type Description [global, cets, system] proplist A proplist with a list of stats. Description is below. Stat Name Description available_nodes Available nodes (nodes that are connected to us and have the CETS disco process started). unavailable_nodes Unavailable nodes (nodes that do not respond to our pings). joined_nodes Joined nodes (nodes that have our local tables running). discovered_nodes Discovered nodes (nodes that are extracted from the discovery backend). remote_nodes_without_disco Nodes that have more tables registered than the local node. remote_nodes_with_unknown_tables Nodes with unknown tables. remote_unknown_tables Unknown remote tables. remote_nodes_with_missing_tables Nodes that are available, but do not host some of our local tables. remote_missing_tables Nodes that replicate at least one of our local tables to a different list of nodes. conflict_nodes Nodes that replicate at least one of our local tables to a different list of nodes. conflict_tables Tables that have conflicting replication destinations. discovery_works Returns 1 if the last discovery attempt is successful (otherwise returns 0)."},{"location":"operation-and-maintenance/MongooseIM-metrics/#vm-metrics","title":"VM metrics","text":"Metric name Type Description [global, erlang, memory] proplist A proplist with total, processes_used, atom_used, binary, ets and system memory stats. [global, erlang, system_info] proplist A proplist with port_count, port_limit, process_count, process_limit, ets_limit stats."},{"location":"operation-and-maintenance/MongooseIM-metrics/#backend-metrics","title":"Backend metrics","text":"

Some extension modules expose histograms with timings of calls made to their backends. Please check the documentation of modules that are enabled in your config file, in order to learn if they provide them.

All module backend metrics names use the following convention: [global, backends, Module, BackendAction] and [global, backends, Module, BackendAction, count]. The former is a histogram of operation times. However, the time is not recorded if a backend operation exits with an exception. The latter is a number of calls (spiral metric), incremented for every call (even a failed one).

Besides these, following authentication metrics are always available:

  • [HostType, backends, auth, authorize]
  • [HostType, backends, auth, check_password]
  • [HostType, backends, auth, try_register]
  • [HostType, backends, auth, does_user_exist]

These are total times of respective operations. One operation usually requires only a single call to an auth backend but sometimes with e.g. 3 backends configured, the operation may fail for first 2 backends. In such case, these metrics will be updated with combined time of 2 failed and 1 successful request.

Additionally, the RDBMS layer in MongooseIM exposes two more metrics, if RDBMS is configured:

  • [global, backends, mongoose_rdbms, query] - Execution time of a \"simple\" (not prepared) query by a DB driver.
  • [global, backends, mongoose_rdbms, execute] - Execution time of a prepared query by a DB driver.
"},{"location":"operation-and-maintenance/Rolling-upgrade/","title":"Rolling upgrade","text":""},{"location":"operation-and-maintenance/Rolling-upgrade/#rolling-upgrade","title":"Rolling upgrade","text":"

For all MongooseIM production deployments we recommend running multiple server nodes connected in a cluster behind a load-balancer. Rolling upgrade is a process of upgrading MongooseIM cluster, one node at a time. Make sure you have at least the number of nodes able to handle your traffic plus one before the rolling upgrade to guarantee the availability and minimise the downtime. Running different MongooseIM versions at the same time beyond the duration of the upgrade is not recommended and not supported.

Rolling upgrade procedure is recommended over configuration reload which is not supported since version 4.1.

Please note that more complex upgrades that involve schema updates, customisations or have functional changes might require more specific and specially crafted migration procedure.

If you want just to make the changes to the configuration file, please follow steps 1, 3, 4, 6, 7, 8. This type of change can also be done one node at a time. It would require you to check the cluster status, modify the configuration file and restart the node.

The usual MongooseIM cluster upgrade can be achieved with the following steps:

"},{"location":"operation-and-maintenance/Rolling-upgrade/#1-check-the-cluster-status","title":"1. Check the cluster status.","text":"

Use the following command on the running nodes and examine the status of the cluster:

mongooseimctl mnesia info | grep \"running db nodes\"\n\nrunning db nodes = [mongooseim@node1, mongooseim@node2]\n

This command shows all running nodes. A healthy cluster should list all nodes that are part of the cluster.

Should you have any issues related to node clustering, please refer to Cluster configuration and node management section.

"},{"location":"operation-and-maintenance/Rolling-upgrade/#2-copy-the-configuration-file","title":"2. Copy the configuration file.","text":"

Make a copy of the configuration file before the upgrade, as some package managers might override your custom configuration with the default one. Please note that since version 4.1 *.cfg MongooseIM configuration format is no longer supported and needs to be rewritten in the new *.toml format.

"},{"location":"operation-and-maintenance/Rolling-upgrade/#3-apply-the-changes-from-the-migration-guide","title":"3. Apply the changes from the migration guide.","text":"

All modifications of the configuration file or updates of the database schema, that are required to perform version upgrade, can be found in the Migration Guide section. When upgrading more than one version, please make sure to go over all consecutive migration guides.

For example, when migrating from MongooseIM 3.7 to 4.1, please familiarize yourself with and apply all necessary changes described in the following pages of the Migration Guide section.

  • 3.7.0 to 4.0.0
  • 4.0.0 to 4.0.1
  • 4.0.1 to 4.1.0
"},{"location":"operation-and-maintenance/Rolling-upgrade/#4-stop-the-running-node","title":"4. Stop the running node.","text":"

Use the following command to stop the MongooseIM node:

mongooseimctl stop\n
"},{"location":"operation-and-maintenance/Rolling-upgrade/#5-install-new-mongooseim-version","title":"5. Install new MongooseIM version.","text":"

You can get the new version of MongooseIM by either building MongooseIM from source code or downloading and upgrading from package.

"},{"location":"operation-and-maintenance/Rolling-upgrade/#6-start-the-node","title":"6. Start the node.","text":"

Use the following command to start and check the status of the MongooseIM node and the cluster:

mongooseimctl start\nmongooseimctl status\n\nmongooseimctl mnesia info | grep \"running db nodes\"\n
"},{"location":"operation-and-maintenance/Rolling-upgrade/#7-test-the-cluster","title":"7. Test the cluster.","text":"

Please verify that the nodes are running and part of the same cluster. If the cluster is working as expected, the migration of the node is complete.

"},{"location":"operation-and-maintenance/Rolling-upgrade/#8-upgrade-the-remaining-nodes","title":"8. Upgrade the remaining nodes.","text":"

Once all the prior steps are completed successfully, repeat the process for all nodes that are part of the MongooseIM cluster.

"},{"location":"operation-and-maintenance/Rolling-upgrade/#further-cluster-upgrade-considerations","title":"Further cluster upgrade considerations","text":"

Another way to perform a cluster upgrade while minimising possible downtime is to set up a parallel MongooseIM cluster running the newer version. You can redirect the incoming traffic to the new cluster with the use of a load-balancer.

Once no connections are handled by the old cluster, it can be safely stopped and the migration is complete.

We highly recommend testing the new software release in a staging environment before it is deployed to production.

Should you need any help with the upgrade, deployments or load testing of your MongooseIM cluster, please reach out to us. MongooseIM consultancy and support is part of our commercial offering.

"},{"location":"operation-and-maintenance/System-Metrics-Privacy-Policy/","title":"System Metrics Privacy Policy","text":""},{"location":"operation-and-maintenance/System-Metrics-Privacy-Policy/#introduction","title":"Introduction","text":"

MongooseIM system metrics are gathered to analyse the trends and needs of our users, improve MongooseIM, and let us know where to focus our efforts. This section is devoted to explaining how to customise, read, enable and disable collecting of the system metrics.

"},{"location":"operation-and-maintenance/System-Metrics-Privacy-Policy/#consent","title":"Consent","text":"

To ensure transparency, a log message is generated on every MongooseIM node start (unless the metrics service is configured with the report option) to show that the functionality is enabled. The user is being notified that the metrics are gathered and has the right to withdraw the consent at any time without limiting the functionality of the product. For more information on how to disable this feature, please see the Services section.

"},{"location":"operation-and-maintenance/System-Metrics-Privacy-Policy/#what-information-is-being-gathered","title":"What information is being gathered?","text":"

When introducing this feature, it is crucial for us to be fully transparent as to what information is being gathered. In general, we capture information on how MongooseIM is being used, its version and the chosen feature set. We only report the names of known modules and APIs that are part of the opensource product. All additional customisations are simply counted without disclosing any specific details. The user can view all the information that is shared in two different ways. The log file system_metrics_report.json contains the most recent report that was sent. Additionally, the user can configure the Tracking ID to use their own Google Analytics account and have a view of their MongooseIM status in that dashboard. For more information on how to set up the Tracking ID, please see How to configure additional and private Tracking ID in Google Analytics.

The full list of information that is being gathered can be seen below:

  • MongooseIM node uptime.
  • MongooseIM version.
  • Number of nodes that are part of the MongooseIM cluster.
  • Generic modules that are part of the opensource project and are in use. Some modules report what database they use as a backend, e.g. Sample report.
  • Number of custom modules - without disclosing any details, we are just curious to see if there are any.
  • Number of connected external XMPP components.
  • List of configured REST APIs that are part of the opensource project.
  • XMPP transport mechanisms like, TCP/TLS, WebSockets or BOSH.
  • Geographical Data - Google Analytics is providing several geographical dimensions, such as City, Country, Continent. These values are derived from the IP address the data was sent from. See About Geographical Data for more details.
"},{"location":"operation-and-maintenance/System-Metrics-Privacy-Policy/#how-is-the-information-being-used","title":"How is the information being used?","text":"

The information collected is automatically anonymised before it is being processed any further. Each MongooseIM is randomly generating a Client ID that is being attached to the reports. The collected data has only statistical relevance and aims to help us understand the needs of our users. Knowing how our product is used will allow us to identify the core value it brings to the users. It will point out the direction in which to expand it and show us how to target our further efforts developing it.

"},{"location":"operation-and-maintenance/System-Metrics-Privacy-Policy/#how-does-a-report-look-like","title":"How does a report look like?","text":"

A sample report showing metrics for the mod_vcard backends from Google Analytics can be found below.

Based on such report we can see the frequency of different backends being used with mod_vcard.

"},{"location":"operation-and-maintenance/System-Metrics-Privacy-Policy/#how-often-are-the-metrics-reported","title":"How often are the metrics reported?","text":"

Metrics are reported first shortly after the system startup and later at regular intervals. These timers are configurable using the initial_report and periodic_report parameters. The default values are 5 minutes for the initial report and 3 hours for the periodic one. These reporting intervals can be changed depending on the configuration parameters.

"},{"location":"operation-and-maintenance/System-Metrics-Privacy-Policy/#how-to-configure-this-service","title":"How to configure this service?","text":"

This functionality is provided as a \"service\". For more details regarding service configuration, please see the Services section.

"},{"location":"operation-and-maintenance/System-Metrics-Privacy-Policy/#how-to-configure-additional-and-private-tracking-id-in-google-analytics","title":"How to configure additional and private Tracking ID in Google Analytics?","text":"

The data is gathered and forwarded to Google Analytics. The user can add custom Google Analytics Tracking ID in the MongooseIM configuration and see all incoming events that are related to their own system metrics. For more details on how to create or sign in to the Google Analytics account, please see Get Started with Analytics.

Tracking ID is a property identification code that all collected data is associated with. It is determining the destination where the collected data is sent. To create a new Tracking ID, please follow the steps below:

Warning

MongooseIM no longer supports Universal Analytics. To use metrics, you need to create a Google Analytics 4 instance.

  • Go to the Admin tab of your user dashboard.
  • Create a new account with + Create Account.
  • Add new property with + Create Property.
    • Within the new property go to Data Streams > Add stream > Web.
    • After successful creation, the ID can be found in the top right corner of the section and has the following format G-XXXX and is named Measurement ID.
  • To create an API secret, in a Data Stream view go to Event > Measurement Protocol API secrets and use the Create button in the top right corner to create a new secret.
"},{"location":"operation-and-maintenance/System-Metrics-Privacy-Policy/#example-configuration","title":"Example configuration","text":"

New Tracking ID can be added to the list of options

[services.service_mongoose_system_metrics]\n  initial_report = 300_000\n  periodic_report = 10_800_000\n  tracking_id.id = \"G-XXXX\"\n  tracking_id.secret = \"Secret\"\n

For more details regarding service configuration, please see Services section.

"},{"location":"operation-and-maintenance/System-Metrics-Privacy-Policy/#data-sharing-policy","title":"Data Sharing Policy","text":"

For more information on how Google Analytics collects and processes data, please see Google Privacy & Terms. Google Analytics is being used due to the ease of host and display reporting information. We will not share any user specific information with further third parties not mentioned in this document. Some insight into the statistical significance regarding our findings from the bulk data collected, has been shared as a blog post on our website.

"},{"location":"operation-and-maintenance/gdpr-considerations/","title":"GDPR considerations","text":"

This page describes what GDPR implies in terms of server management.

"},{"location":"operation-and-maintenance/gdpr-considerations/#data-affected-by-gdpr-commands","title":"Data affected by GDPR commands","text":"
  • inbox - All entries in the subject's inbox. If their messages are stored in other users' inbox, they will not be removed.
  • message archive - Same as above for 1-1 messages. In case of group chat messages, they are retrieved as personal data but not removed.
  • offline storage - All messages stored for delivery.
  • roster - All entries in the subject's roster. Other users' rosters are NOT affected, even if they include the subject's JID or other data.
  • vCard - The entire content of the subject's vCard.
  • private XML storage - All items stored by the subject will be removed.
  • publish-subscribe
    • retrieval: all subject's subscriptions and nodes (with their payloads included).
    • removal: subject's subscriptions, push and PEP nodes (with their data included).
"},{"location":"operation-and-maintenance/gdpr-considerations/#gdpr-cli-commands","title":"GDPR CLI commands","text":"

All CLI commands are accessible via the mongooseimctl command, located in the bin/ directory inside the MIM release.

"},{"location":"operation-and-maintenance/gdpr-considerations/#creating-a-gdpr-safe-user-account","title":"Creating a GDPR-safe user account","text":"

mongooseimctl account registerUser --domain <domain> --password <password>

This command will create an anonymised JID with a random username part. It ensures that no personal information will be leaked via logs or database entries, which include the user's JID.

"},{"location":"operation-and-maintenance/gdpr-considerations/#example","title":"Example","text":"
$ mongooseimctl account registerUser --domain localhost --password secret\n{\n  \"data\" : {\n    \"account\" : {\n      \"registerUser\" : {\n        \"message\" : \"User 1661-175924-881845-449bca06515e060a@localhost successfully registered\",\n        \"jid\" : \"1661-175924-881845-449bca06515e060a@localhost\"\n      }\n    }\n  }\n}\n
"},{"location":"operation-and-maintenance/gdpr-considerations/#retrieval-of-personal-data","title":"Retrieval of Personal Data","text":"

mongooseimctl gdpr retrievePersonalData --username <username> --domain <domain> --resultFilepath <filepath for the output as a zip>

It retrieves personal data accessible to the server (see \"Technical limitations\" section below). The directory where the zip file will be created must already exist.

After the execution is complete, a zip file will appear in the specified folder with personal information in CSV files grouped by type.

"},{"location":"operation-and-maintenance/gdpr-considerations/#example_1","title":"Example","text":"
$ mongooseimctl gdpr retrievePersonalData --username 1661-175924-881845-449bca06515e060a --domain localhost --resultFilepath /home/mongooseim/gdpr/1661-175924-881845-449bca06515e060a.zip\n
"},{"location":"operation-and-maintenance/gdpr-considerations/#removal-of-personal-data","title":"Removal of Personal Data","text":"

mongooseimctl account removeUser --user <jid>

It removes the user's account along with all associated personal data accessible to the server (see \"Technical limitations\" section below).

"},{"location":"operation-and-maintenance/gdpr-considerations/#example_2","title":"Example","text":"
$ mongooseimctl account removeUser --user 1661-175924-881845-449bca06515e060a@localhost\n{\n  \"data\" : {\n    \"account\" : {\n      \"removeUser\" : {\n        \"message\" : \"User 1661-175924-881845-449bca06515e060a@localhost successfully unregistered\",\n        \"jid\" : \"1661-175924-881845-449bca06515e060a@localhost\"\n      }\n    }\n  }\n}\n
"},{"location":"operation-and-maintenance/gdpr-considerations/#technical-limitations-of-gdpr-retrieval-and-removal","title":"Technical limitations of GDPR retrieval and removal","text":"

Both GDPR retrieval and removal will process the data available via configured extensions and database(s). If a part of personal information is managed by an extension that is e.g. temporarily disabled, it won't be retrieved/deleted.

If any MIM extension you had enabled on production is now disabled or you've switched one of them (or e.g. auth module) to another database, it is possible that some personal data will not be retrieved or removed as expected. In such case, please consider starting a separate MIM instance that is configured to access all places, where personal data may be stored. You may also extract the missing pieces of information on your own, however we won't cover the details of this method in this guide.

Please also visit Known issues page to learn about a mod_mam_muc issue that may manifest in some environments.

"},{"location":"operation-and-maintenance/known-issues/","title":"Known issues","text":"

This document provides a list of all known issues with MongooseIM operation and configuration. You may also find proposed workarounds if any are available.

"},{"location":"operation-and-maintenance/known-issues/#missing-muc-light-room-config-fields-with-rdbms-backend","title":"Missing MUC Light room config fields with RDBMS backend","text":"

Before MongooseIM 3.5.x (incl.) new MUC Light rooms could be created with some config fields absent in the RDBMS table. These options couldn't be re-added later by changing the room config via requests from the clients.

It happened when the default config was a subset of the schema, and the client hasn't provided these values when a room was created.

Please note that this issue was resolved from MIM 3.6.0 onwards as the default_config option was deleted.

"},{"location":"operation-and-maintenance/known-issues/#how-to-fix-this","title":"How to fix this?","text":"

You have to iterate over all rooms in the DB (muc_light_rooms table) and add missing entries to the muc_light_config table. Every option is inserted as a separate row and is stored as plain text, so it should be straightforward.

Let's say you were using the following config in mongooseim.cfg:

{config_schema, [\n                 \"roomname\",\n                 \"subject\",\n                 \"background\",\n                 \"notification_sound\"\n                ]},\n{default_config, [\n                  {\"roomname\", \"The room\"},\n                  {\"subject\", \"Chit-chat\"}\n                 ]}\n

Your client application has created some rooms without the background option by mistake.

For every id in the muc_light_rooms table, you need to execute:

INSERT INTO muc_light_config(room_id, opt, val) VALUES ('put id here', 'background', 'new default value');\n
"},{"location":"operation-and-maintenance/known-issues/#mssql-connectivity-via-odbc","title":"MSSQL connectivity via ODBC","text":"

We have observed some issues with the ODBC driver used by MongooseIM in the past. The problems should now be resolved, and MSSQL is verified to work on Ubuntu 20.04.2 LTS.

"},{"location":"operation-and-maintenance/known-issues/#gdpr-retrieval-for-mam-muc-limitation","title":"GDPR retrieval for MAM MUC limitation","text":"

When the personal data retrieval is executed for a user in a specific domain, Message Archive Management for groupchats must be running for this particular domain. This is the case for most configurations, but the problem manifests when a MongooseIM operator configures mod_mam_muc/mod_mam to start only for a subset of domains supported by the cluster (host_config option).

In such case, personal data stored by MAM MUC will not be retrieved for this user.

"},{"location":"operation-and-maintenance/known-issues/#proposed-workaround","title":"Proposed workaround","text":"

Start a dedicated MongooseIM instance with a slightly different config, which enables Message Archive Management for the user's domain. This instance doesn't have to be clustered with other nodes and doesn't have to be accessible for actual users.

After a successful retrieval, this instance may be terminated and deleted if necessary.

"},{"location":"operation-and-maintenance/tls-distribution/","title":"Distribution over TLS","text":"

It's possible to use TLS for communication between MongooseIM cluster nodes. To enable it, find the directory of your release, below it look for etc/vm.dist.args and, inside the file, the section about the distribution protocol:

## Use TLS for connections between Erlang cluster members.\n## Don't forget to override the paths to point to your certificate(s) and key(s)!\n## Once a connection is established, Erlang doesn't differentiate between\n## a client and a server - the same certs/keys can be used on both sides.\n#-proto_dist inet_tls\n#-ssl_dist_opt server_certfile   /Users/erszcz/work/esl/mongooseim/_build/mim1/rel/mongooseim/priv/ssl/fake_cert.pem client_certfile   /Users/erszcz/work/esl/mongooseim/_build/mim1/rel/mongooseim/priv/ssl/fake_cert.pem\n#              server_keyfile    /Users/erszcz/work/esl/mongooseim/_build/mim1/rel/mongooseim/priv/ssl/fake_key.pem  client_keyfile    /Users/erszcz/work/esl/mongooseim/_build/mim1/rel/mongooseim/priv/ssl/fake_key.pem\n#              server_cacertfile /Users/erszcz/work/esl/mongooseim/_build/mim1/rel/mongooseim/priv/ssl/cacert.pem    client_cacertfile /Users/erszcz/work/esl/mongooseim/_build/mim1/rel/mongooseim/priv/ssl/cacert.pem\n#              client_verify     verify_peer\n#              server_verify     verify_peer\n#              server_fail_if_no_peer_cert true\n

By default, the proto_dist as well as the following options for configuring the cluster member are commented out. Enable them and provide the correct paths to your CA certificate, server certificate and server key.

There's a number of caveats to remember about when running Erlang distribution over TLS:

  • TLS-enabled and non-TLS Erlang nodes can't communicate with one another. Remember about it when trying to run erl -[s]name ... and communicating with the server.

  • Establishing a TLS connection will fail if a certificate isn't found in the specified location. You might receive a log message indicating that when nodes try to connect:

    2017-03-10 16:16:03.844 [warning] <0.4218.2> global: mongooseim@localhost failed to connect to fed1@localhost\n

    If the pointed-at certificate/key/CA-certificate file doesn't exist, it won't be reported before trying to connect. Look for (grep) the log message on all cluster nodes, as the message doesn't have to appear on all nodes if a connection fails.

  • You can switch a cluster from running non-TLS distribution, to TLS distribution by shutting down a node, enabling TLS on it, starting it up again, and repeating the steps for each remaining node. Again, nodes with and without TLS enabled won't be able to communicate with one another.

  • It's possible to fortify an Erlang cluster further than Mongoose's preconfigured vm.dist.args does. This includes: checking certificate revocation status against a CA's Certificate Revocation List, securing/disabling EPMD (Erlang Port Mapper Daemon), using custom certificate verification functions. For details on these steps please refer to Erlang Distribution over TLS and Erlang (and Elixir) distribution without epmd.

"},{"location":"rest-api/Administration-backend/","title":"MongooseIM's REST API for backend administration","text":""},{"location":"rest-api/Administration-backend/#configuration","title":"Configuration","text":"

To enable the commands, you need to hook the mongoose_admin_api module to an HTTP endpoint as described in the admin REST API handlers configuration section of the HTTP listeners documentation.

"},{"location":"rest-api/Administration-backend/#openapi-specifications","title":"OpenAPI specifications","text":"

Read the Swagger documentation for more information.

"},{"location":"rest-api/Client-frontend/","title":"MongooseIM's REST API for frontend or client","text":"

In addition to the regular XMPP connection methods such as TCP (with TLS/STARTTLS), WebSockets and BOSH, MongooseIM provides parts of its functionality over a REST API.

"},{"location":"rest-api/Client-frontend/#assumptions","title":"Assumptions","text":"
  1. Every request has to be authenticated. Please see the Authentication section for more details.
  2. We strongly advise that this API is served over HTTPS.
  3. User registration has to be done via other methods (e.g. using the REST API for backend services).
  4. The relevant endpoint has to be configured on the server side. See the configuration section.
  5. A list of provided actions is documented with Swagger. See the specification.
"},{"location":"rest-api/Client-frontend/#authentication","title":"Authentication","text":"

MongooseIM uses Basic Authentication as an authentication method for the REST API.

Basic authentication is a simple authentication scheme built into the HTTP protocol. Each HTTP request to the client REST API has to contain the Authorization header with the word Basic followed by a space and a base64-encoded string username@host:password, where:

  • username@host is the user's bare JID,
  • password is the password used to register the user's account.

For example, to authorize as alice@localhost with the password secret, the client would send a header:

Authorization: Basic YWxpY2VAbG9jYWxob3N0OnNlY3JldA==\n
"},{"location":"rest-api/Client-frontend/#configuration","title":"Configuration","text":"

Handlers have to be configured as shown in the REST API configuration example to enable REST API.

In order to get the client REST API up and running simply copy the provided example. For more details about possible configuration parameters please see the relevant documentation of the HTTP listeners, in particular the client REST API handlers section.

"},{"location":"rest-api/Client-frontend/#smack-library-support","title":"Smack library support","text":"

REST API can fetch messages for Smack Stanza Properties.

For example if we have properties in the stanza like:

    <message xml:lang='en' to='alice@localhost' id='123' type='chat'>\n      <body xml:lang='en_US'>Hi!</body>\n      <thread parent='7edac73ab41e45c4aafa7b2d7b749080'>\n        e0ffe42b28561960c6b12b944a092794b9683a38\n      </thread>\n      <properties xmlns=\"http://www.jivesoftware.com/xmlns/xmpp/properties\">\n          <property>\n              <name>some_number</name>\n              <value type='integer'>123</value>\n          <property>\n          <property>\n              <name>some_string</name>\n              <value type='string'>abc</value>\n          <property>\n      </properties>\n    </message>\n
then in the final json message these properties will be converted to json map without tag names and all types will be taken as string:
    {   \"to\": \"alice@localhost\",\n        \"timestamp\": 1531329049949,\n        \"id\": \"123\",\n        \"from\": \"bob@localhost\",\n        \"body\": \"Hi!\",\n        \"thread\": \"e0ffe42b28561960c6b12b944a092794b9683a38\",\n        \"parent\": \"7edac73ab41e45c4aafa7b2d7b749080\",\n        \"properties\":{\n            \"some_number\":\"123\",\n            \"some_string\":\"abc\"\n        }\n    }\n

"},{"location":"rest-api/Client-frontend/#openapi-specifications","title":"OpenAPI specifications","text":"

See the Swagger documentation for more information.

"},{"location":"tutorials/CETS-configure/","title":"How to configure MongooseIM to use CETS instead of Mnesia","text":""},{"location":"tutorials/CETS-configure/#cets-config-example","title":"CETS Config Example","text":"

CETS is a library that allows replicating in-memory data across the MongooseIM cluster. It can be used to store:

  • information about online XMPP sessions;
  • information about outgoing S2S connections;
  • stream management session IDs;
  • information about online MUC rooms.

If you want to use CETS instead of Mnesia, ensure that these options are set:

[general]\n  sm_backend = \"cets\"\n  component_backend = \"cets\"\n  s2s_backend = \"cets\"\n\n[internal_databases.cets]\n\n# The list of modules that use CETS\n# You should enable only modules that you use\n[modules.mod_stream_management]\n  backend = \"cets\"\n\n[modules.mod_bosh]\n  backend = \"cets\"\n\n[modules.mod_muc]\n  online_backend = \"cets\"\n\n[modules.mod_jingle_sip]\n  backend = \"cets\"\n

Ensure that outgoing_pools are configured with RDBMS, so CETS could get a list of MongooseIM nodes, which use the same relational database and cluster them together.

The preferred way to install MongooseIM is with Helm Charts on Kubernetes, which allows you to set volatileDatabase to cets; the values are then applied using Helm's templates.

"},{"location":"tutorials/CETS-configure/#cets-with-the-file-discovery-backend","title":"CETS with the file discovery backend","text":"

It is possible to read the list of nodes to cluster from a file. MongooseIM does not modify this file, so it is the operator's task to keep it up to date. However, MongooseIM rereads the file without requiring a restart:

[internal_databases.cets]\n    backend = \"file\"\n    node_list_file = \"/etc/mongooseim/mongooseim_nodes.txt\"\n

And the format of the node_list_file file is a new line separated list of nodes:

mongooseim@host1.example.com\nmongooseim@host2.example.com\nmongooseim@host3.example.com\n

The file backend for CETS is only useful if you do not use an RDBMS database. You could use an external script to get the list of nodes, e.g. using the AWS CLI or some other method.

"},{"location":"tutorials/Docker-build/","title":"How to build and run MongooseIM docker image","text":"

The instruction below will guide you through the process of building and running the MongooseIM docker image.

"},{"location":"tutorials/Docker-build/#requirements","title":"Requirements","text":"

To follow this guide you need to have docker installed and the MongooseIM GitHub repository cloned locally.

"},{"location":"tutorials/Docker-build/#building-docker-image","title":"Building docker image","text":"

To build a MongooseIM image, navigate to the main repo directory (referenced as $REPO in this guide) and execute:

./tools/build-docker-from-remote.sh\n

which will build a MongooseIM docker image based on the current local commit if it is available on the remote.

Alternatively, it is possible to build a docker image based on any commit available on remote (commit hash referenced as $COMMIT_HASH), by executing:

./tools/build-docker-from-remote.sh $COMMIT_HASH\n
"},{"location":"tutorials/Docker-build/#running-docker-image","title":"Running docker image","text":"

Full tutorial on running a docker image is available on mongooseim-docker GitHub. Here only simple and one-node configuration will be presented. In order to run it execute:

docker run -dt -h first-node --name first-node -e JOIN_CLUSTER=false mongooseim\n

which starts a single MongooseIM node named first-node.

"},{"location":"tutorials/How-to-build/","title":"How to build MongooseIM","text":"

Instructions provided in this page are verified for:

  • Rocky Linux 8
  • Ubuntu 16.04 LTS (Xenial)
  • Ubuntu 18.04 LTS (Bionic)
  • macOS 13.3 (Ventura)

For any other OS versions, the instructions should still work, however, some steps or file paths may be different.

"},{"location":"tutorials/How-to-build/#requirements","title":"Requirements","text":"

To compile MongooseIM you need:

Rocky/AlmaUbuntumacOS
  • Make: make,
  • C and C++ compiler: gcc, g++,
  • Erlang/OTP 26.0 or higher:
    • erlang EPEL package, or,
    • esl-erlang from Erlang Solutions website, or,
    • install using kerl,
  • OpenSSL 0.9.8 or higher, for STARTTLS, SASL and SSL encryption: openssl and openssl-devel,
  • ODBC library: unixODBC-devel,
  • Zlib 1.2.3 or higher: zlib-devel.
  • Make: make,
  • C and C++ compiler: gcc, g++,
  • Erlang/OTP 24.0 or higher:
    • erlang package, or,
    • esl-erlang from Erlang Solutions website, or,
    • install using kerl,
  • OpenSSL 0.9.8 or higher, for STARTTLS, SASL and SSL encryption: libssl-dev,
  • ODBC library: unixodbc-dev,
  • Zlib 1.2.3 or higher: zlib1g-dev.
  • Make, C and C++ compiler: Xcode Command Line Tools,
  • Erlang/OTP 24.0 or higher:
    • erlang from Homebrew,
    • install using kerl,
  • OpenSSL 0.9.8 or higher, for STARTTLS, SASL and SSL encryption: openssl from Homebrew
  • ODBC library: unixodbc from Homebrew.
"},{"location":"tutorials/How-to-build/#preparing-the-environment","title":"Preparing the environment","text":"Rocky/AlmaUbuntumacOS

Please install the required dependencies:

sudo yum install git make zlib-devel openssl openssl-devel unixODBC-devel gcc gcc-c++\nwget https://binaries2.erlang-solutions.com/rockylinux/8/esl-erlang_26.2.4_1~rockylinux~8_x86_64.rpm\nsudo dnf -Uvh esl-erlang_26.2.4_1~rockylinux~8_x86_64.rpm\n

Now, please proceed to the \"Building\" section.

Please install the required dependencies:

sudo apt install git make zlib1g-dev libssl-dev unixodbc-dev gcc g++ erlang\n

Now, please proceed to the \"Building\" section.

Step 1

Install Homebrew to manage packages on your Mac. You may use a different package manager but you'll need to figure out the package names and file paths on your own.

Step 2

Install Xcode Command Line Tools.

xcode-select --install # install compilation tools\n

Step 3

Install dependencies with Brew.

brew install erlang openssl unixodbc\n

Step 4

Add OpenSSL paths to the compiler and linker environment variables:

export LDFLAGS=\"-L/usr/local/opt/openssl/lib\"\nexport CFLAGS=\"-I/usr/local/opt/openssl/include\"\n

Now, please proceed to the \"Building\" section.

"},{"location":"tutorials/How-to-build/#building","title":"Building","text":"

To compile MongooseIM, navigate to the main repo directory (referenced as $REPO in this guide) and execute:

make [rel]\n

rel is optional as it is the default target. This will download all dependencies, compile everything and build a prod release.

If a more advanced release is required (with only specific DB support, e.g. mysql or pgsql) or you want to set the prefix or user for the installation script please refer to the release configuration page in our documentation.

The make rel commands will generate a self-contained OTP system structure in the project's _build/prod/rel/mongooseim subdirectory. The contents of that directory are as follows:

  • bin - startup/administration scripts,
  • etc - configuration files,
  • lib - MongooseIM binary, header and runtime files,
  • var - spool directory,
  • log - log file directory,
  • releases - release files directory.
"},{"location":"tutorials/How-to-build/#running-mongooseim","title":"Running MongooseIM","text":"

To run MongooseIM from the project tree after compiling it, change the directory to $REPO/_build/prod/rel/mongooseim.

There you can use the mongooseim command line administration script to start and stop MongooseIM. For example, this command will start the server:

bin/mongooseim start\n

You can also run the server in interactive mode (drop into an Erlang shell):

bin/mongooseim live\n

There's also a tool called mongooseimctl to perform some operations on a running instance, e.g.:

$ bin/mongooseimctl status\nMongooseIM node mongooseim@localhost:\n    operating system pid: 3105\n    Erlang VM status: started (of: starting | started | stopping)\n    boot script status: started\n    version: 3.4.0-7-gaec944c92 (as mongooseim)\n    uptime: 0 days 00:00:12\n    distribution protocol: inet_tcp\n    logs:\n        log/mongooseim.log\n
"},{"location":"tutorials/How-to-build/#building-the-testing-target-and-running-tests","title":"Building the testing target and running tests","text":"

For testing purposes there's a different make target available:

make devrel\n

which will generate releases mim1, mim2, mim3, fed1, reg1 in $REPO/_build/ and prepare them for testing and generating coverage reports.

In order to learn how to execute tests, please consult Testing MongooseIM page.

"},{"location":"tutorials/ICE_tutorial/","title":"How to set up MongooseICE (ICE/TURN/STUN server)","text":""},{"location":"tutorials/ICE_tutorial/#introduction","title":"Introduction","text":""},{"location":"tutorials/ICE_tutorial/#who-is-this-document-for","title":"Who is this document for?","text":"

This tutorial presents our TURN/STUN server in action. You get to see how to set up and configure MongooseICE and examine a system utilising its many talents.

Are you in need of an application requiring NAT traversal? Want to see how a TURN and STUN server would handle it? Or maybe you just like to tinker with interesting technologies and experience setting them up first hand?

If that's the case, this tutorial is for you.

"},{"location":"tutorials/ICE_tutorial/#what-is-the-end-result-of-this-tutorial","title":"What is the end result of this tutorial?","text":"

At the end of the tutorial you will have a working environment with two peers, one sending a live video to another. The peer-to-peer communication will not be obstructed by any NATs that may occur in the background. The live video stream is only an example here - there are many possible use cases for peer-to-peer communication with NAT traversal. We chose to build an example application that shows video streaming, because it's vivid, catchy and fun.

"},{"location":"tutorials/ICE_tutorial/#what-do-i-need-to-begin","title":"What do I need to begin?","text":"

Before you begin you have to prepare an environment for setting up the components used in this tutorial. Here's a list of things you'll need: * One Android phone (or at least an Android emulator). The video player in this tutorial is available only as an Android application. * RaspberryPi or any other device that is able to run Elixir code. Oh, and also has ffmpeg installed. We are going to use RaspberryPi 3, to give this tutorial a hint of IoT. * At least one machine with a public IPv4 address. It is necessary, because both MongooseIM and MongooseICE servers need to be accessible by all devices that are used in this demo system. You could use a private, local IP address, but then you would need to ensure that your phone and the RaspberryPi are behind some kind of a NAT relative to this IP address.

Note

The demo will probably work without the NAT, but then there is no point in setting up a TURN server.

We are going to use 2 VPS (Virtual Private Server) that are located somewhere far far away, both having public IPv4 address. Let's say MongooseICE is bound to 1.1.1.1, and MongooseIM to 2.2.2.2.

"},{"location":"tutorials/ICE_tutorial/#general-architecture-of-the-environment-built-with-this-tutorial","title":"General architecture of the environment built with this tutorial","text":"

This is the architecture of the system we are building:

As we know by now, MongooseIM is bound to 2.2.2.2/myxmpp.com and MongooseICE to 1.1.1.1. We also have a RaspberryPi that is connected to a private network (so is behind some NAT) and an Android phone that is connected to an LTE network and also is behind the carrier's NAT.

"},{"location":"tutorials/ICE_tutorial/#ice-notes","title":"ICE notes","text":"

The end result of this tutorial not only uses MongooseICE and MongooseIM servers but also uses custom version of Mangosta-Android and DemoStreamerICE. Both projects are custom modified and custom made respectively in order to showcase the video streaming using the data relay capabilities provided by MongooseICE. The streaming itself, along with the signalling protocol, were prepared only for the case of this demo and are not a part of the platform. Those components exist only to visualize what can be achieved with MongooseICE and what can be built on top of it.

"},{"location":"tutorials/ICE_tutorial/#setting-up-mongooseim-signalling","title":"Setting up MongooseIM (signalling)","text":"

The ICE is nothing without signalling. The signalling protocol itself can be designed specifically for the application that is being deployed or can be implemented based on some standards, e.g. Jingle. Here, we chose to implement the simplest signalling possible, i.e. sending relay addresses via XMPP messages. No matter if we decide to go with this approach or with Jingle, we can use the MongooseIM XMPP server as a transport layer for the signalling. In order to enable signalling we need an instance of MongooseIM running with the simplest configuration, since the only thing we need from it is to provide us with means to communicate between two peers.

"},{"location":"tutorials/ICE_tutorial/#configuration","title":"Configuration","text":"

You can find MongooseIM installation instructions on this page. Once you have it installed, you need to modify the mongooseim.toml config file:

[general]\n  hosts = [\"localhost\", \"myxmpp.com\"]\n
This sets the virtual hostname of the XMPP server, so that you can register users in this domain. After that, you can start MongooseIM with
mongooseimctl start\n

"},{"location":"tutorials/ICE_tutorial/#users","title":"Users","text":"

After we finish setting up MongooseIM, we need to register some users. For this demo we need two users: movie@myxmpp.com and phone@myxmpp.com, for RaspberryPi and the Android phone respectively. In order to do that, type:

mongooseimctl account registerUser --username movie --domain myxmpp.com --password xmpp_password\nmongooseimctl account registerUser --username phone --domain myxmpp.com --password xmpp_password\n

on the machine that has MongooseIM installed.

As you can see here, we have created those two users, both with the password xmpp_password for simplicity.

"},{"location":"tutorials/ICE_tutorial/#setting-up-mongooseice-turnstun-server","title":"Setting up MongooseICE (TURN/STUN server)","text":"

Now, since MongooseIM handles the signalling, we need the TURN relay and the STUN server to send peer-to-peer data. For that we are going to use the star of this tutorial - MongooseICE.

"},{"location":"tutorials/ICE_tutorial/#how-to-get-and-configure","title":"How to get and configure","text":"

The whole documentation that describes all options and deployment methods, can be found on the project's github page. Let's get to it! (this command assumes that we are on the server for MongooseICE and that it has Docker installed):

docker run -it --net=host -e \"MONGOOSEICE_UDP_RELAY_IP=1.1.1.1\" -e \"MONGOOSEICE_STUN_SECRET=secret\" -e \"MONGOOSEICE_UDP_REALM=myrelay\" mongooseim/mongooseice:0.4.0\n

This command starts the MongooseICE server in the Docker container, attaching its virtual network interface to the network interface of the host machine the Docker daemon is running on. There are three important configuration options we have to set via environment variables:

  • MONGOOSEICE_UDP_RELAY_IP - This is the IP address that MongooseICE provides data relay on. This should be set to public IPv4 address.
  • MONGOOSEICE_STUN_SECRET - This is a secret password that TURN clients need to provide to connect to this server.
  • MONGOOSEICE_UDP_REALM - This is just a name for your TURN relay.

And that's it! MongooseICE is now ready to roll!

"},{"location":"tutorials/ICE_tutorial/#setting-up-mangosta-android","title":"Setting up Mangosta-Android","text":""},{"location":"tutorials/ICE_tutorial/#how-to-get-and-install","title":"How to get and install","text":"

The source code of the video-stream-demo-enabled Mangosta-Android can be found on the ice_demo_kt branch. If you want to tinker with it and compile it yourself, you can do that. All you need is Android Studio 2.3+. The compilation is pretty straightforward, so I'm not going to explain it here. If you are interested in how it works, most of the code is in the inaka.com.mangosta.videostream package. If you don't want to compile this application from source, you can just install this .apk on your phone and that's it.

"},{"location":"tutorials/ICE_tutorial/#how-to-configure","title":"How to configure","text":"

Right after you start Mangosta-Android for the first time, you will need to login to your XMPP server. In order to do that, just enter the JID you have created for the phone (phone@myxmpp.com), the password (xmpp_password) and the server address (2.2.2.2 or myxmpp.com if you've set up the domain to actually point to this IP address), and then confirm by clicking \"Enter\".

After we log in, we can start setting up the connection to the MongooseICE server we set up before. The process is shown on the screenshots below.

On the \"Configure ICE\" screen we have to set 5 fields up:

  • TURN server address - IPv4 address of our MongooseICE
  • TURN Server port - since we did not set the port while configuring MongooseICE it uses a default one - 3478
  • TURN Realm - Realm name we have set via MONGOOSEICE_UDP_REALM variable. In our case it's \"myrelay\".
  • TURN username - Current version of MongooseICE ignores this, so you may leave it as is.
  • TURN password - The password that we have set via MONGOOSEICE_STUN_SECRET variable. In our case it's \"secret\"

And that would be all. Now you can click \"TEST CONNECTION\" to, well..., test the connection. If everything works, you can \"SAVE\" the settings. Now your Mangosta-Android is ready to play streamed video, but we still need the source...

"},{"location":"tutorials/ICE_tutorial/#setting-up-raspberrypi","title":"Setting up RaspberryPi","text":"

Let's configure the video source now. In our case it will be a RaspberryPi with Elixir and ffmpeg installed running our ICE demo application.

"},{"location":"tutorials/ICE_tutorial/#the-software","title":"The software","text":"

For this demo we provide a simple XMPP client that also is able to send live video stream using ffmpeg whenever other peer asks for it via XMPP. This client is written in Elixir, so we can run it from source quite easily.

"},{"location":"tutorials/ICE_tutorial/#how-to-get-and-configure_1","title":"How to get and configure","text":"

You can get the client's sources here. For now we only need to run it, so let's get to it (on our RaspberryPi):

git clone https://github.com/esl/ice_demo.git\ncd ice_demo\nmix deps.get\niex -S mix\n

After a while we should get into Elixir shell. In order to enable the streamer, we need to start it, providing some configuration options (in the Elixir shell):

opts = [\njid: \"movie@myxmpp.com\",\npassword: \"xmpp_password\",\nhost: \"myxmpp.com\",\nturn_addr: \"1.1.1.1:3478\",\nturn_username: \"username\",\nturn_secret: \"secret\",\nvideo_file: \"/home/pi/sintel.h264\"\n]\nICEDemo.start_movie(opts)\n

The first 3 options are all about connecting to the XMPP server - we use \"movie@myxmpp.com\" user that we created earlier. Next 3 options are about connecting to the MongooseICE server. Those are similar to ones we set in Mangosta-Android. The last one points to the video file that will be streamed on request. This file has to be raw, H.264-encoded, video-only file. If you are not sure how to get one, you can just use this one (pre-rendered Sintel, OpenBlender project). With this configuration, our RaspberryPi is ready to stream!

"},{"location":"tutorials/ICE_tutorial/#the-end-result","title":"The end result","text":""},{"location":"tutorials/ICE_tutorial/#playing-the-video","title":"Playing the video","text":"

Now we can finally get our phone and start streaming the video! In order to do that, we have to click the "New video stream" button as shown on the screenshots below, enter the JID of the RaspberryPi and confirm with the "Stream!" button.

Hopefully, now you can see the video on your own mobile device.

"},{"location":"tutorials/Jingle-SIP-setup/","title":"Jingle/SIP setup proof of concept","text":"

This tutorial will show you how to configure MongooseIM, Routr (a SIP server) and client applications to demonstrate how the Jingle/SIP integration works.

"},{"location":"tutorials/Jingle-SIP-setup/#prerequisites","title":"Prerequisites","text":"

We are going to use the following open source software:

  • MongooseIM - https://github.com/esl/MongooseIM

    • see How-to-build for details on building. It's important to remember to run the configuration script with with-jingle-sip flag set: tools/configure with-jingle-sip. Without this, third party dependencies required by the Jingle/SIP translator will not be included in the release.
  • Routr (SIP server) - https://routr.io

    • I recommend downloading binaries for your system from official source.
  • Jitsi (XMPP and SIP client application) - https://desktop.jitsi.org
  • Otalk - web based XMPP client - https://github.com/otalk/otalk-im-client
    • Follow the instructions on otalk-im-client#installing to run it

We will use 2 users xmpp.user@xmpp.example and sip.user@sip.example.

"},{"location":"tutorials/Jingle-SIP-setup/#configuring-routr","title":"Configuring Routr","text":"

First the domain sip.example needs to be added to domains served by Routr. To do it, paste the following content to config/domains.yml in the directory where Routr was:

- apiVersion: v1beta1\n  kind: Domain\n  metadata:\n    name: SIP domain\n  spec:\n    context:\n      domainUri: sip.example\n

Then the sip.user@sip.example needs to be added to config/agents.yml like below:

- apiVersion: v1beta1\n  kind: Agent\n  metadata:\n    name: SIP User\n  spec:\n    credentials:\n      username: 'sip.user'\n      secret: '1234'\n    domains: [sip.example]\n

Now Routr can be started with

./routr\n

If all goes well we'll see the following output:

[INFO ] Starting Routr\n[INFO ] Listening  on 10.152.1.27:5060 [udp]\n[INFO ] Listening  on 10.152.1.27:5060 [tcp]\n[INFO ] Starting Location service\n[INFO ] Starting Registry service\n[INFO ] Starting Restful service (port: 4567, apiPath: '/api/v1beta1')\n

It is important to remember the IP address as it'll be used in next point.

"},{"location":"tutorials/Jingle-SIP-setup/#a-side-note","title":"A side note","text":"

In Routr's logs you may see messages like

[WARN ] Unable to register with Gateway -> sip.provider.net. (Verify your network status)\n

or

[ERROR] java.lang.RuntimeException: javax.sip.header.TooManyHopsException: has already reached 0!\n

They can be ignored for the purpose of the tutorial.

"},{"location":"tutorials/Jingle-SIP-setup/#configuring-etchosts","title":"Configuring /etc/hosts","text":"

In my case the IP reported by Routr was 10.152.1.27. Now we need to use this to update /etc/hosts file like below:

10.152.1.27     sip.example xmpp.example\n
"},{"location":"tutorials/Jingle-SIP-setup/#configuring-mongooseim","title":"Configuring MongooseIM","text":"

At this point I assume that MongooseIM was built with make rel, that it is running and the current working directory is _build/prod/rel/mongooseim. Similar to Routr, MongooseIM also needs to know which hosts to serve. Please replace the default host defined in etc/mongooseim.toml; the line:

[general]\n  hosts = [\"localhost\"]\n

should be changed to:

[general]\n  hosts = [\"xmpp.example\", \"sip.example\"]\n

Now we need to enable mod_jingle_sip, please add the following lines to the modules list (somewhere around line 740 in the same file)

[modules.mod_jingle_sip]\n  proxy_host = \"sip.example\"\n

You can find more details on MongooseIM configuration in Configuration and in Modules configuration

Now we are registering both users in MongooseIM by calling the following commands:

mongooseimctl account registerUser --username xmpp.user --domain xmpp.example --password test_pass\nmongooseimctl account registerUser --username sip.user --domain sip.example --password test_pass\n

Yes, we need to have the sip.user@sip.example registered in MongooseIM. This is needed because a Jingle call can be initiated by a regular XMPP client only when the app knows the other user's full JID. The easiest way to achieve that is to exchange presence information between these 2 users. This can happen automatically if 2 xmpp users have each other in the roster.

The roster can be set up with the following command:

mongooseimctl roster setMutualSubscription --userA xmpp.user@xmpp.example --userB sip.user@sip.example --action CONNECT\n
"},{"location":"tutorials/Jingle-SIP-setup/#adding-users-to-jitsi","title":"Adding users to Jitsi","text":"

Now the sip.user@sip.example has to be added to Jitsi app. When the app is opened for the first time it will display a window to configure the user. Later users can be configured from the Preferences page.

"},{"location":"tutorials/Jingle-SIP-setup/#adding-a-sip-user","title":"Adding a SIP user","text":"

In order to add a user who connects to the SIP server we need to choose the SIP protocol from the available networks in Jitsi. In the SIP id field we put sip.user@sip.example and in the Password field we put 1234 as in the agents.yml file. Now we need to switch to Advanced options and go to the Connection tab. Here we need to unselect the Configure proxy automatically and put the IP of our Routr server, port number 5060 and TCP as the preferred transport.

"},{"location":"tutorials/Jingle-SIP-setup/#adding-an-xmpp-user","title":"Adding an XMPP user","text":"

Now we have to add sip.user@sip.example to Jitsi's XMPP network in order to connect this user to MongooseIM over XMPP. It's very similar to adding a user to Jitsi's SIP network, the only difference is the password, for the XMPP connection it's test_pass as set when registering the user in MongooseIM. Here we also need to go to the Advanced window and the Connection tab in order to put the IP address (the same as before) in the Connect Server field. Remember to check the Override server default options box.

Connecting sip.user@sip.example to MongooseIM over XMPP is a way to cheat Jingle a bit, so that the client app for user xmpp.user@xmpp.example can start the Jingle call. When Jitsi connects this user, it will likely display a warning about the server's certificate. This is because by default MongooseIM is configured with a freshly generated, self-signed certificate. We can click Continue anyway button in order to proceed.

"},{"location":"tutorials/Jingle-SIP-setup/#adding-user-to-otalk","title":"Adding user to Otalk","text":"

Please follow the instructions on https://github.com/otalk/otalk-im-client#installing in order to compile and run the app. If all goes well, you should see the following message printed in the console:

demo.stanza.io running at: http://localhost:8000\n

This means that the app is hosted on http://localhost:8000.

At this point I also recommend opening wss://localhost:5285/ws-xmpp in the same browser. This endpoint works correctly only for WebSocket connections but most probably you will be prompted about the certificate. This is again due to the self-signed certificate. We need to add an exception for this certificate in order to successfully connect from Otalk.

Now let's open http://localhost:8000 where the Otalk app is hosted. In the Log in section put xmpp.user@xmpp.example in the JID field and test_pass in the Password field. The default WebSocket endpoint in the WebSocket or BOSH URL field needs to be changed to:

wss://localhost:5285/ws-xmpp\n

Mind the wss protocol, Otalk will not connect the user over WebSockets if for example https is put in the field.

Now we can hit the Go! button and the xmpp.user@xmpp.example will connect to MongooseIM.

"},{"location":"tutorials/Jingle-SIP-setup/#making-a-call","title":"Making a call","text":"

On the left side we can see that the user already has sip.user@sip.example in the roster and there should be a green dot indicating that the user is online. When we click on the contact, the Call button should appear allowing us to initiate the call.

In Jitsi, the following window should pop up:

Behind the scenes the following SIP request was sent from MongooseIM to Routr.

INVITE sip:sip.user@sip.example:5060 SIP/2.0\nVia: SIP/2.0/TCP localhost:5600;rport;branch=z9hG4bK1HMB3o-3mbahM\nFrom: xmpp.user <sip:xmpp.user@xmpp.example>;tag=aVEBue\nTo: sip.user <sip:sip.user@sip.example>\nCall-ID: ae602f16-d57d-4452-b83e-36e54bb6d325\nCSeq: 159913767 INVITE\nMax-Forwards: 70\nContent-Length: 2243\nContact: <sip:xmpp.user@localhost:5600;ob;transport=tcp>;+sip.instance=\"<urn:uuid:f45950f1-70cd-229d-6c2b-8c85903ce14e>\"\nContent-Type: application/sdp\nSupported: outbound,100rel,path\nAllow: PRACK,INVITE,ACK,CANCEL,BYE,OPTIONS,INFO,UPDATE,SUBSCRIBE,NOTIFY,REFER,MESSAGE\n\nv=0\no=- 1531401304 1531401304 IN IP4 127.0.0.1\ns=nksip\nc=IN IP4 127.0.0.1\nt=0 0\na=group:BUNDLE sdparta_0 sdparta_1\nm=audio 1436 UDP/TLS/RTP/SAVPF 109 9 0 8 101\na=sendrecv\na=mid:sdparta_0\na=setup:actpass\na=fingerprint:sha-256 44:84:41:8F:B7:A3:B7:37:BA:00:26:5E:B1:D6:AB:D0:56:56:CF:53:F2:05:DB:99:DE:D4:1C:63:A4:68:58:EA\na=ice-pwd:49ad0f02b4f5181c9af3c4006575e071\na=ice-ufrag:a3cc96e2\na=rtcp-mux\na=extmap:3 urn:ietf:params:rtp-hdrext:sdes:mid\na=extmap:2/recvonly urn:ietf:params:rtp-hdrext:csrc-audio-level\na=extmap:1 urn:ietf:params:rtp-hdrext:ssrc-audio-level\na=rtpmap:109 opus/48000/2\na=fmtp:109 useinbandfec=1;stereo=1;maxplaybackrate=48000\na=rtpmap:9 G722/8000\na=rtpmap:0 PCMU/8000\na=rtpmap:8 PCMA/8000\na=rtpmap:101 telephone-event/8000\na=fmtp:101 0-15\na=ssrc:1698222108 cname:{ce7fa171-069e-db4f-ba41-cfa4455c1033}\na=ssrc:1698222108 msid:{788b64bb-c4fc-b644-89b0-89f69c78f8b0} {2ba61f91-abca-3e48-84b7-85b57e8fdfb5}\nm=video 1031 UDP/TLS/RTP/SAVPF 120 121 126 97\na=sendrecv\na=mid:sdparta_1\na=setup:actpass\na=fingerprint:sha-256 44:84:41:8F:B7:A3:B7:37:BA:00:26:5E:B1:D6:AB:D0:56:56:CF:53:F2:05:DB:99:DE:D4:1C:63:A4:68:58:EA\na=ice-pwd:49ad0f02b4f5181c9af3c4006575e071\na=ice-ufrag:a3cc96e2\na=rtcp-mux\na=extmap:5 urn:ietf:params:rtp-hdrext:toffset\na=extmap:4 http://www.webrtc.org/experiments/rtp-hdrext/abs-send-time\na=extmap:3 
urn:ietf:params:rtp-hdrext:sdes:mid\na=rtpmap:120 VP8/90000\na=fmtp:120 max-fr=60;max-fs=12288\na=rtcp-fb:120 goog-remb\na=rtcp-fb:120 ccm fir\na=rtcp-fb:120 nack pli\na=rtcp-fb:120 nack\na=rtpmap:121 VP9/90000\na=fmtp:121 max-fr=60;max-fs=12288\na=rtcp-fb:121 goog-remb\na=rtcp-fb:121 ccm fir\na=rtcp-fb:121 nack pli\na=rtcp-fb:121 nack\na=rtpmap:126 H264/90000\na=fmtp:126 packetization-mode=1;level-asymmetry-allowed=1;profile-level-id=42e01f\na=rtcp-fb:126 goog-remb\na=rtcp-fb:126 ccm fir\na=rtcp-fb:126 nack pli\na=rtcp-fb:126 nack\na=rtpmap:97 H264/90000\na=fmtp:97 level-asymmetry-allowed=1;profile-level-id=42e01f\na=rtcp-fb:97 goog-remb\na=rtcp-fb:97 ccm fir\na=rtcp-fb:97 nack pli\na=rtcp-fb:97 nack\na=ssrc:823938224 cname:{ce7fa171-069e-db4f-ba41-cfa4455c1033}\na=ssrc:823938224 msid:{788b64bb-c4fc-b644-89b0-89f69c78f8b0} {a7f87c8d-6002-fd4c-badb-13383c759e48}\n

And Routr sent the Ringing response code to MongooseIM as soon as the Jitsi app displayed the incoming call window:

SIP/2.0 180 Ringing\nCSeq: 159913767 INVITE\nCall-ID: ae602f16-d57d-4452-b83e-36e54bb6d325\nFrom: \"xmpp.user\" <sip:xmpp.user@xmpp.example>;tag=aVEBue\nTo: \"sip.user\" <sip:sip.user@sip.example>;tag=9b4c72a3\nVia: SIP/2.0/TCP localhost:5600;rport=54071;branch=z9hG4bK1HMB3o-3mbahM;received=10.152.1.27\nContact: \"sip.user\" <sip:sip.user@10.152.1.27:53697;transport=tcp;registering_acc=sip_example>\nUser-Agent: Jitsi2.10.5550Mac OS X\nContent-Length: 0\n
"},{"location":"tutorials/Jingle-SIP-setup/#summary","title":"Summary","text":"

The example above showcases how you can use Jingle/SIP switch with the available open source software. Sonetel, who are this feature's sponsor, operate on a slightly different use case and utilize more of the functionality with their proprietary software. Current implementation makes following assumptions:

  • The peer-to-peer stream is always encrypted. This means that MongooseIM expects element <fingerprint> as described in XEP-0320: Use of DTLS-SRTP in Jingle Sessions to be in the content description. Not every open source XMPP client supporting Jingle supports this encryption.
  • MongooseIM expects that the 200 OK response contains at least one ICE candidate to set the peer-to-peer connection up.

This makes the current implementation a bit limited, but on the other hand the basic integration between XMPP and SIP world is already there. Based on the current state it can be improved and extended if needed.

"},{"location":"tutorials/client-certificate/","title":"How to Set up SASL client certificate authentication","text":""},{"location":"tutorials/client-certificate/#overview","title":"Overview","text":"

Clients connected to MongooseIM may authenticate with their TLS certificates. This method uses the SASL EXTERNAL mechanism.

"},{"location":"tutorials/client-certificate/#server-side-prerequisites","title":"Server-side prerequisites","text":""},{"location":"tutorials/client-certificate/#properly-configure-client-to-server-c2s-listener","title":"Properly configure Client-to-server (C2S) listener","text":"

A server must request the certificate from a client, so you'll need to set verify_mode option to \"peer\" and provide a path to CA chain that may be used for client's certificate check (cacertfile option).

Please check the Listener modules page for more information or simply follow the examples at the end of this section.

"},{"location":"tutorials/client-certificate/#properly-configure-http-listener","title":"Properly configure http listener","text":"

SASL EXTERNAL authentication is also possible for WebSocketSecure and BOSH connections over HTTPS. Similarly as in the client-to-server case, the server must request the certificate from the client. In this case it's enabled by adding the following options to the tls option of listen.http :

  • tls.verify_mode = \"peer\" - this is to tell Erlang's SSL to request the cert from the client
  • tls.cacertfile = \"ca.pem\" - this is to tell Erlang's SSL where the CA cert file is in order to check if the cert is correctly signed

Please check HTTP-based services configuration for more details regarding http listener configuration.

"},{"location":"tutorials/client-certificate/#enable-sasl-external-mechanism","title":"Enable SASL EXTERNAL mechanism","text":"

A SASL EXTERNAL authentication mechanism is disabled by default. In order to enable it, please configure auth.sasl_mechanisms option in the MongooseIM config file.

[auth]\n  sasl_mechanisms = [\"external\"]\n

Obviously the list may be longer, if the system should support both the certificate and password based authentication.

The SASL EXTERNAL authentication mechanism requires a digital client certificate. This digital certificate should contain xmppAddr field(s), which is always checked first. If there is more than one JID specified in the xmppAddr fields, the client must include the authorisation entity which corresponds to the one of the specified JIDs.

When no xmppAddr is specified, the cn (common name) field might be used to provide the client's username, but it is optional and can be configured with the sasl_external option in the auth section.

If the client certificate does not contain a JID, the client must provide one in authorisation entity.

For the details please refer to XEP-0178: Best Practices for Use of SASL EXTERNAL with Certificates.

"},{"location":"tutorials/client-certificate/#enable-compatible-authentication-method","title":"Enable compatible authentication method","text":"

You need to enable one of the following authentication methods by using the auth.methods option in the MongooseIM configuration file.

  • \"pki\" - accepts user credentials,
  • \"http\" - accepts user credentials if the provided certificate is known and valid
  • \"ldap\" - accepts user credentials if a corresponding user account exists in LDAP.
"},{"location":"tutorials/client-certificate/#self-signed-certificates","title":"Self-signed certificates","text":"

By default MongooseIM doesn't accept self-signed certs for the SASL-EXTERNAL authentication. For development purposes, it is possible to tell MongooseIM to accept them.

"},{"location":"tutorials/client-certificate/#self-signed-certificates-for-regular-tcptls-connections","title":"Self-signed certificates for regular TCP/TLS connections","text":"

In order to tell MongooseIM to accept self-signed certs, the listen.c2s.tls.verify_mode option needs to be configured like below:

[listen.c2s]\n  tls.verify_mode = \"selfsigned_peer\"\n  tls.disconnect_on_failure = false\n  tls.cacertfile = \"ca.pem\"\n

where the tls.disconnect_on_failure is a boolean with the following meaning only for just_tls:

  • true - the connection is closed if a certificate is invalid,
  • false - the connection isn't closed, but the certificate is not returned if it's invalid. This leads to an authentication failure but allows the client to choose a different auth method if available.

For fast_tls backend, the configuration is the same, only the disconnect_on_failure is ignored.

"},{"location":"tutorials/client-certificate/#self-signed-certificates-for-ws-or-bosh","title":"Self-signed certificates for WS or BOSH","text":"

In order to accept self-signed certs for WS or BOSH connections, the tls options for http listener must have the following configured:

[listen.http]\n  tls.verify_mode = \"selfsigned_peer\"\n  tls.cacertfile = \"ca.pem\"\n
"},{"location":"tutorials/client-certificate/#examples","title":"Examples","text":"

Certificate authentication only.

[listen.c2s]\n  port = 5222\n  (...)\n  tls.cacertfile = \"ca.pem\"\n  tls.verify_peer = true\n\n[listen.http]\n  port = 5285\n  (...)\n  tls.cacertfile = \"ca.pem\"\n  tls.verify_peer = true\n\n  [[listen.http.handlers.mod_bosh]]\n    host = \"_\"\n    path = \"/http-bind\"\n\n  [[listen.http.handlers.mod_websockets]]\n    host = \"_\"\n    path = \"/ws-xmpp\"\n\n[auth]\n  methods = [\"pki\"]\n  sasl_mechanisms = [\"external\"]\n

Authentication with a client certificate (validated with provided CA chain) or password (validated with data stored in RDBMS).

[listen.c2s]\n  port = 5222\n  (...)\n  tls.cacertfile = \"ca.pem\"\n  tls.verify_peer = true\n\n[auth]\n  methods = [\"rdbms\", \"pki\"]\n  sasl_mechanisms = [\"scram_sha1\", \"external\"]\n
"},{"location":"tutorials/client-certificate/#client-certificate-prerequisites","title":"Client certificate prerequisites","text":"

SASL EXTERNAL will be offered by the server only when a client provides a valid certificate.

Please check documentation of a specific authentication backend you're going to use.

"},{"location":"tutorials/client-certificate/#usage-example-gajim","title":"Usage example - Gajim","text":"

Verified with Gajim 0.16.8, installed from package gajim-0.16.8-1.fc25.noarch.

"},{"location":"tutorials/client-certificate/#generate-client-certificate","title":"Generate client certificate","text":"
openssl genrsa -des3 -out rootCA.key 4096\nopenssl req -x509 -new -nodes -key rootCA.key -sha256 -days 1024 -out rootCA.crt\nopenssl genrsa -out client.key 2048\nopenssl req -new -key client.key -out client.csr # Remember to provide username as Common Name!\nopenssl x509 -req -in client.csr -CA rootCA.crt -CAkey rootCA.key -CAcreateserial -out client.crt -days 500 -sha256\nopenssl pkcs12 -export -inkey client.key -in client.crt -out client.p12\n
"},{"location":"tutorials/client-certificate/#configure-mongooseim","title":"Configure MongooseIM","text":"

See examples in the section above. We recommend using the first snippet for simplicity.

You don't need to pre-create a user account in order to log in with a certificate.

"},{"location":"tutorials/client-certificate/#add-an-account-in-gajim","title":"Add an account in Gajim","text":"
  1. Edit -> Accounts -> Add.
  2. Pick \"I already have an account I want to use\".
  3. Jabber ID is [Common Name from certificate]@localhost (domain is different if you've changed it in hosts option). Press \"Next\".
  4. Untick \"Connect when I press Finish\" and press \"Advanced\".
  5. Unfold \"Client certificate\" and choose the .p12 you've created earlier. Tick \"Certificate is encrypted\".
  6. Click \"Close\" and set status to \"Available\". Tell Gajim to ignore the unverified server certificate (by default it's self-signed).

If Gajim fails to connect, try to restart it. Version 0.16.8 sometimes \"forgets\" to ask for the client certificate password.

"},{"location":"tutorials/iOS_tutorial/","title":"Build a complete iOS messaging app using XMPPFramework","text":"

Read our blog posts:

  • Build a complete iOS messaging app using XMPPFramework - Tutorial Part 1
  • Build a complete iOS messaging app using XMPPFramework - Part 2
"},{"location":"tutorials/iOS_tutorial/#yaxt-yet-another-xmpp-tutorial","title":"YAXT??! Yet another XMPP tutorial?","text":"

Well, this is going to be another tutorial, but I\u2019m going to try to make it a little bit different. This is an XMPP tutorial from an iOS developer\u2019s perspective. I\u2019ll try to answer all the questions I had when I started working in this area. This journey is going to go from no XMPP knowledge at all to having a fully functional instant messaging iOS app using this cool protocol. We are going to be using the super awesome (yet overwhelming at the beginning\u2026) XMPPFramework library, and the idea is to also mix in some iOS concepts that you are going to need for your app.

"},{"location":"tutorials/iOS_tutorial/#whats-xmpp","title":"What\u2019s XMPP?","text":"

From Wikipedia: Extensible Messaging and Presence Protocol (XMPP) is a communications protocol for message-oriented middleware based on XML.

This basically means XMPP is a protocol for exchanging stuff. What kind of stuff? Messages and presences. We all know what messages are, but what about presences? A presence is just a way of sharing a \u201cstatus\u201d, that\u2019s it. You can be \u2018online\u2019, 'offline\u2019, 'having lunch\u2019, or whatever you want. Also there\u2019s another important word: Extensible meaning it can grow. It started as an instant messaging protocol and it has grown into multiple fields for example IoT (Internet of Things). And last, but not least: every piece of information we are going to exchange under this protocol is going to be XML. I can hear you complaining but\u2026 Come on, it\u2019s not that bad!

"},{"location":"tutorials/iOS_tutorial/#why-do-we-need-xmpp-why-not-just-rest","title":"Why do we need XMPP? Why not just REST?","text":"

Well what other options do we have? On the one hand, a custom solution means building everything from scratch, that takes time. On the other hand, we have XMPP, a super tested technology broadly used by millions of people every day, so we can say that\u2019s an advantage over a custom approach.

Every time I talk about XMPP, someone asks me 'Why not just REST?\u2019. Well, there is a misconception here. REST is not a protocol, it\u2019s just a way of architecting a networked application; it\u2019s just a standardized way of doing something (that I love btw). So let\u2019s change the question to something that makes more sense: \u201cWhy not just build a custom REST chat application?\u201d. The first thing that comes to my mind is what I already explained in the previous paragraph, but there is something else. How do I know when someone has sent me a message? For XMPP this is trivial: we have an open connection all the time so, as soon as a message arrives to the server, it will send us the message. We have a full-duplex. On the other hand, the only solution with REST is polling. We will need to ask the server for new messages from time to time to see if there is something new for us. That sucks. So, we will have to add a mechanism that allows us to receive the messages as soon as they are created, like SSE or WebSockets.

There is one more XMPP advantage over a custom REST chat application. REST uses HTTP, an application level protocol that is built on top of a transport level protocol: TCP. So every time you want to use your REST solution, you will need HTTP, a protocol that is not always available everywhere (maybe you need to embed this in a cheap piece of hardware?). Besides, we have XMPP built on top of TCP that\u2019s going to be always available.

"},{"location":"tutorials/iOS_tutorial/#whats-the-basic-stuff-i-need-to-know-to-get-started","title":"What\u2019s the basic stuff I need to know to get started?","text":"

Well, you know a lot already but let\u2019s make a list. Lists are always good:

  • XMPP is built on top of TCP. It keeps an open connection all the time.
  • Client/Server architecture. Messages always go through a server.
  • Everything we send and receive is going to be XML and it\u2019s called Stanza.
  • We have three different types of stanzas: iq, message and presence.
  • Every individual on the XMPP network is univocally identified by a JID (Jabber ID).
  • All the stanzas are contained in a Stream. Let\u2019s imagine the Stream as a white canvas where you and the server write the stanzas.
  • Stream, iq, message and presence are the core of XMPP. You can find everything perfectly detailed in RFC6120 XMPP can be extended to accomplish different stuff. Each extension is called XEP (XMPP Extension Protocol).
"},{"location":"tutorials/iOS_tutorial/#whats-a-jid","title":"What\u2019s a JID?","text":"

Jabber ID (JID) is how we univocally identify each individual in XMPP. It is the address to where we are going to send our stanzas.

This is how a JID looks like:

  • localpart: This is your username.
  • domainpart: Server name where the localpart resides.
  • resourcepart: This is optional, and it identifies a particular client for the user. For example: I can be logged in with andres@erlang-solutions.com on my iPhone, on my Android and on my mac at the same time\u2026 So all these will be the same localpart + domainpart but different resourcepart

I\u2019m sure you have already noticed how similar the JID looks to a standard email address. This is because you can connect multiple servers together and the messages are routed to the right user in the right server, just as email works. Pretty cool, right?

Sometimes you will see we have a JID with just the domain part. Why?! Because it\u2019s also possible to send stanzas to a service instead of a user. A service? What\u2019s a service?! Services are different pieces of an XMPP server that offer you some special functionality, but don\u2019t worry about this right now, just remember: you can have JIDs without a localpart.

"},{"location":"tutorials/iOS_tutorial/#whats-a-stanza","title":"What\u2019s a Stanza?","text":"

Stanza is the name of the XML pieces that we are going to be sending and receiving. The defined stanzas are: <message/>, <presence/> and <iq/>.

"},{"location":"tutorials/iOS_tutorial/#message","title":"<message/>","text":"

This is a basic <message/> stanza. Every time you want to send a message to someone (a JID), you will have to send this stanza:

<message from='andres@erlang-solutions.com/iphone' to='juana@erlang-solutions.com' type='chat'>\n    <body>Hey there!</body>\n</message>\n
"},{"location":"tutorials/iOS_tutorial/#iq","title":"<iq/>","text":"

It stands for Info/Query. It\u2019s a query-action mechanism, you send an iq and you will get a response to that query. You can pair the iq-query with the iq-response using the stanza id.

For example, we send an iq to the server to do something (don\u2019t pay attention to what we want to do\u2026 you just need to know there is an iq stanza and how the mechanism works):

<iq to='erlang-solutions.com' type='get' id='1'>\n  <query xmlns='http://jabber.org/protocol/disco#items'/>\n</iq>\n

And we get back another iq with the same id with the result of the previous query:

<iq from='erlang-solutions.com' to='ramabit@erlang-solutions.com/Andress-MacBook-Air' id='1' type='result'>\n    <query xmlns='http://jabber.org/protocol/disco#items'>\n        <item jid='muc.erlang-solutions.com'/>\n        <item jid='muclight.erlang-solutions.com'/>\n        <item jid='pubsub.erlang-solutions.com'/>\n    </query>\n</iq>\n
"},{"location":"tutorials/iOS_tutorial/#presence","title":"<presence/>","text":"

Used to exchange presence information, as you could have imagined. Usually presences are sent from the client to the server and broadcasted by it. The most basic, yet valid presence, to indicate to the server that a user is available is:

<presence/>\n

After a successful connection, you are not going to receive any <message/> until you make yourself available sending the previous presence.

If you want to make yourself unavailable, you just have to send:

<presence type=\"unavailable\"></presence>\n

If we want to make the presences more useful, we can send something like this:

<presence>\n      <status>On vacation</status>\n</presence>\n
"},{"location":"tutorials/iOS_tutorial/#whats-a-stream","title":"What\u2019s a Stream?","text":"

Before answering this, let\u2019s refresh our mind. What\u2019s a Unix socket? From Wikipedia: A socket is a special file used for inter-process communication. It allows communication between two processes. So a socket is a file that can be written by two processes (in the same computer or in different computers in the same network). So the client is going to write to this file and the server too.

Ok, but how is a socket related to a Stream? Well, we are going to be connected to a server using a socket, therefore we are going to have a 'shared file\u2019 between the client and the server. This shared file is a white canvas where we are going to start writing our XML stanzas. The first thing we are going to write to this file is an opening <stream> tag! And there you go\u2026 that\u2019s our stream.

Perfect, I understand what a stream is, but I still don\u2019t understand how to send a message to the server. Well, the only thing we need to do to send a message is writing a stanza in our shared file. But what happens when the server wants to send me a message? Simple: it will write the message in the 'shared file\u2019.

"},{"location":"tutorials/iOS_tutorial/#are-we-ok-so-far","title":"Are we ok so far?","text":"

I\u2019m sure at this point you have questions like:

  • \u201cWhat?! An active TCP connection open all the time? I\u2019m used to REST! How am I going to do that?!\u201d
  • Easy, you don\u2019t have to care about that any more! That\u2019s why we are going to use the library, and it will take care of that.
  • \u201cYou said nothing about how to connect to the server!\u201d
  • Believe me, you don\u2019t have to care about this either. If we start adding all this info, we are going to get crazy. Trust me, I\u2019ve been there.
  • \u201cWhat about encrypted messages? We need security! How are we going to handle this?\u201d
  • Again, you don\u2019t have to care about this at this point. Baby steps!

You just need to be able to answer: \u201cWhat\u2019s XMPP?\u201d, \u201cHow do you send a message?\u201d, \u201cHow do you change your status in XMPP?\u201d, \u201cHow do you ask something to the server?\u201d, \u201cWhat\u2019s a Stream?\u201d. If you can answer all that, you are WAY better than me when I started.

"},{"location":"tutorials/iOS_tutorial/#first-steps-installing-the-xmppframework-library","title":"First steps: installing the XMPPFramework library","text":"

Let\u2019s create a brand new Xcode project and install the library. In this tutorial we are going to be using Swift 3. The easiest way to integrate XMPPFramework to the project is using CocoaPods.

Let\u2019s create our Podfile using the pod init command in the folder where our .xcodeproj lives. There are thousands of forks but the maintained one is the original: robbiehanson/XMPPFramework.

So let\u2019s add the pod to our Podfile and remember to uncomment the use_frameworks!.

use_frameworks!\n\ntarget 'CrazyMessages' do\n    pod 'XMPPFramework', :git=> 'git@github.com:robbiehanson/XMPPFramework.git', :branch => 'master'\nend\n

Then pod install and CocoaPods is going to do its magic and create a .xcworkspace with the library integrated. Now we just need to import XMPPFramework in the files we want to use the library and that\u2019s it.

"},{"location":"tutorials/iOS_tutorial/#starting-to-build-our-instant-messaging-app","title":"Starting to build our Instant Messaging app","text":"

The most important thing in an XMPP application is the stream, that\u2019s where we are going to \u201cwrite\u201d our stanzas, so we need an object that is going to hold it. We are going to create an XMPPController class with an XMPPStream:

import Foundation\nimport XMPPFramework\n\nclass XMPPController: NSObject {\n    var xmppStream: XMPPStream\n\n    init() {\n        self.xmppStream = XMPPStream()  \n    }\n\n}\n

We are dealing with a highly asynchronous library here. For every action we are going to have a response some time in the future. To handle this XMPPFramework defines the XMPPStreamDelegate. So implementing that delegate is going to help us answer lots of different questions like: \u201cHow do I know when XMPP has successfully connected?\u201d, \u201cHow do I know if I\u2019m correctly authenticated?\u201d, \u201cHow do I know if I received a message?\u201d. XMPPStreamDelegate is your friend!

So we have our XMPPController and our XMPPStream, what do we need to do now? Configure our stream with the hostName, port and ourJID. To provide all this info to the controller we are going to make some changes to the init to be able to receive all these parameters:

enum XMPPControllerError: Error {\n    case wrongUserJID\n}\n\nclass XMPPController: NSObject {\n    var xmppStream: XMPPStream\n\n    let hostName: String\n    let userJID: XMPPJID\n    let hostPort: UInt16\n    let password: String\n\n    init(hostName: String, userJIDString: String, hostPort: UInt16 = 5222, password: String) throws {\n        guard let userJID = XMPPJID(string: userJIDString) else {\n            throw XMPPControllerError.wrongUserJID\n        }\n\n        self.hostName = hostName\n        self.userJID = userJID\n        self.hostPort = hostPort\n        self.password = password\n\n        // Stream Configuration\n        self.xmppStream = XMPPStream()\n        self.xmppStream.hostName = hostName\n        self.xmppStream.hostPort = hostPort\n        self.xmppStream.startTLSPolicy = XMPPStreamStartTLSPolicy.allowed\n        self.xmppStream.myJID = userJID\n\n        super.init()\n\n        self.xmppStream.addDelegate(self, delegateQueue: DispatchQueue.main)\n    }\n}\n

Our next step is going to actually connect to a server and authenticate using our userJID and password, so we are adding a connect method to our XMPPController.

func connect() {\n    if !self.xmppStream.isDisconnected() {\n        return\n    }\n\n   try! self.xmppStream.connect(withTimeout: XMPPStreamTimeoutNone)\n}\n

But how do we know we have successfully connected to the server? As I said earlier, we need to check for a suitable delegate method from XMPPStreamDelegate. After we connect to the server we need to authenticate so we are going to do the following:

extension XMPPController: XMPPStreamDelegate {\n\n    func xmppStreamDidConnect(_ stream: XMPPStream!) {\n        print(\"Stream: Connected\")\n        try! stream.authenticate(withPassword: self.password)\n    }\n\n    func xmppStreamDidAuthenticate(_ sender: XMPPStream!) {\n        self.xmppStream.send(XMPPPresence())\n        print(\"Stream: Authenticated\")\n    }\n}\n

We need to test this. Let\u2019s just create an instance of XMPPController in the AppDelegate to test how it works:

try! self.xmppController = XMPPController(hostName: \"host.com\",\n                                     userJIDString: \"user@host.com\",\n                                          password: \"password\")\nself.xmppController.connect()\n

If everything goes fine we should see two messages in the logs but of course that\u2019s not happening, we missed something. We never told our xmppStream who the delegate object was! We need to add the following line after the super.init()

self.xmppStream.addDelegate(self, delegateQueue: DispatchQueue.main)\n

If we run the app again:

Stream: Connected\nStream: Authenticated\n

Success! We have our own XMPPController with a fully functional and authenticated stream!

Something that may catch your attention is how we are setting our delegate, we are not doing:

self.xmppStream.delegate = self\n

Why not? Because we can \u201cbroadcast\u201d the events to multiple delegates, we can have 10 different objects implementing those methods. Also we can tell which thread we want to receive that call on; in the previous example we want it in the main thread.

"},{"location":"tutorials/iOS_tutorial/#getting-a-log-in","title":"Getting a Log In","text":"

Our app is super ugly, let\u2019s put on some makeup! We have nothing but an XMPPController and a hardcoded call in the AppDelegate. I\u2019m going to create a ViewController that is going to be presented modally as soon as the app starts, that ViewController will have the necessary fields/info to log in to the server.

I\u2019m going to create a LogInViewControllerDelegate that is going to tell our ViewController that the Log in button was pressed and that\u2019s it. In that delegate implementation we are going to create our XMPPController, add the ViewController as delegate of the XMPPStream and connect!

extension ViewController: LogInViewControllerDelegate {\n\n    func didTouchLogIn(sender: LogInViewController, userJID: String, userPassword: String, server: String) {\n        self.logInViewController = sender\n\n        do {\n            try self.xmppController = XMPPController(hostName: server,\n                                                     userJIDString: userJID,\n                                                     password: userPassword)\n            self.xmppController.xmppStream.addDelegate(self, delegateQueue: DispatchQueue.main)\n            self.xmppController.connect()\n        } catch {\n            sender.showErrorMessage(message: \"Something went wrong\")\n        }\n    }\n}\n

Why are we adding ViewController as a delegate of XMPPStream if our XMPPController already has that delegate implemented? Because we need to know if this connection and authentication was successful or not in our ViewController so we are able to dismiss the LogInViewController or show an error message if something failed. This is why being able to add multiple delegates is so useful.

So as I said I\u2019m going to make ViewController conform to the XMPPStreamDelegate:

extension ViewController: XMPPStreamDelegate {\n\n    func xmppStreamDidAuthenticate(_ sender: XMPPStream!) {\n        self.logInViewController?.dismiss(animated: true, completion: nil)\n    }\n\n    func xmppStream(_ sender: XMPPStream!, didNotAuthenticate error: DDXMLElement!) {\n        self.logInViewController?.showErrorMessage(message: \"Wrong password or username\")\n    }\n\n}\n

And that\u2019s it! Our app can log in to our server as I\u2019m showing here:

"},{"location":"tutorials/iOS_tutorial/#logging","title":"Logging!","text":"

We\u2019ve been talking a lot about XMPP, stanzas and streams\u2026 but is there a way I can see the stream? Yes sir! XMPPFramework got us covered!

XMPPFramework ships with CocoaLumberJack, a pretty well known logging framework. We just need to configure it, set the logging level we want and that\u2019s it. Logs are going to start showing up!

"},{"location":"tutorials/iOS_tutorial/#configuring-cocoalumberjack","title":"Configuring CocoaLumberjack","text":"

This is a really simple task, you just need to add to your func application(application: UIApplication, didFinishLaunchingWithOptions ... method the following line (remember to import CocoaLumberjack):

DDLog.add(DDTTYLogger.sharedInstance(), with: DDLogLevel.all)\n

I\u2019m not going to paste here all the connection process log because it makes no sense to try to understand what\u2019s going on at this stage of our learning. But I think showing what some stanzas look like is a good idea. To do this I\u2019m going to be sending messages from Adium.

I\u2019m going to send this <message/>:

<message to=\"test.user@erlang-solutions.com\">\n    <body>This is a message sent from Adium!</body>\n</message>\n

Let\u2019s see how it looks like when it reaches our app:

<message xmlns=\"jabber:client\" from=\"iamadium@erlang-solutions.com/MacBook-Air\" to=\"test.user@erlang-solutions.com\">\n   <body>This is a message sent from Adium!</body>\n</message>\n

Let\u2019s send a <presence/> from Adium:

<presence>\n    <status>On vacation</status>\n</presence>\n

We are receiving:

<presence xmlns=\"jabber:client\" from=\"iamadium@erlang-solutions.com/MacBook-Air\" to=\"test.user@erlang-solutions.com\">\n   <status>On vacation</status>\n</presence>\n

No doubts at all right? We send something and we receive it on the other end! That\u2019s it!

"},{"location":"tutorials/iOS_tutorial/#test-time","title":"Test Time!","text":"

I want to be sure that you are understanding and following everything and not just copy and pasting from a tutorial (as I usually do \ud83d\ude4a). So if you are able to answer these questions you are on a good track!

  • Why am I sending a presence after successfully authenticating? What happens if I don\u2019t send it?
  • What happens if I write a wrong server URL in the Log In form? How do I fix this problem if there is a problem\u2026
  • How do I detect if suddenly the stream is disconnected from the server? (maybe a network outage?)
  • How do I detect if the user/password was wrong?

If you need help leave a message!

"},{"location":"tutorials/push-notifications/MongoosePush-setup/","title":"Push notifications with MongoosePush","text":"

MongoosePush is a simple RESTful service written in Elixir. It provides the ability to send push notifications to FCM (Firebase Cloud Messaging) and/or APNS (Apple Push Notification Service) via their HTTP/2 API.

To take advantage of MongoosePush's functionality, you will need to enable the mod_push_service_mongoosepush module: this module acts as a bridge between the push_notifications hook and MongoosePush itself.

"},{"location":"tutorials/push-notifications/MongoosePush-setup/#getting-started","title":"Getting started","text":"

To enable integration with MongoosePush, it is as simple as the next two steps. First, you need to define a pool of HTTPS connections to MongoosePush in the outgoing_pools section:

[outgoing_pools.http.mongoose_push_http]\n  scope = \"global\"\n  strategy = \"available_worker\"\n\n  [outgoing_pools.http.mongoose_push_http.connection]\n    host = \"https://localhost:8443\"\n

And second, you need to add mod_push_service_mongoosepush to the modules section in the config file:

[modules.mod_push_service_mongoosepush]\n  pool_name = \"mongoose_push_http\"\n  api_version = \"v3\"\n

Here, we assume that MongoosePush will be available on the localhost on port 8443, which is the default one \u2014\u00a0note the host option in the outgoing pool definition. Next we enable mod_push_service_mongoosepush. The first option is the name of the HTTP pool to use and the second one is the version of MongoosePush's API (\"v2\" or \"v3\" are supported).

And that's it, we've just completed the entire MongooseIM configuration. All we need to do now is to set up MongoosePush.

"},{"location":"tutorials/push-notifications/MongoosePush-setup/#starting-mongoosepush","title":"Starting MongoosePush","text":"

The easiest way to start MongoosePush is using its docker image. But before you can set MongoosePush up, you need a FCM application token and/or an APNS application certificate. You can get the FCM token here and the easiest way of getting an APNS application certificate is by running this script (please note that you need the certificate in pem format).

After you get the FCM application token and/or the APNS application certificate, you can prepare to start MongoosePush. Firstly, prepare the following files structure:

  • priv/
    • ssl/
      • rest_cert.pem - The REST endpoint certificate
      • rest_key.pem - private key for the REST endpoint certificate
    • apns/
      • prod_cert.pem - Production APNS app certificate
      • prod_key.pem - Production APNS app certificate's private key
      • dev_cert.pem - Development APNS app certificate
      • dev_key.pem - Development APNS app certificate's private key
    • fcm/
      • token.json - FCM service account JSON file

If your FCM app token is MY_FCM_SECRET_TOKEN and you have the priv directory with all certificates in the current directory, start MongoosePush with the following command:

docker run -v `pwd`/priv:/opt/app/priv \\\n  -e PUSH_FCM_APP_FILE=\"MY_FCM_SECRET_TOKEN\" \\\n  -e PUSH_HTTPS_CERTFILE=\"/opt/app/priv/ssl/rest_cert.pem\" \\\n  -e PUSH_HTTPS_KEYFILE=\"/opt/app/priv/ssl/rest_key.pem\" \\\n  -it --rm mongooseim/mongoose-push:2.0.0\n

If you don't want to use either APNS or FCM, you simply need to pass PUSH_APNS_ENABLED=0 or PUSH_FCM_ENABLED=0 respectively as additional env variables in your docker run command. For more advanced options and configuration please refer to \"Quick start / Configuring\" in MongoosePush's README.md.

When your MongoosePush docker is up and running, Push Notifications can be used in your MongooseIM instance.

"},{"location":"tutorials/push-notifications/Push-notifications-client-side/","title":"Using push notifications on the client side","text":"

There are just a few things the XMPP client application needs to receive the push notifications. Depending on whether you plan to use PubSub-full or PubSub-less configuration, some of the steps may be unnecessary.

"},{"location":"tutorials/push-notifications/Push-notifications-client-side/#registering-with-a-push-service-provider","title":"Registering with a Push Service provider","text":"

First, the client application has to get a device-specific token from the Push Service Provider (FCM or APNS). This process is different, depending on the platform, so please consult your Push Service Provider's manual to see how to get this token. For example, here you can learn about setting up FCM on Android platform and here you can learn about setting up APNS on iOS platform.

After this step, your application shall be able to receive FCM or APNS token - it will be required in the next step of this tutorial.

"},{"location":"tutorials/push-notifications/Push-notifications-client-side/#setting-up-an-xmpp-pubsub-node","title":"Setting up an XMPP pubsub node","text":"

This step is specific to the PubSub-full push configuration that you chose for your MongooseIM server. If you're running a PubSub-less configuration, skip to this point.

"},{"location":"tutorials/push-notifications/Push-notifications-client-side/#creating-a-new-push-node","title":"Creating a new push node","text":"

In this example mypubsub.com is a domain of the MongooseIM server that has mod_pubsub enabled with the push node support. The client sends the following stanza to the server:

<iq type='set'\n    to='pubsub.mypubsub.com'\n    id='create1'>\n  <pubsub xmlns='http://jabber.org/protocol/pubsub'>\n    <create node='punsub_node_for_my_private_iphone' type='push'/>\n    <configure>\n      <x xmlns='jabber:x:data' type='submit'>\n        <field var='FORM_TYPE' type='hidden'>\n          <value>http://jabber.org/protocol/pubsub#node_config</value>\n        </field>\n        <field var='pubsub#access_model'>\n          <value>whitelist</value>\n        </field>\n        <field var='pubsub#publish_model'>\n          <value>publishers</value>\n        </field>\n      </x>\n    </configure>\n  </pubsub>\n</iq>\n

The pubsub.mypubsub.com will be used as a gateway for all notifications and will pass them through to the APNS and/or FCM.

The most important and only difference from the standard node creation is the type='push' part of the create element. According to XEP-0357: Push Notifications, a PubSub node is required to route the push notification mechanism. This implies you need a node that will handle your push notifications, hence we create a node called punsub_node_for_my_private_iphone. This node should be unique to the device and you may reuse nodes already created this way. The token obtained from APNS or FCM is a good option to ensure this uniqueness, by either using it directly or within some custom node name generation. It is also important from the security perspective to configure the node with:

  • access_model set to whitelist so only affiliated users can access the node.
  • publish_model set to publishers so only users with publisher or publisher_only role can publish notifications.
"},{"location":"tutorials/push-notifications/Push-notifications-client-side/#adding-the-servers-jid-to-allowed-publishers","title":"Adding the server's JID to allowed publishers","text":"

Push notifications to the push node are addressed from your server's JID. If the push node was configured with the above recommended options, you need to allow your server's JID to publish notifications to that node. Considering your JID is alice@mychat.com, your server's JID is just mychat.com. The following stanza sent to the just created push node will allow your server JID to publish notifications:

<iq to='pubsub.mypubsub.com'\n    type='set'\n    id='wy6Hibg='\n    from='alice@mychat.com/resource'>\n    <pubsub xmlns='http://jabber.org/protocol/pubsub#owner'>\n        <affiliations node='punsub_node_for_my_private_iphone'>\n            <affiliation jid='mychat.com' affiliation='publish-only'/>\n        </affiliations>\n    </pubsub>\n</iq>\n
"},{"location":"tutorials/push-notifications/Push-notifications-client-side/#enabling-push-notifications","title":"Enabling push notifications","text":"

The next and the last step is to enable push notifications on the server that handles your messages (and has mod_event_pusher_push enabled). Let's assume this server is available under the domain mychat.com.

To enable push notifications in the simplest configuration, just send the following stanza:

<iq type='set' id='x43'>\n  <enable xmlns='urn:xmpp:push:0' jid='pubsub.mypubsub.com' node='punsub_node_for_my_private_iphone'>\n    <x xmlns='jabber:x:data' type='submit'>\n      <field var='FORM_TYPE'><value>http://jabber.org/protocol/pubsub#publish-options</value></field>\n      <field var='service'><value>apns</value></field>\n      <field var='device_id'><value>your_pns_device_token</value></field>\n      <field var='silent'><value>false</value></field>\n      <field var='topic'><value>some_apns_topic</value></field>\n      <field var='priority'><value>some_priority</value></field>\n    </x>\n  </enable>\n</iq>\n

We have now enabled push notifications to be sent to the pubsub.mypubsub.com domain on the node punsub_node_for_my_private_iphone created previously, or in the case of PubSub-less, for whatever unique node name we give here, for example any variation of the token obtained from APNS or FCM. Please note that publish-options are specific to various XMPP Push Services.

"},{"location":"tutorials/push-notifications/Push-notifications-client-side/#publish-options","title":"Publish options","text":"

For mod_push_service_mongoosepush the following publish-options are mandatory:

  • device_id - device token (here: your_pns_device_token) that you received from your push notification service provider (as described in Registering with Push Service provider)
  • service - push notification service provider name (apns or fcm)

there are also some other publish-options supported:

  • mode - which may be either prod or dev (defaults to prod). Decides which connection pool type on MongoosePush shall be used. This may be used when APNS on MongoosePush is configured to work with both production and development certificate.
  • click_action - action to perform when notification is clicked on the device. activity on Android and category on iOS. Please refer to your platform / push notification service provider for more info.
  • topic - currently only used with APNS. The value is passed to APNS as topic header. For more information please refer to APNS documentation.
  • silent - if set to true, all notifications will be \"silent\". This means that only the data payload will be sent to the push notifications provider with no notification. The data payload will contain all notification fields as defined in XEP-0357: Push Notifications.
  • priority \u2014 which may be either normal or high, and if not given, defaults to normal. This value will set the push notification priority. Please refer to FCM / APNS documentation for more details on those values.
  • sound - sound that should be played when a notification arrives. Please refer to FCM/APNS documentation for more details.
  • mutable_content - only applicable to APNS. If set to true, sets \"mutable-content=1\" in the APNS payload.
  • time_to_live - only applicable to FCM. Maximum lifespan of an FCM notification. Please refer to the FCM documentation for more details.

Any other publish-options are ignored by mod_push_service_mongoosepush

"},{"location":"tutorials/push-notifications/Push-notifications-client-side/#disabling-push-notifications","title":"Disabling push notifications","text":"

Disabling push notifications is very simple. Just send the following stanza to your XMPP chat server:

<iq type='set' id='x44'>\n  <disable xmlns='urn:xmpp:push:0' jid='pubsub.mypubsub.com' node='punsub_node_for_my_private_iphone'/>\n</iq>\n

You may skip the node='punsub_node_for_my_private_iphone' to globally disable push notifications on all nodes that are registered with your JID. This may be used to disable push notifications on all your devices.

"},{"location":"tutorials/push-notifications/Push-notifications-client-side/#communication-overview","title":"Communication overview","text":"

One picture is worth a thousand words, so here are two diagrams showing the typical communication when using push notifications:

"},{"location":"tutorials/push-notifications/Push-notifications-client-side/#pubsub-full","title":"PubSub-full","text":""},{"location":"tutorials/push-notifications/Push-notifications-client-side/#pubsub-less","title":"PubSub-less","text":""},{"location":"tutorials/push-notifications/Push-notifications/","title":"How to set up Push Notifications","text":"

Push notifications are the bread and butter of the modern mobile experience, and MongooseIM has support for them. When used together with MongoosePush, you get out-of-the-box push notifications for FCM (Firebase Cloud Messaging) and APNS (Apple Push Notification Service) providers. And it's easy to extend it to any other protocols of your choice.

You might also want to read about the push notification's client side configuration.

All push notification mechanisms use mod_event_pusher_push as a backend implementation, read the relevant documentation to know more about it.

"},{"location":"tutorials/push-notifications/Push-notifications/#xep-0357-push-notifications","title":"XEP-0357: Push Notifications","text":"

Server side push notification support is fully compliant with XEP-0357: Push Notifications, which defines several components that need to work together in order to provide clients with working push notifications. However, there's just one non-optimal detail required by the aforementioned XEP: that push notifications being a PubSub service \u2014 we can do better than that.

If you're already familiar with the workings of XEP-0357: Push Notifications, make sure to have a look at our PubSub-less enhancement.

As it is always said, one picture is worth a thousand words:

Who does what is highly configurable. You may use MongooseIM as the XMPP server clients connect to, and send the push XMPP stanzas to a different server that will take care of the push business; or you might use MongooseIM as the remote XMPP-PubSub server that does such business. Note that the XEP doesn't enforce the push IQ stanza format, so whichever setup is used, you need to take care of the producing and processing of these stanzas.

You might also use MongooseIM as both, or you might even do both things within a single MongooseIM node (the most common setup!). Or, for the best performance, you might just skip that PubSub node altogether. While the whole setup can be incredibly extensible, we see the following straightforward uses of it.

"},{"location":"tutorials/push-notifications/Push-notifications/#xep-0357-compliant-with-local-pubsub","title":"XEP-0357 compliant with local PubSub","text":"

This is, historically, the most common setup. It allows your clients to enable push notifications via a local PubSub, and the IQ stanza is routed internally.

A direct connection to a push service (e.g. MongoosePush) must be configured on the same MongooseIM node. Check out this tutorial on how to setup MongoosePush.

[modules.mod_pubsub]\n  plugins = [\"push\"] # mandatory minimal config\n\n[modules.mod_event_pusher.push]\n  backend = \"mnesia\" # optional, default\n  wpool.workers = 200 # optional\n  plugin_module = \"mod_event_pusher_push_plugin_defaults\" # optional, default\n
"},{"location":"tutorials/push-notifications/Push-notifications/#advantages","title":"Advantages","text":"
  • Completely XEP-0357 compliant, and therefore compatible with any compliant 3rd party client library
  • No need to have two different servers
"},{"location":"tutorials/push-notifications/Push-notifications/#drawbacks","title":"Drawbacks","text":"
  • Less efficient (PubSub has a considerable impact on heavily loaded systems)
  • More load within a single node
  • Harder to customise
"},{"location":"tutorials/push-notifications/Push-notifications/#mongooseim-as-a-pubsub-less-xmpp-server","title":"MongooseIM as a PubSub-less XMPP server","text":"

PubSub is completely bypassed and clients don't need to create a push node \u2014 if they attempt to do so, and PubSub is not configured, the server would respond with an error stanza. They only have to provide the virtual PubSub address in the enable stanza, and node name can be anything unique. In order to ensure uniqueness the APNS/FCM token can be used. Note that the token must be provided as a publish option anyway.

A direct connection to a push service (e.g. MongoosePush) must be configured on the same MongooseIM node. Check out this tutorial on how to setup MongoosePush.

[modules.mod_event_pusher.push]\n  backend = \"mnesia\" # optional, default\n  wpool.workers = 200 # optional\n  plugin_module = \"mod_event_pusher_push_plugin_defaults\" # optional, default\n  virtual_pubsub_hosts = [\"pubsub.@HOST@\"]\n
"},{"location":"tutorials/push-notifications/Push-notifications/#advantages_1","title":"Advantages","text":"
  • No need to use PubSub at all
  • More efficient (PubSub has a considerable impact on heavily loaded systems)
  • Simpler client-side usage \u2014 Read about the client side configuration here
"},{"location":"tutorials/push-notifications/Push-notifications/#drawbacks_1","title":"Drawbacks","text":"
  • If the client application is built to create the push PubSub node, this might require a migration for such client \u2014 as he attempts to create the node, the server will answer with an IQ error stanza. If migrating the client side is a problem, there's a solution for that in the module section
"},{"location":"tutorials/push-notifications/Push-notifications/#virtual-pubsub-hosts","title":"Virtual PubSub hosts","text":"

These domains will shadow any identical domain configured for PubSub, stealing any notification published to it. It enables easy migration from PubSub-full deployments to PubSub-less variants. Read more in the relevant section.

"},{"location":"tutorials/push-notifications/Push-notifications/#overview-of-all-the-involved-mongooseim-components","title":"Overview of all the involved MongooseIM components","text":"

The components that make push notifications possible in MongooseIM comprise the following architecture:

PubSub-full setup PubSub-less setup"},{"location":"tutorials/push-notifications/Push-notifications/#mod_event_pusher_push","title":"mod_event_pusher_push","text":"

The first component that we need to configure in MongooseIM is the mod_event_pusher_push module.

"},{"location":"tutorials/push-notifications/Push-notifications/#mod_push_service_mongoosepush","title":"mod_push_service_mongoosepush","text":"

A connector to MongoosePush application. You can read more about it here.

"},{"location":"tutorials/push-notifications/Push-notifications/#mod_pubsubs-push-node","title":"mod_pubsub's push node","text":"

According to the XEP-0357: Push Notifications, all notifications generated via the module we have just enabled (i.e. mod_event_pusher_push) have to be sent to a push enabled publish-subscribe node. In order to allow clients to allocate such a node, we need to enable it in our mod_pubsub on the MongooseIM server that will communicate with the XMPP Push Service.

"},{"location":"user-guide/Features/","title":"MongooseIM Features","text":"

MongooseIM is Erlang Solutions' robust, scalable and efficient XMPP server, aimed at large installations. Specifically designed for enterprise purposes, it is fault-tolerant and can utilise the resources of multiple clustered machines.

Some traits that make it unique include:

  • Massive scalability: simple growth through adding nodes provides cost-effectiveness as well as great resource utilisation.
  • Platform approach: designed with consistency, end-to-end battle testing across the whole ecosystem (all server and client components, and tools) can be performed.
  • Dynamic domains: thanks to the support for multi-tenancy, it is possible to set up thousands of domains dynamically without a noticeable performance overhead.
  • Code quality: extensive refactoring, substantial optimisations, continuous integration and deployment.
  • Extensive testing: automated continuous functional code coverage, integration testing, end-to-end testing with real clients.
  • Unique openness: no proprietary extensions, fully open source, fully open standards.
  • Contributions to (XMPP Standards Foundation): implementations of XEPs, innovations contributed.
  • Professional support and flexible customer service.
  • Contributions to third party open source projects: strengthening the Erlang and XMPP ecosystems.
"},{"location":"user-guide/Features/#architecture","title":"Architecture","text":"

MongooseIM brings configurability, scalability and fault-tolerance to the core feature of XMPP \u2013 routing messages. Its architecture is based on a set of pluggable extension modules that enable different features, including:

  • Websockets: long-lived connections in the browser
  • BOSH: HTTP long-polling
  • MUC (Multi-User Chat): group chat
  • Rosters: contact list, and subscriptions to users' presences
  • MAM: Message Archive Management
  • Message Carbons: for multi-device, real-time copies of all messages
  • Last activity
  • Metrics
  • Offline messages
  • Privacy settings
  • vCards: user profiles

This modular architecture allows high customisability and easy access to the required features.

MongooseIM enables authenticating users using external or internal databases (Mnesia, RDBMS, NoSQL), LDAP, HTTP or external scripts. It also allows connecting anonymous users, when required.

For storing persistent data, MongooseIM uses Mnesia (the distributed internal Erlang database), relational databases: MySQL, PostgreSQL or NoSQL alternative: Cassandra. Please take a look at database backends configurations to learn more. If necessary, MongooseIM can be customised to work with a different database. You can contact us to learn more.

Basic MongooseIM session storage is handled in Mnesia, but using Redis is also possible. It is also possible to store user Message Archives using ElasticSearch or Cassandra.

"},{"location":"user-guide/Features/#deployment-and-management","title":"Deployment and management","text":"

MongooseIM can be deployed for a number of scenarios fitting your needs. The simplest installation setup consists of a single MongooseIM node using Mnesia, so it does not require any additional services. Such system is sufficient for fast deployment and connecting XMPP clients.

A more scalable solution would be deploying MongooseIM with an external database for persistent data. Bigger setups may require a cluster of MongooseIM nodes, and a load balancer to manage the traffic from the client applications.

A single MongooseIM node can handle as many as 2.5 million online users. Based on our load tests, for deployments with multiple nodes, we are confident that 10 million online users is well within reach. Please note that such scalability numbers depend on the selected feature set that your MongooseIM installation is running.

For more details please see our blogpost: Scaling a Mongoose: How scalable is the MongooseIM XMPP server?

If the service requires a cluster of more than 10 nodes, we recommend using Redis instead of Mnesia for session storage. To avoid a single point of failure, a master-slave Redis setup is advisable.

MongooseIM allows connecting different clusters as parts of larger systems. This feature is used in geo-localised services handling massive traffic from all over the world.

MongooseIM gathers over 50 different XMPP-related metrics, allowing close monitoring of what happens inside the nodes. To manage the users, rosters, messages and general settings, we provide a GraphQL API which can be utilized via HTTP or command-line tool mongooseimctl (see GraphQL Admin API).

Erlang Solutions also provides WombatOAM, an Erlang VM monitoring solution, that enables ops and devs to better understand what is going on in a MongooseIM cluster.

For load testing we use our own tools, that enable us to validate MongooseIM's scalability, given different scenarios.

"},{"location":"user-guide/Features/#multi-tenancy-dynamic-domains","title":"Multi-tenancy (dynamic domains)","text":"

MongooseIM supports multi-tenancy. This makes it possible to set up thousands of domains dynamically without a noticeable performance overhead. On more information on how to set up this feature, see dynamic domains configuration.

"},{"location":"user-guide/Features/#integration-with-other-platform-components","title":"Integration with other platform components","text":""},{"location":"user-guide/Features/#client-applications","title":"Client applications","text":"

In order to build client applications, the MongooseIM team recommends the following libraries:

XMPP REST API iOS XMPPframework, Objective-C Jayme, Swift Android Smack, Java Retrofit, Java Web Stanza.io/Strophe.js, JavaScript"},{"location":"user-guide/Features/#mongoosepush","title":"MongoosePUSH","text":"

MongooseIM can be integrated with MongoosePush. For more details visit the push notification user guide.

"},{"location":"user-guide/Features/#mongooseice","title":"MongooseICE","text":"

You can also connect Mongoose with MongooseICE. To get started, we recommend going through this tutorial.

"},{"location":"user-guide/High-level-Architecture/","title":"High-level Architecture","text":""},{"location":"user-guide/High-level-Architecture/#inside-mongooseim","title":"Inside MongooseIM","text":""},{"location":"user-guide/High-level-Architecture/#modules","title":"Modules","text":"

At its core MongooseIM is a huge message router you can customise to fit your system's needs. You can choose and enable behaviours and functionalities by configuring any of the available modules. A wide range of options includes authentication, privacy, storage, backend integration and mobile optimisations. See Extension Modules for a full list.

Modules can be configured and started either for all virtual hosts served by the instance or with individual configuration for only some of them. Modules may depend on services and on other modules. If a module depends on other modules, required modules are started automatically with configuration provided by the dependent module. If a module requires certain services which are not running, the module will not start.

"},{"location":"user-guide/High-level-Architecture/#services","title":"Services","text":"

Services provide certain functionalities not specific to virtual hosts but rather applied to the whole instance or to modules started for various hosts. They are configured globally and launched on startup, before modules, so that needed dependencies are satisfied. A service can require other services to be operational; required services are started automatically. The required service must also be present in the server's configuration file. Modules which are not host-specific are gradually being refactored to services.

"},{"location":"user-guide/High-level-Architecture/#databases","title":"Databases","text":"

MongooseIM manages two sets of data: transient for session data management, and persistent for archive and configurations.

Please refer to Database Backends doc for more configuration information.

"},{"location":"user-guide/High-level-Architecture/#transient-databases","title":"Transient databases","text":"

In the MongooseIM architecture each MongooseIM node host has an accompanying Mnesia node.

Redis on the other hand forms a separate cluster and does not utilise MongooseIM nodes.

There is no need to set up any backups for transient data since it naturally rebuilds as clients reconnect massively.

"},{"location":"user-guide/High-level-Architecture/#persistent-databases","title":"Persistent databases","text":"

Both RDBMS/SQL (MySQL/PostgreSQL) and NoSQL (Cassandra) databases are supported.

Backups should be regular, and tested.

"},{"location":"user-guide/High-level-Architecture/#ldap-directory","title":"LDAP directory","text":"

LDAP will also run on a separate cluster.

Backups should be regular, and tested.

"},{"location":"user-guide/High-level-Architecture/#outside-mongooseim-ecosystem-in-a-datacenter","title":"Outside MongooseIM: ecosystem in a datacenter","text":""},{"location":"user-guide/High-level-Architecture/#frontend","title":"Frontend","text":"

Native clients on platforms such as Android, iOS, Windows, Linux, macOS, will preferably use a plain XMPP over TCP connections.

Since web clients cannot use TCP connections, they will preferably use XMPP over websockets, or the now less relevant XMPP over BOSH (using long-lived HTTP connections, more and more used as fallback).

Any client could use the client GraphQL API, which is using HTTP request/responses.

All these client connections will hit a frontend load balancer before reaching the MongooseIM cluster.

"},{"location":"user-guide/High-level-Architecture/#backend","title":"Backend","text":"

MongooseIM supports bilateral communication with other backend services in the datacenter infrastructure.

MongooseIM GraphQL API is available for control/management of MongooseIM's operations as well as the functional aspects.

An HTTP notification enables forwarding of the events to any other external HTTP service.

"},{"location":"user-guide/High-level-Architecture/#management-and-monitoring","title":"Management and monitoring","text":"

WombatOAM enables the monitoring and management of MongooseIM clusters, as well as RabbitMQ and any other Erlang and Elixir based system.

"},{"location":"user-guide/High-level-Architecture/#mongooseice-stunturn","title":"MongooseICE (STUN/TURN)","text":"

Available on: MongooseICE

"},{"location":"user-guide/High-level-Architecture/#mongoosepush-apns-gcm","title":"MongoosePush (APNS, GCM)","text":"

Available on: MongoosePush

"},{"location":"user-guide/High-level-Architecture/#mongooseim-in-a-worldwide-multi-datacenter-configuration","title":"MongooseIM in a worldwide, multi-datacenter configuration","text":"

The MongooseIM platform enables a service to scale worldwide, with proximity servers across continents and datacenters. It leverages the use of the open standard S2S (server-to-server) protocol.

We advise contacting us in case of such a big deployment.

"},{"location":"user-guide/Supported-XEPs/","title":"Supported XEPs","text":"XEP Name Version Status Modules 0004 Data Forms 2.13.1 complete mongoose_data_forms 0012 Last Activity 2.0 complete mod_last 0016 Privacy Lists 1.7 complete mod_privacy 0022 Message Events 1.4 complete mod_offline 0023 Message Expiration 1.3 complete mod_offline 0030 Service Discovery 2.5rc3 complete mod_disco 0045 Multi-User Chat 1.34.5 complete mod_muc 0049 Private XML Storage 1.2 complete mod_private 0050 Ad-Hoc Commands 1.3.0 complete adhoc 0054 vcard-temp 1.2 complete mod_vcard 0055 Jabber Search 1.3 complete mod_vcard 0059 Result Set Management 1.0 complete jlib 0060 Publish-Subscribe 1.25.0 complete mod_pubsub 0068 Field Standardization for Data Forms 1.3.0 complete mongoose_data_forms 0077 In-Band Registration 2.4 complete mod_register 0079 Advanced Message Processing 1.2 partial mod_amp 0082 XMPP Date and Time Profiles 1.1.1 complete mod_time 0083 Nested Roster Groups 1.0 complete mod_roster 0085 Chat State Notifications 2.1 complete mod_offline 0086 Error Condition Mappings 1.0 complete jlib 0092 Software Version 1.1 complete mod_version 0093 Roster Item Exchange 1.2 complete mod_roster 0114 Jabber Component Protocol 1.6 complete ejabberd_service 0115 Entity Capabilities 1.6.0 complete mod_caps 0124 Bidirectional-streams Over Synchronous HTTP (BOSH) 1.11.2 complete mod_bosh 0126 Invisibility 1.1 complete mod_privacy 0157 Contact Addresses for XMPP Services 1.1.1 complete mod_disco 0160 Best Practices for Handling Offline Messages 1.0.1 complete mod_offline, mod_offline_chatmarkers 0163 Personal Eventing Protocol 1.2.2 complete mod_pubsub 0170 Recommended Order of Stream Feature Negotiation 1.0 complete mongoose_c2s 0175 Best Practices for Use of SASL ANONYMOUS 1.2 complete cyrsasl_anonymous 0178 Best Practices for Use of SASL EXTERNAL with Certificates 1.2 partial cyrsasl_external 0185 Dialback Key Generation and Validation 1.0 complete mongoose_s2s_dialback 0191 Blocking Command 
1.3 complete mod_blocking 0198 Stream Management 1.6.1 complete mod_stream_management 0199 XMPP Ping 2.0.1 complete mod_ping 0202 Entity Time 2.0 complete mod_time 0206 XMPP Over BOSH 1.4 complete mod_bosh 0215 External Service Discovery 1.0.0 complete mod_extdisco 0220 Server Dialback 1.1.1 complete ejabberd_s2s_out, mongoose_s2s_dialback 0237 Roster Versioning 1.3 complete mod_roster 0248 PubSub Collection Nodes 0.3.0 complete mod_pubsub 0249 Direct MUC Invitations 1.2 complete mod_muc 0277 Microblogging over XMPP 0.6.5 complete mod_pubsub 0279 Server IP Check 0.2 complete mod_sic 0280 Message Carbons 1.0.1 complete mod_carboncopy 0313 Message Archive Management 1.1.0 complete mod_mam 0333 Displayed Markers 0.4 complete mod_smart_markers 0352 Client State Indication 1.0.0 complete mod_csi 0357 Push Notifications 0.4.1 complete mod_event_pusher_push 0363 HTTP File Upload 1.1.0 complete mod_http_upload 0384 OMEMO Encryption 0.8.3 complete mod_pubsub 0386 Bind 2 0.4.0 partial mod_bind2 0388 Extensible SASL Profile 0.4.0 partial mod_sasl2 0424 Message Retraction 0.3.0 complete mod_mam"},{"location":"user-guide/Supported-standards/","title":"Supported standards","text":"
  • XMPP Core: RFC 3920, RFC 6120

    Note

    In RFC 6120 there are 3 different strategies defined in case of a session conflict (same full JID). They are described in 7.7.2.2. Conflict. MongooseIM always uses the 3rd option. It terminates the older session with a <conflict/> stream error.

  • XMPP Instant Messaging and Presence: RFC 3921, RFC 6121

  • Client connections:
    • over TCP (with TLS/STARTTLS available) as defined in RFC 6120
    • over WebSockets as defined in RFC 7395
    • over HTTP(S) long-polling (BOSH) as defined in XEP-0124: Bidirectional-streams Over Synchronous HTTP (BOSH) and XEP-0206: XMPP Over BOSH
    • GraphQL API
    • REST API (deprecated)
  • Server/backend connections:
    • GraphQL API
    • REST API (deprecated)
  • Configurable database backends:
    • Transient:
      • Mnesia
      • Redis
    • Persistent:
      • RDBMS: MySQL, PostgreSQL, generic ODBC
      • NoSQL: Cassandra
  • Integration with third-party services
    • Amazon Simple Notification Service
"},{"location":"user-guide/Supported-standards/#supported-xeps","title":"Supported XEPs","text":"XEP Name Version Status Modules 0004 Data Forms 2.13.1 complete mongoose_data_forms 0012 Last Activity 2.0 complete mod_last 0016 Privacy Lists 1.7 complete mod_privacy 0022 Message Events 1.4 complete mod_offline 0023 Message Expiration 1.3 complete mod_offline 0030 Service Discovery 2.5rc3 complete mod_disco 0045 Multi-User Chat 1.34.5 complete mod_muc 0049 Private XML Storage 1.2 complete mod_private 0050 Ad-Hoc Commands 1.3.0 complete adhoc 0054 vcard-temp 1.2 complete mod_vcard 0055 Jabber Search 1.3 complete mod_vcard 0059 Result Set Management 1.0 complete jlib 0060 Publish-Subscribe 1.25.0 complete mod_pubsub 0068 Field Standardization for Data Forms 1.3.0 complete mongoose_data_forms 0077 In-Band Registration 2.4 complete mod_register 0079 Advanced Message Processing 1.2 partial mod_amp 0082 XMPP Date and Time Profiles 1.1.1 complete mod_time 0083 Nested Roster Groups 1.0 complete mod_roster 0085 Chat State Notifications 2.1 complete mod_offline 0086 Error Condition Mappings 1.0 complete jlib 0092 Software Version 1.1 complete mod_version 0093 Roster Item Exchange 1.2 complete mod_roster 0114 Jabber Component Protocol 1.6 complete ejabberd_service 0115 Entity Capabilities 1.6.0 complete mod_caps 0124 Bidirectional-streams Over Synchronous HTTP (BOSH) 1.11.2 complete mod_bosh 0126 Invisibility 1.1 complete mod_privacy 0157 Contact Addresses for XMPP Services 1.1.1 complete mod_disco 0160 Best Practices for Handling Offline Messages 1.0.1 complete mod_offline, mod_offline_chatmarkers 0163 Personal Eventing Protocol 1.2.2 complete mod_pubsub 0170 Recommended Order of Stream Feature Negotiation 1.0 complete mongoose_c2s 0175 Best Practices for Use of SASL ANONYMOUS 1.2 complete cyrsasl_anonymous 0178 Best Practices for Use of SASL EXTERNAL with Certificates 1.2 partial cyrsasl_external 0185 Dialback Key Generation and Validation 1.0 complete mongoose_s2s_dialback 
0191 Blocking Command 1.3 complete mod_blocking 0198 Stream Management 1.6.1 complete mod_stream_management 0199 XMPP Ping 2.0.1 complete mod_ping 0202 Entity Time 2.0 complete mod_time 0206 XMPP Over BOSH 1.4 complete mod_bosh 0215 External Service Discovery 1.0.0 complete mod_extdisco 0220 Server Dialback 1.1.1 complete ejabberd_s2s_out, mongoose_s2s_dialback 0237 Roster Versioning 1.3 complete mod_roster 0248 PubSub Collection Nodes 0.3.0 complete mod_pubsub 0249 Direct MUC Invitations 1.2 complete mod_muc 0277 Microblogging over XMPP 0.6.5 complete mod_pubsub 0279 Server IP Check 0.2 complete mod_sic 0280 Message Carbons 1.0.1 complete mod_carboncopy 0313 Message Archive Management 1.1.0 complete mod_mam 0333 Displayed Markers 0.4 complete mod_smart_markers 0352 Client State Indication 1.0.0 complete mod_csi 0357 Push Notifications 0.4.1 complete mod_event_pusher_push 0363 HTTP File Upload 1.1.0 complete mod_http_upload 0384 OMEMO Encryption 0.8.3 complete mod_pubsub 0386 Bind 2 0.4.0 partial mod_bind2 0388 Extensible SASL Profile 0.4.0 partial mod_sasl2 0424 Message Retraction 0.3.0 complete mod_mam"},{"location":"user-guide/Supported-standards/#supported-open-extensions","title":"Supported Open Extensions","text":"Name Module MUC Light mod_muc_light Inbox mod_inbox Token-based reconnection mod_auth_token, mod_keystore MAM extensions mam"}]} \ No newline at end of file diff --git a/latest/sitemap.xml b/latest/sitemap.xml index 9f6158ef7..431928897 100644 --- a/latest/sitemap.xml +++ b/latest/sitemap.xml @@ -2,742 +2,742 @@ https://esl.github.io/MongooseDocs/latest/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/Contributions/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/History/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/authentication-methods/anonymous/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/authentication-methods/dummy/ - 2024-07-31 + 
2024-08-06 daily https://esl.github.io/MongooseDocs/latest/authentication-methods/external/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/authentication-methods/http/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/authentication-methods/jwt/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/authentication-methods/ldap/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/authentication-methods/pki/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/authentication-methods/rdbms/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/configuration/Erlang-cookie-security/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/configuration/Modules/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/configuration/Services/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/configuration/TLS-hardening/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/configuration/access/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/configuration/acl/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/configuration/auth/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/configuration/configuration-files/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/configuration/database-backends-configuration/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/configuration/general/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/configuration/host_config/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/configuration/internal-databases/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/configuration/listen/ - 2024-07-31 + 2024-08-06 daily 
https://esl.github.io/MongooseDocs/latest/configuration/outgoing-connections/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/configuration/release-options/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/configuration/s2s/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/configuration/shaper/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/developers-guide/Basic-iq-handler/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/developers-guide/Bootstrap-Scripts/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/developers-guide/Hooks-and-handlers/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/developers-guide/OpenSSL-and-FIPS/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/developers-guide/SCRAM-serialization/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/developers-guide/Stanza-routing/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/developers-guide/Testing-MongooseIM/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/developers-guide/accumulators/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/developers-guide/domain_management/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/developers-guide/hooks_description/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/developers-guide/logging/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/developers-guide/mod_amp_developers_guide/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/developers-guide/mod_muc_light_developers_guide/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/developers-guide/mongoose_wpool/ - 2024-07-31 + 2024-08-06 daily 
https://esl.github.io/MongooseDocs/latest/developers-guide/release_config/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/developers-guide/xep_tool/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/getting-started/Installation/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/getting-started/Quick-setup/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/graphql-api/Admin-GraphQL/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/graphql-api/User-GraphQL/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/listeners/listen-c2s/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/listeners/listen-components/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/listeners/listen-http/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/listeners/listen-s2s/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/migrations/3.1.1_3.2.0/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/migrations/3.3.0_3.4.0/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/migrations/3.5.0_3.6.0/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/migrations/3.6.0_3.7.0/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/migrations/3.7.0_4.0.0/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/migrations/4.0.0_4.0.1/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/migrations/4.0.1_4.1.0/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/migrations/4.1.0_4.2.0/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/migrations/4.2.0_5.0.0/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/migrations/5.0.0_5.1.0/ - 2024-07-31 + 2024-08-06 daily 
https://esl.github.io/MongooseDocs/latest/migrations/5.1.0_6.0.0/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/migrations/6.0.0_6.1.0/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/migrations/6.1.0_6.2.0/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/migrations/6.2.0_6.2.1/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/migrations/6.2.1_x.x.x/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/migrations/jid-from-mam-muc-script/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/modules/mod_adhoc/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/modules/mod_amp/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/modules/mod_auth_token/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/modules/mod_bind2/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/modules/mod_blocking/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/modules/mod_bosh/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/modules/mod_cache_users/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/modules/mod_caps/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/modules/mod_carboncopy/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/modules/mod_csi/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/modules/mod_disco/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/modules/mod_domain_isolation/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/modules/mod_event_pusher/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/modules/mod_event_pusher_http/ - 2024-07-31 + 2024-08-06 daily 
https://esl.github.io/MongooseDocs/latest/modules/mod_event_pusher_push/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/modules/mod_event_pusher_rabbit/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/modules/mod_event_pusher_sns/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/modules/mod_extdisco/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/modules/mod_global_distrib/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/modules/mod_http_upload/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/modules/mod_inbox/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/modules/mod_jingle_sip/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/modules/mod_keystore/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/modules/mod_last/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/modules/mod_mam/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/modules/mod_muc/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/modules/mod_muc_light/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/modules/mod_muc_log/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/modules/mod_offline/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/modules/mod_offline_stub/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/modules/mod_ping/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/modules/mod_presence/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/modules/mod_privacy/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/modules/mod_private/ - 2024-07-31 + 2024-08-06 daily 
https://esl.github.io/MongooseDocs/latest/modules/mod_pubsub/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/modules/mod_push_service_mongoosepush/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/modules/mod_register/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/modules/mod_roster/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/modules/mod_sasl2/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/modules/mod_shared_roster_ldap/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/modules/mod_sic/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/modules/mod_smart_markers/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/modules/mod_stream_management/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/modules/mod_time/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/modules/mod_vcard/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/modules/mod_version/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/open-extensions/inbox/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/open-extensions/mam/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/open-extensions/muc_light/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/open-extensions/smart-markers/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/open-extensions/token-reconnection/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/operation-and-maintenance/Cluster-configuration-and-node-management/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/operation-and-maintenance/Cluster-management-considerations/ - 2024-07-31 + 2024-08-06 daily 
https://esl.github.io/MongooseDocs/latest/operation-and-maintenance/Cluster-restart/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/operation-and-maintenance/Humio/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/operation-and-maintenance/Logging-%26-monitoring/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/operation-and-maintenance/Logging-fields/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/operation-and-maintenance/Logging/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/operation-and-maintenance/MongooseIM-metrics/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/operation-and-maintenance/Rolling-upgrade/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/operation-and-maintenance/System-Metrics-Privacy-Policy/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/operation-and-maintenance/gdpr-considerations/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/operation-and-maintenance/known-issues/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/operation-and-maintenance/tls-distribution/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/rest-api/Administration-backend/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/rest-api/Client-frontend/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/tutorials/CETS-configure/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/tutorials/Docker-build/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/tutorials/How-to-build/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/tutorials/ICE_tutorial/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/tutorials/Jingle-SIP-setup/ - 2024-07-31 + 2024-08-06 daily 
https://esl.github.io/MongooseDocs/latest/tutorials/client-certificate/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/tutorials/iOS_tutorial/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/tutorials/push-notifications/MongoosePush-setup/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/tutorials/push-notifications/Push-notifications-client-side/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/tutorials/push-notifications/Push-notifications/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/user-guide/Features/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/user-guide/High-level-Architecture/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/user-guide/Supported-XEPs/ - 2024-07-31 + 2024-08-06 daily https://esl.github.io/MongooseDocs/latest/user-guide/Supported-standards/ - 2024-07-31 + 2024-08-06 daily \ No newline at end of file diff --git a/latest/sitemap.xml.gz b/latest/sitemap.xml.gz index 7570fba6494dbdb712011f94aac5a3c08d2b8546..4a80d4f0252fc88271945921bd3aa7c9bb32fce3 100644 GIT binary patch delta 1514 zcmV>42uj0ABzYGfM>Ch2OodCZPpD*iY`l+3<>JiDX=?%rj8}n6pzY}B<`;t zDcMaNFDQlq1K!vOY*Qb3UWy{mIi$|f+q>56-H`K|?eYG2bg;iiR&k|meZ2qq`n`Cw z|Muj?QFO*r*h@QGf4tv>(EVBzx3{+=uLHHR^CNb3;Z>7<_+njH9E^V1KY4$#mtOe< z!?*r*WLTZ#v-orx@@7=)(8T#jb8*GCX7=8**buu}ayb=d* z#KB?OWnK1QH`Pd6)56i$lS*o{Iw~HN3|z%`u>Xd5$~HJXk0I5gM*u$8KCoLMpvo8; zvOz1EYge=s8m@dHE!$Pg(F1>LsAAh9Ujw=eRBRWzj;_Zq&YVgAM=0i{CgDj%r^58E#B$yIk+8vTC_#)#Y15v2Vlg-fFwhy%GJ;X0M(ae1@z#TBHCKlm*!0a97^{EjG5^P?)iN1>Awo>` z)yeT`z8WDrUA(JWZN(qCmktPiTDmf$Vi5Jh(Zvg51`&!x$oetzs7jXob$#?G*; z(9pG=^Bo%Wm*igx3YypjtUTYSYHPT&Tw%GUM!5Z1d3hy z5aEXR4lsY5)j9HH72$E~8>v>1*a5G$&G+>sSIJD~Ob8W5PU)njmwpu`R~l9&G&zVCm|1yFbXSU|seML4=oegdLmoi{2c z8^{x*Ns!Dw>8Xj?oY_vp)bC}vB7rq@{aEI`o~+pGiZ)Bxo`pO|{iAuur%SF8g&kigZCH7zum%0CM&*dp%peB`{ZPAl+>@`;Vf4J>xGlNdPxo)$A( 
zr4ExE1sZ?fTA{s<1g<0)FV+h;B8qLAZI=5-@-zIPZJi+$O2Vnp*|c0Z&X)v&J;ZFB zNaKssv+LP+KY*~QR_o_{D*_{uU!0zmn_pPufA))(#Oz{mc3#5y$C2 QQqOquH-Ou|=aOy!0J=f>^Z)<= delta 1514 zcmV>42uj0ABzYGfPSfw2OobsZMqFJiDX=?%rj8}n6pzY}IPR|> zDcMb&EGUKn1K!vOY*Qb3UWy{mIi$|<+uPRc-H`K|?VJ7a=wN@3tl~=B`py35>-Xa4 z{kJDCj-xZ4!d}|h`ZxPc2;Hwmv0AN0UI%Jr=SS@7!mB3z@WrOEI2ir1fAW7~FTL^! zhHw4r*swauXYuJYSS^-c_|KF zi^Fl+Wm9%Xd5$~HJXk0I5gM*u$8KCoLOpvo8; zvOz1EYge=s8m@dHE!%a=(F1>LsAAh9Ujw=gY}*sytzeRyPJk_*W2jm8W}3C6V8SB1DjXTg{SbGc0{jFKx{)0@1n!D%$Z&%7 zqK>+v;y3BF68dYhGHqp0MKA{+ob1F*Io$;hsp35OhhC)1u)P+TGCp)3af22?R^RHe~%6P}Bt zVW)D{F&jt;8!reb4)93AEp=r8ssEZnKSKK;U7)JCu<4sIFjjxlL;jCZt7S6&LWG#= ztCQo?d^JLLx_DQ$+KNAN$;V4H_|M7SMq(dAUD;0?z6pg9ym-vbZO1m5beQ-k3nCtY zg9N);T@QgSUmT9cqj7mSnv4$M$PY&ciTouX`KzA%2$1}!Cw~n{o|wzjR^Z5c=AL{E zNIp*FW57Be_jP|h0VGe%&%3gF}v?DFZJtmc863pKDu9;Gol? zOOV-*9d7mw8ncqBp)z`q#R$9T9U!xtvsK)!;mEy4L#}_cR}2R|%))EWIP)v_`RRsi zD7)wzjJ0M@v6m=8=O+c^4`=~S&gVKnOYjzMh@#s7*>To}=hEa(KR@=4MxT!=V`o@a zXz1F``4$cOOY$!am0rX3@k?6gJPCxxok`G`TiL=PM+JyHtgnLvXSg^8FBV2y0>v(U zh;YMu2N-|O>Ku8pitxDgjZ|w$?0{F>aqa={$K;-se#jb8ZWlx&I?RshywnUX)Z!SC z+DmWXrc-<)9c^p8M9FWoqd>#iw7!)tB>%xJ0cTcxa3ou>C&BIu2)czENN<#MaIv3F zo=ya8?#POq9nkuI4G2uw>LEBtP~s6bl9&GwzVCm|1yFbXP(Z(WML4=kegdLmlQ$|S z8^{x*Ns!Dw>8Xj?oY_so)bC}vB7rq@{aEImo~+pGindGHo`pO|{iAuufzSF8g&kigZCH7zum%0CM&*dp%oeB`{ZPAl+>@`;Vf4J>xGlNdPxo)$A( zrw)@G1sZ>^tkB*^0#_1@7n_9}5ydvmHp|^3`5AuDw#g6*CE?WQY+5cH=Su>?9%435 zr18b++4bzZA3#{_sWf?%Vji^ttMzkUiNJ{D7pG_C_7@iUpZ%gGF}s-JBrmz6S3@VG z$OXN~ng7A=RsCTG+HUg1rm(!J_TKQ>9x%