diff --git a/.gitignore b/.gitignore
index c007f3ca..c62fc224 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,5 +1,13 @@
-**/.idea/
target/
+pom.xml.tag
+pom.xml.releaseBackup
+pom.xml.versionsBackup
+pom.xml.next
+release.properties
+dependency-reduced-pom.xml
+buildNumber.properties
+.mvn/timing.properties
+**/.idea/
*.iml
**/nbproject/**
**/nbactions.xml
@@ -8,7 +16,8 @@ target/
.project
.classpath
.settings/
+.checkstyle
hs_err*.log
/documentation/static
/documentation/site
-.DS_Store
+.DS_Store
\ No newline at end of file
diff --git a/.travis.yml b/.travis.yml
index 3680405e..9637f2a8 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,30 +1,49 @@
language: java
-sudo: false
+sudo: required
dist: trusty
-script: mvn -Psafer -Pintegration -Passembler -B -e -T 1C verify
jdk: oraclejdk8
-env:
- # the SONAR_TOKEN variable
- - secure: "eQaksffQhrlaAKEFWIGR7Wbo01QKscfPl7MOQHQAytP74uU6in1VcPITdHr2UgTH/PhjMJ5Uevw7mrzDN+pDE8csNxVF7JozEAIh6DYhmBmmBOYu3+AypDqdtaBOmgAPkTLxSthiy5hsmOgCjfXPGrWqpQJ918n0RtdIL1p7W4L5N3I2yDcQOpFSXXizWADpGH/fErhihKdepNCco0JCfxLmpFa+i+goqsF1EEJPb2Ylz4LYhlez2NqULGQWio2+ucrYLDZHFTrdRyQ9Tq/RA62zdynmWQAYuE5nYmO1vmvk2mR+xQpIyevvAQY4RmffYKkK/Hq334H8rIH7etCn/e0fzaLnOveyIttoG0yaqpY/mPIZkOQBV0pSjCFONvt0gsgFfPYv5sHvRBM2dj1knwOLqwhqseR6en8+TaqeP2aYj/ittU3+7QJ9yHorfAYG14ofxm+Ue4o9wJ5FsyAzw5IpYnu28WRGy7+7kFfzo1prNh7dlqjDKrBMeK292seVPH+RDMaIs2ZAzfUZElhOtqvXGsuBfDtO2FG7L9ke+tbVB/z/srM0cXPJW0xcPdoeVgoH5+CyLTaR4+8tT0recQwfKrl7zluwKk+zWmxCs5BuV+JHcWrgC+sD0iKvOHbf/w+s5bLOs0kB4YjoiUZ0pK+9NphR/Mew7shnTFZi63o="
- # TODO: Add a SONAR_GITHUB_TOKEN to support Sonar on pull requests (or wait until they improve their compatibility)
+before_install:
+ - wget https://archive.apache.org/dist/maven/maven-3/3.2.2/binaries/apache-maven-3.2.2-bin.tar.gz
+ - tar xf apache-maven-3.2.2-bin.tar.gz
+ - export M2_HOME=$PWD/apache-maven-3.2.2
+ - export PATH=$M2_HOME/bin:$PATH
+  # Required to use Codacy
+ - sudo apt-get install jq
+ - wget -O ~/codacy-coverage-reporter-assembly-latest.jar $(curl https://api.github.com/repos/codacy/codacy-coverage-reporter/releases/latest | jq -r .assets[0].browser_download_url)
addons:
- sonarqube:
- branches:
- - devel
- - master
apt:
packages:
- oracle-java8-installer
+
+cache:
+ directories:
+ - $HOME/.m2/repository
+
+before_cache:
+ - rm -f $HOME/.m2/repository/com/torodb
before_script:
-- export GIT_BRANCH=$TRAVIS_BRANCH
+ - export GIT_BRANCH=$TRAVIS_BRANCH
-cache:
- directories:
- - '$HOME/.sonar/cache'
+script: |
+ if [ "$TRAVIS_EVENT_TYPE" == cron ] && [ "$TRAVIS_BRANCH" == devel ]
+ then
+ bash .travis/build-packages
+ else
+ mvn -Psafer -Pintegration -B -e -T 1C -Dcheckstyle.consoleOutput=false verify
+ fi
after_success:
- - if [[ $TRAVIS_REPO_SLUG = torodb/torodb ]]; then bash <(curl -s https://codecov.io/bash) || echo 'Codecov did not collect coverage reports'; else echo 'Codecov not notified'; fi
- - if [[ $TRAVIS_REPO_SLUG = torodb/torodb ]]; then mvn sonar:sonar || echo 'Error while notifying SonarQube'; else echo 'SonarQube not notified'; fi
+ - |
+ if [ "$TRAVIS_EVENT_TYPE" != cron ] && [ "$TRAVIS_REPO_SLUG" == torodb/stampede ]
+ then
+ # Upload reports to Codecov
+ bash <(curl -s https://codecov.io/bash) || echo 'Codecov did not collect coverage reports';
+ # Upload reports to Codacy
+ java -cp ~/codacy-coverage-reporter-assembly-latest.jar com.codacy.CodacyCoverageReporter -l Java -r reporting/target/site/jacoco-aggregate/jacoco.xml || echo 'Codacy report fail'
+ else
+      echo 'Skipping metrics reports because this repo/build is not permitted'
+ fi
+
\ No newline at end of file
diff --git a/.travis/build-packages b/.travis/build-packages
new file mode 100644
index 00000000..c3a053ca
--- /dev/null
+++ b/.travis/build-packages
@@ -0,0 +1,169 @@
+#!/bin/bash
+
+set -e +x +v +o history
+
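+# finish() runs when the script exits (see the trap below): it deletes the
+# credential files written by this script and restores any pre-existing
+# configuration from the .bak copies, so no secrets outlive the build.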
+function finish {
+ rm -f ~/.aws/config
+
+ if [ -f ~/.m2/settings.xml ]
+ then
+ rm ~/.m2/settings.xml
+ fi
+ if [ -f ~/.m2/settings.xml.bak ]
+ then
+ mv ~/.m2/settings.xml.bak ~/.m2/settings.xml
+ fi
+
+ if [ -f ~/.ssh/id_rsa ]
+ then
+ rm -f ~/.ssh/id_rsa
+ fi
+ if [ -f ~/.ssh/id_rsa.bak ]
+ then
+ mv ~/.ssh/id_rsa.bak ~/.ssh/id_rsa
+ fi
+ if [ -f ~/.ssh/id_rsa.pub ]
+ then
+ rm -f ~/.ssh/id_rsa.pub
+ fi
+ if [ -f ~/.ssh/id_rsa.pub.bak ]
+ then
+ mv ~/.ssh/id_rsa.pub.bak ~/.ssh/id_rsa.pub
+ fi
+
+ if [ -f ~/.ssh/sign ]
+ then
+ rm -f ~/.ssh/sign
+ fi
+ if [ -f ~/.ssh/sign.bak ]
+ then
+ mv ~/.ssh/sign.bak ~/.ssh/sign
+ fi
+ if [ -f ~/.ssh/sign.pub ]
+ then
+ rm -f ~/.ssh/sign.pub
+ fi
+ if [ -f ~/.ssh/sign.pub.bak ]
+ then
+ mv ~/.ssh/sign.pub.bak ~/.ssh/sign.pub
+ fi
+
+ gpg --delete-secret-key --batch --yes "$launchpad_sign_public_key_fingerprint" || true
+ gpg --delete-key --batch --yes "$launchpad_sign_public_key_fingerprint" || true
+
+ rm -f ~/.config/copr
+}
+
+trap finish SIGINT SIGTERM EXIT
+
+
+echo "Building binary package"
+
+mkdir -p ~/.aws
+echo "
+[default]
+aws_access_key_id=$aws_access_key_id
+aws_secret_access_key=$aws_secret_access_key
+region=eu-west-1
+output=json
+" > ~/.aws/config
+
+set -x
+mvn package -f main/pom.xml -P assembler -Ds3.push=true -DskipTests
+set +x
+
+
+echo "Building docker package"
+
+mkdir -p ~/.m2
+if [ -f ~/.m2/settings.xml ]
+then
+ mv ~/.m2/settings.xml ~/.m2/settings.xml.bak
+fi
+echo "
+
+
+
+ docker.io
+ $docker_username
+ $docker_password
+
+
+
+" > ~/.m2/settings.xml
+
+set -x
+mvn package -f main/pom.xml -P docker -Ddocker.skip.push=false -DskipTests
+set +x
+
+
+echo "Building deb package"
+
+if [ -f ~/.ssh/id_rsa ]
+then
+ mv ~/.ssh/id_rsa ~/.ssh/id_rsa.bak
+fi
+if [ -f ~/.ssh/id_rsa.pub ]
+then
+ mv ~/.ssh/id_rsa.pub ~/.ssh/id_rsa.pub.bak
+fi
+
+echo "$launchpad_private_key" > ~/.ssh/id_rsa
+echo "$launchpad_public_key" > ~/.ssh/id_rsa.pub
+chmod 600 ~/.ssh/id_rsa
+chmod 644 ~/.ssh/id_rsa.pub
+
+echo "$launchpad_sign_private_key" > ~/.ssh/sign
+echo "$launchpad_sign_public_key" > ~/.ssh/sign.pub
+chmod 600 ~/.ssh/sign
+chmod 644 ~/.ssh/sign.pub
+
+gpg --import ~/.ssh/sign.pub
+gpg --import ~/.ssh/sign
+echo "$(echo "$launchpad_sign_public_key_fingerprint"|tr -d '[:space:]'):6:"|gpg --import-ownertrust
+
+echo "
+[8kdata-release]
+fqdn = ppa.launchpad.net
+method = ftp
+incoming = ~8kdata/ubuntu/ppa/
+login = anonymous
+allow_unsigned_uploads = 0
+
+[8kdata-devel]
+fqdn = ppa.launchpad.net
+method = ftp
+incoming = ~8kdata/ubuntu/ppa-dev/
+login = anonymous
+allow_unsigned_uploads = 0
+" > ~/.dput.cf
+
+set -x
+mvn package -f main/pom.xml -P deb -Dlaunchpad.push=true -DskipTests -Dpackage.name=torodb-stampede
+mvn package -f main/pom.xml -P deb -Dlaunchpad.push=true -DskipTests -Dpackage.name=torodb-stampede-postgres
+set +x
+
+
+echo "Building snap package"
+
+set -x
+mvn package -f main/pom.xml -P snap -Dlaunchpad.push=true -DskipTests -Dpackage.name=torodb-stampede
+mvn package -f main/pom.xml -P snap -Dlaunchpad.push=true -DskipTests -Dpackage.name=torodb-stampede-postgres
+set +x
+
+
+echo "Building rpm package"
+
+mkdir -p ~/.config
+echo "
+[copr-cli]
+login = $copr_login
+username = $copr_user
+token = $copr_token
+copr_url = https://copr.fedorainfracloud.org
+" > ~/.config/copr
+
+set -x
+mvn package -f main/pom.xml -P rpm -Dcopr.push=true -DskipTests -Dpackage.name=torodb-stampede
+mvn package -f main/pom.xml -P rpm -Dcopr.push=true -DskipTests -Dpackage.name=torodb-stampede-postgres
+set +x
diff --git a/README.md b/README.md
index f2be53c8..f79dc081 100644
--- a/README.md
+++ b/README.md
@@ -1,43 +1,59 @@
-# ToroDB
+# ToroDB Stampede
-[![Master branch build status](https://travis-ci.org/torodb/torodb.svg?branch=master)](https://travis-ci.org/torodb/torodb) [![Quality Gate](https://sonarqube.com/api/badges/gate?key=com.torodb:torodb-pom)](https://sonarqube.com/dashboard/index/com.torodb:torodb-pom)
+> Transform your NoSQL data from a MongoDB replica set into a relational database in PostgreSQL.
-ToroDB is a technology designed to fulfill the gap between document oriented
-and SQL databases. There are two products that use this technology: ToroDB
-Server and ToroDB Stampede. Both platforms are open source and any feedback,
-contributions, help and/or patches are very welcome. Please join the
-[torodb-dev][2] mailing list for further discussion.
+There are other solutions that can store JSON documents in a relational table
+using PostgreSQL JSON support, but they don't solve the real problem of how to
+actually use that data. ToroDB Stampede replicates the document structure in
+different relational tables and stores the document data as tuples in those
+tables.
-For more information, please see [ToroDB's website][1]
+![Tables distribution](documentation/docs/images/tables_distribution.jpeg)
-## ToroDB Server
-It is a MongoDB-compatible server that supports speaks the MongoDB Wire
-Protocol (and therefore can be used with the same drivers used to connect to
-any standard MongoDB server) but stores your data into a reliable and trusted
-ACID database.
+## Installation
-More information about ToroDB Server can be found on [its own folder](/server)
-in this repository.
+Because ToroDB Stampede relies on external systems like MongoDB and PostgreSQL,
+the installation requires some previous steps. Take a look at our
+[quickstart][1] in the documentation.
-## ToroDB Stampede
-ToroDB Stampede is a business analytic solution that replicates your data in
-real time from a MongoDB replica set into a SQL database, allowing you to use
-any business intelligence products (like [Tableau][3] or [Pentaho][4]) to
-analyze NoSQL data.
+## Usage example
-More information about ToroDB Stampede can be found on
-[its own folder](/stampede) in this repository.
+MongoDB is a great idea, but sooner or later some kind of business
+intelligence or complex aggregated queries are required. At this point MongoDB
+is not so powerful, and ToroDB Stampede was born to solve that problem (see
+[our post about that][2]).
-## Code QA
- * Master branch build status: [![Master branch build status](https://travis-ci.org/torodb/torodb.svg?branch=master)](https://travis-ci.org/torodb/torodb) [![Quality Gate](https://sonarqube.com/api/badges/gate?key=com.torodb:torodb-pom)](https://sonarqube.com/dashboard/index/com.torodb:torodb-pom)
- * Devel branch build status : [![Devel branch build status](https://travis-ci.org/torodb/torodb.svg?branch=devel)](https://travis-ci.org/torodb/torodb) [![Quality Gate](https://sonarqube.com/api/badges/gate?key=com.torodb:torodb-pom:devel)](https://sonarqube.com/dashboard/index/com.torodb:torodb-pom:devel)
+The kind of replication done by ToroDB Stampede allows the execution of
+aggregated queries in a relational backend (PostgreSQL) with a noticeable
+performance improvement.
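+
+For example, once the `primer` dataset used in the documentation has been
+replicated, an aggregation such as the average score per bakery becomes plain
+SQL (a sketch borrowed from the how-to-use examples in this repository):
+
+```sql
+select p.name_s, avg(pg.score_i)
+from primer p, primer_grades pg
+where p.cuisine_s = 'Bakery'
+  and pg.did = p.did
+group by p.name_s;
+```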
-## Are you a developer? Want to contribute? Questions about the source code?
+A deeper explanation is available in our
+[how to use][3] section in the
+documentation.
-Please see [CONTRIBUTING][5].
+## Development setup
-[1]: http://www.torodb.com
-[2]: https://groups.google.com/forum/#!forum/torodb-dev
-[3]: http://www.tableau.com
-[4]: http://www.pentaho.com/
-[5]: https://github.com/torodb/torodb/blob/master/CONTRIBUTING.md
+As noted in the installation section, the external system requirements make it
+harder to explain briefly how to set up the development environment here. To
+prepare your development environment, take a look at our [documentation][4].
+
+## Release History
+
+* 1.0.0-beta2
+ * Released on April 06th 2017
+* 1.0.0-beta1
+ * Released on December 30th 2016
+
+## Meta
+
+ToroDB – [@nosqlonsql](https://twitter.com/nosqlonsql) – info@8kdata.com
+
+Distributed under the GNU AGPL v3 license. See ``LICENSE`` for more information.
+
+[1]: https://www.torodb.com/stampede/docs/quickstart
+[2]: https://www.8kdata.com/blog/the-conundrum-of-bi-aggregate-queries-on-mongodb/
+[3]: https://www.torodb.com/stampede/docs/how-to-use
+[4]: https://www.torodb.com/stampede/docs/installation/previous-requirements/
diff --git a/RELEASE.md b/RELEASE.md
new file mode 100644
index 00000000..465ffbcd
--- /dev/null
+++ b/RELEASE.md
@@ -0,0 +1,29 @@
+## Release Notes for Stampede 1.0.0-beta2
+
+### Changes
+
+* Add support for MongoDB 3.4
+ * Deal with MongoDB views
+ * Support BSON Type Decimal128
+* Support Sharding Replication. ToroDB Stampede can replicate from N shards into the same ToroDB database.
+ * Adjust Guice to provide a better way to inject different metrics and loggers
+ * Adapt metrics and logging so each shard has its own values
+ * Adapt the Data Import Mode to the sharding model
+* Stampede Packaging
+ * RPM package
+ * DEB package
+ * Snap package
+* Allow SSL connection to the backend (PostgreSQL)
+* Add FlexyPool to ToroDB
+* Integration Tests
+* Support all BSON types
+* Deal with system collections
+* Unify logging system and improve error messages
+* Calibrate the maximum number of threads, also taking the connection pool size into account
+* Review and test Windows/Mac installation/configuration documentation
+* Improve ToroDB Parameter configuration
+
+
+### Bugs Fixed
+
+* Stampede did not support documents whose '\_id' is a container (object or array)
diff --git a/build-tools/pom.xml b/build-tools/pom.xml
deleted file mode 100644
index 33032fcd..00000000
--- a/build-tools/pom.xml
+++ /dev/null
@@ -1,18 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-    <modelVersion>4.0.0</modelVersion>
-    <groupId>com.torodb</groupId>
-    <artifactId>build-tools</artifactId>
-    <version>0.50.0</version>
-    <packaging>jar</packaging>
-
-    <name>ToroDB: Build Tools</name>
-    <description>A project used to store resources and tools that the build
-        process can import as a dependency</description>
-
-    <properties>
-        <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
-        <maven.javadoc.skip>true</maven.javadoc.skip>
-        <maven.deploy.skip>true</maven.deploy.skip>
-    </properties>
-</project>
\ No newline at end of file
diff --git a/build-tools/src/main/resources/com/torodb/buildtools/checkstyle.xml b/build-tools/src/main/resources/com/torodb/buildtools/checkstyle.xml
deleted file mode 100644
index 01a2ac87..00000000
--- a/build-tools/src/main/resources/com/torodb/buildtools/checkstyle.xml
+++ /dev/null
@@ -1,246 +0,0 @@
diff --git a/build-tools/src/main/resources/com/torodb/buildtools/torodb-license.txt b/build-tools/src/main/resources/com/torodb/buildtools/torodb-license.txt
deleted file mode 100644
index 54d73451..00000000
--- a/build-tools/src/main/resources/com/torodb/buildtools/torodb-license.txt
+++ /dev/null
@@ -1,15 +0,0 @@
-${project.name}
-Copyright © ${project.inceptionYear} ${owner} (${email})
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU Affero General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU Affero General Public License for more details.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see <http://www.gnu.org/licenses/>.
\ No newline at end of file
diff --git a/stampede/documentation/README.md b/documentation/README.md
similarity index 100%
rename from stampede/documentation/README.md
rename to documentation/README.md
diff --git a/stampede/documentation/css/8kdata.css b/documentation/css/8kdata.css
similarity index 100%
rename from stampede/documentation/css/8kdata.css
rename to documentation/css/8kdata.css
diff --git a/stampede/documentation/diagrams_soures/diagrams.key b/documentation/diagrams_soures/diagrams.key
similarity index 100%
rename from stampede/documentation/diagrams_soures/diagrams.key
rename to documentation/diagrams_soures/diagrams.key
diff --git a/stampede/documentation/diagrams_soures/toro_stampede_structure.drawing b/documentation/diagrams_soures/toro_stampede_structure.drawing
similarity index 100%
rename from stampede/documentation/diagrams_soures/toro_stampede_structure.drawing
rename to documentation/diagrams_soures/toro_stampede_structure.drawing
diff --git a/stampede/documentation/docs/about.md b/documentation/docs/about.md
similarity index 72%
rename from stampede/documentation/docs/about.md
rename to documentation/docs/about.md
index 8346782b..bf7f06f4 100644
--- a/stampede/documentation/docs/about.md
+++ b/documentation/docs/about.md
@@ -4,7 +4,8 @@ Connected to a MongoDB replica set, ToroDB Stampede is able to replicate the NoS
![ToroDB Stampede Structure](images/toro_stampede_structure.jpg)
-There are other solutions that are able to store the JSON document in a relational table using PostgreSQL JSON support, but it doesn't solve the real problem of 'how to really use that data'. ToroDB Stampede replicates the document structure in different relational tables and stores the document data in different tuples using those tables.
+There are other solutions that can store JSON documents in a relational table using PostgreSQL JSON support, but they don't solve the real problem of how to actually use that data.
+ToroDB Stampede replicates the document structure in different relational tables and stores the document data as tuples in those tables.
![Mapping example](images/toro_stampede_mapping.jpg)
@@ -19,7 +20,6 @@ Not everything could be perfect and there are some known limitations from ToroDB
* If character `\0` is used in a string it will be escaped because PostgreSQL doesn't support it.
* Command `applyOps` reception will stop the replication server.
* Command `collMod` reception will be ignored.
-* MongoDB sharding environment are not supported currently.
In addition to the previous limitations, just some kind of indexes are supported:
@@ -28,6 +28,16 @@ In addition to the previous limitations, just some kind of indexes are supported
* All keys path with the exception to the paths resolving in scalar value (eg: `db.test.createIndex({"a": 1})` will not index value of key `a` for the document `{"a": [1,2,3]}`)
* Index properties `sparse` and `background` are ignored
+## When ToroDB Stampede might not be the right choice
+
+As good as Stampede is, there are certain use-cases for which it is a bad choice or simply will not work:
+
+* The "keys as values" pattern (see the sketch after this list). When keys contain values, potentially thousands of different values may appear as keys, leading to an equally high number of columns
+(which might break some RDBMSs that limit the number of columns per row, see the next point) and/or tables, which might be terribly inconvenient and slow.
+* Too many fields per document, several of them optional with only some appearing in each document, which might lead to thousands of columns.
+Some RDBMSs do not support such a high number of columns; for PostgreSQL the limit is around 1600 columns.
+
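+As a hypothetical sketch of the "keys as values" pattern (collection and field names invented for illustration), documents like these turn every new date key into yet another column or table on the relational side, instead of a new row:
+
+```no-highlight
+db.visits.insert({"2017-04-01": 10, "2017-04-02": 25})
+db.visits.insert({"2017-04-03": 17})
+```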
+
[TODO]: <> (not supported types, we need a list)
[Versions]: <> (this section doesn't make any sense currently)
diff --git a/stampede/documentation/docs/appendix.md b/documentation/docs/appendix.md
similarity index 95%
rename from stampede/documentation/docs/appendix.md
rename to documentation/docs/appendix.md
index abb3b330..4607676a 100644
--- a/stampede/documentation/docs/appendix.md
+++ b/documentation/docs/appendix.md
@@ -17,12 +17,13 @@ Usage: `torodb-stampede [options]`
| --backend-database | The database that will be used. |
| --backend-host | The host or ip that will be used to connect. |
| --backend-port | The port that will be used to connect. |
+| --backend-ssl | Enable SSL for the backend connection. |
| --backend-user | The user that will be used to connect. |
| -c, --conf | Configuration file in YAML format. |
| --connection-pool-size | Maximum number of connections to establish to the database. It must be higher or equal than 3. |
| --connection-pool-timeout | The timeout in milliseconds after which retrieving a connection from the pool will fail. |
| --enable-metrics | Enable metrics system. |
-| --enable-ssl | Enable SSL/TLS layer. |
+| --enable-ssl | Enable SSL/TLS for the replication layer. |
| -h, --help | Print help and exit. |
| -hp, --help-param | Print help for all available parameters and exit. |
| --log-level | Level of log emitted (will overwrite default log4j2 configuration) |
@@ -41,9 +42,9 @@ Usage: `torodb-stampede [options]`
| --ssl-key-store-password | The password of the Java Key Store file containing the private key used to authenticate the client. |
| --ssl-trust-store-file | The path to the Java Key Store file containing the Certification Authority. If CAFile is specified it will be used instead. |
| --ssl-trust-store-password | The password of the Java Key Store file containing the Certification Authority. |
-| --sync-source | The host and port (`<host>:<port>`) of the node from ToroDB has to replicate. |
+| --sync-source | The host and port (`<host>:<port>`) of the MongoDB node from which ToroDB has to replicate. |
| --toropass-file | You can specify a file that uses .pgpass syntax: `<host>:<port>:<database>:<user>:<password>` (can have multiple lines) |
-| --version | Prints the version. |
+| --version | Print the version and exit. |
| -x, --xml-conf | Configuration file in XML format. |
## Configuration file
@@ -102,6 +103,7 @@ Another way to configure the system is through configuration file or setting con
| /backend/postgres/user | The user that will be used to connect. |
| /backend/postgres/toropassFile | Path to the file with PostgreSQL access configuration in `.pgpass` syntax. |
| /backend/postgres/applicationName | The application name used by driver to connect. |
+| /backend/postgres/ssl | If `true`, the SSL connection with the PostgreSQL server is enabled; if `false`, it is disabled. |
### ToroDB Stampede pool configuration
diff --git a/stampede/documentation/docs/bi-connectors.md b/documentation/docs/bi-connectors.md
similarity index 100%
rename from stampede/documentation/docs/bi-connectors.md
rename to documentation/docs/bi-connectors.md
diff --git a/stampede/documentation/docs/consistency.md b/documentation/docs/consistency.md
similarity index 100%
rename from stampede/documentation/docs/consistency.md
rename to documentation/docs/consistency.md
diff --git a/stampede/documentation/docs/css/8kdata.css b/documentation/docs/css/8kdata.css
similarity index 100%
rename from stampede/documentation/docs/css/8kdata.css
rename to documentation/docs/css/8kdata.css
diff --git a/stampede/documentation/docs/dev-notes.md b/documentation/docs/dev-notes.md
similarity index 86%
rename from stampede/documentation/docs/dev-notes.md
rename to documentation/docs/dev-notes.md
index 3865fa6f..f77af557 100644
--- a/stampede/documentation/docs/dev-notes.md
+++ b/documentation/docs/dev-notes.md
@@ -10,25 +10,25 @@ This section describes the steps to install MongoDB Community
Import the public key used by the Ubuntu package manager.
```
-$ sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv EA312927
+sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv EA312927
```
Create the sources list for MongoDB.
```
-$ echo "deb http://repo.mongodb.org/apt/ubuntu xenial/mongodb-org/3.2 multiverse" | sudo tee /etc/apt/sources.list.d/mongodb-org-3.2.list
+echo "deb http://repo.mongodb.org/apt/ubuntu xenial/mongodb-org/3.2 multiverse" | sudo tee /etc/apt/sources.list.d/mongodb-org-3.2.list
```
Update the system's package list.
```
-$ sudo apt-get update
+sudo apt-get update
```
Install the MongoDB Community Edition package.
```
-$ sudo apt-get install mongodb-org
+sudo apt-get install mongodb-org
```
Create the file `/lib/systemd/system/mongod.service`. __Ubuntu 16.04 only__.
@@ -51,7 +51,7 @@ WantedBy=multi-user.target
At this point MongoDB should be correctly installed; to start or stop the service we will use the `service` command. For example, to restart the service we run:
```
-$ sudo service mongod restart
+sudo service mongod restart
```
By running the `mongo` command we can access the MongoDB console and execute the different MongoDB commands.
@@ -74,7 +74,7 @@ replication:
Once this is done, restart the service.
```
-$ sudo service mongod restart
+sudo service mongod restart
```
Now we can access the MongoDB console with the `mongo` command to complete the replica set configuration. The only thing we need to do is initialize the node as a replica set with the following command.
@@ -101,14 +101,14 @@ More information can be found at the following [link](https://www.digita
Install the required packages.
```
-$ sudo apt-get update
-$ sudo apt-get install postgresql postgresql-contrib
+sudo apt-get update
+sudo apt-get install postgresql postgresql-contrib
```
We can verify that the installation was successful by accessing the PostgreSQL console.
```
-$ sudo -u postgres psql
+sudo -u postgres psql
```
## Java
@@ -120,7 +120,7 @@ ToroDB Stampede is written in Java, so it is necessary to have installed a
Installing Oracle Java 8 on Ubuntu 16.04 is done from system packages, so it is enough to run the following commands.
```
-$ sudo add-apt-repository ppa:webupd8team/java
-$ sudo apt-get update
-$ sudo apt-get install oracle-java8-installer
+sudo add-apt-repository ppa:webupd8team/java
+sudo apt-get update
+sudo apt-get install oracle-java8-installer
```
diff --git a/documentation/docs/faq.md b/documentation/docs/faq.md
new file mode 100644
index 00000000..6175d35b
--- /dev/null
+++ b/documentation/docs/faq.md
@@ -0,0 +1,111 @@
+Frequently Asked Questions
+
+## Why that name?
+
+Toro means bull in Spanish. ToroDB was founded in Madrid, Spain, by [8Kdata](https://8kdata.com/). It is the very first general-purpose database software ever built by a Spanish entity. We are very proud of this fact and wanted to name it after a well-known symbol of Spain, the toro. And the toro is a fast, solid, strong, but noble animal. Just like ToroDB.
+
+## If ToroDB uses PostgreSQL, why not just base it on jsonb?
+
+jsonb is a really cool data type for PostgreSQL, with rich function support that allows storing JSON data in a regular column, and it supports advanced indexing. jsonb was intended to allow adding some unstructured column(s) to your relational tables, and it fits really well for that purpose. But ToroDB's design and goals go way beyond jsonb's:
+
+* Transform your unstructured data to a relational design, which leads to significant improvements in storage/IO/cache, having data partitioned by "type" and automatic data normalization.
+
+* Provide native support for a NoSQL API --like ToroDB does with MongoDB's wire protocol and query API-- so you could directly use your MongoDB drivers, code and tools to interface with the database.
+
+* Offer replication and sharding the same way NoSQL does (like replicating from a MongoDB replica set).
+
+* Support non-PostgreSQL backends. While we love PostgreSQL, one size does not fit all, and other people have different requirements or different environments, like MPP (Massively Parallel) databases, in-memory solutions or just different stacks.
+
+Still, ToroDB uses a little bit of jsonb internally: to represent arrays of scalar values; and to represent the structure table, which stores the "shape" ("type") of the documents in the collection.
+
+## What about ToroDB's performance?
+
+Contrary to some popular beliefs, RDBMSs are not slow. Indeed, they can be quite fast. It's not hard, for instance, to achieve dozens or [hundreds of thousands of tps on RDBMSs like PostgreSQL](http://obartunov.livejournal.com/181981.html). The main problem is that benchmarks usually compare apples to oranges. Durability, for instance, is frequently reduced or suppressed in most NoSQL benchmarks, even though it significantly impacts performance. The same goes for replication. Take, for instance, a typical MongoDB benchmark, add journaling and replication (which you will very likely have turned on in a production environment), and your numbers will drop by an order of magnitude (160K tps vs 32K tps, 50% reads + 50% writes: [http://obartunov.livejournal.com/181981.html](http://obartunov.livejournal.com/181981.html)).
+
+## What databases does ToroDB support as backends? Are there any plans to support other backends?
+
+Currently, ToroDB supports PostgreSQL as a backend. However, the design and the code have always kept in mind the possibility of supporting other backends, so it's technically possible and it will happen. Stay tuned!
+
+## How do I optimally configure PostgreSQL for ToroDB?
+
+ToroDB itself requires no special configuration parameters, so optimal settings really depend on your hardware characteristics, workload, network architecture and so on. The usual PostgreSQL configuration recommendations apply. There are hundreds of places on the Internet that discuss how to do this. You may start from [Tuning Your PostgreSQL Server](https://wiki.postgresql.org/wiki/Tuning_Your_PostgreSQL_Server) if you need some help.
+
+Here are some recommendations though:
+
+As with any other Postgres configuration, don't forget to tune the "usual suspects" such as shared_buffers and checkpoint_segments (or max_wal_size if on 9.5).
+
+Be aware of the memory allocated for PostgreSQL and the JVM if they are both co-located. If this is the case, you will probably want to allocate shared_buffers as you usually do, but reduce effective_cache_size by at least the maximum amount of heap allocated by the JVM (-Xmx).
+
+Consider [setting synchronous_commit](http://www.postgresql.org/docs/9.4/static/runtime-config-wal.html) to off if you can tolerate some potential data loss. This will not corrupt your data in any way, and may improve performance. It is similar to MongoDB's behavior, where you may get writes acknowledged that may be lost if the server crashes during a small time window after the write happened. Please review wal_writer_delay if setting synchronous_commit to off to control the risk of potential data loss.
+
+Make sure that ToroDB's configuration parameters generic.connectionPoolSize and generic.reservedReadPoolSize do not add up to more than max_connections.
+
+Use data checksums for your PostgreSQL cluster if you want checksum validation at rest.
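+
+As a purely illustrative sketch (the values below are placeholders, not tuned recommendations; they depend entirely on your hardware and workload), those hints could translate into a postgresql.conf fragment like this:
+
+```
+shared_buffers = 4GB          # size as usual for the machine
+effective_cache_size = 8GB    # reduce by the JVM heap (-Xmx) if co-located
+max_wal_size = 4GB            # or checkpoint_segments before 9.5
+synchronous_commit = off      # only if some potential data loss is tolerable
+wal_writer_delay = 200ms      # bounds the data-loss window of the above
+max_connections = 100         # must exceed ToroDB's combined pool sizes
+```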
+
+## What is ToroDB's license?
+
+ToroDB is licensed under the GNU Affero General Public License v3 ([AGPLv3](https://www.gnu.org/licenses/agpl-3.0.html)). This means that ToroDB is free software, and you may freely use it, run it, modify and inspect it, as long as you comply with the terms of the license. As a non-authoritative summary, this basically means that:
+
+ToroDB is provided free of charge. Just download and use it.
+If you make a derived version of ToroDB, or integrate ToroDB with other software, all of it must also be licensed under the AGPLv3 or a compatible license. This implies that users of your software will also have the same rights as ToroDB users, including access to ToroDB's source code. Copyright must also be preserved.
+
+If you offer ToroDB or a derived work as a hosted service (like a DbaaS --Database as a Service--), your users are also bound by this license and the rights granted by the license also apply to them.
+
+If you want to create a derived work or integrate ToroDB or parts of it into proprietary software, or do not want to be bound by the terms of the AGPLv3, please contact us at torodb at torodb dot com.
+
+## What is MongoWP and how is it related to ToroDB?
+
+MongoWP (Mongo Wire Protocol) is a component layer of ToroDB. However, it is being developed independently of ToroDB, and it is available at a [separate Github repository](https://github.com/8kdata/mongowp). MongoWP provides an abstraction layer for any Java-based software that would want to behave as a MongoDB server. It implements the MongoDB wire protocol and abstracts mongowp users from it. Just implement mongowp's API and start coding your own MongoDB server! It may also be the basis for other MongoDB-protocol related software such as clients (there's some basic client support in mongowp), proxies, query routers, etc.
+
+MongoWP is based on Netty, a great asynchronous network I/O framework for the JVM. Netty follows an event-driven architecture, which allocates a small number of threads to serve incoming connections rather than a thread per connection, resulting in a really fast request dispatcher.
+
+## What other open source components does ToroDB use?
+
+* [PostgreSQL](http://www.postgresql.org/). The most advanced open source database.
+
+* [Netty](http://netty.io/), used by MongoWP. The great asynchronous network I/O framework for the JVM.
+
+* [jOOQ](http://www.jooq.org/). jOOQ generates Java code from your database and lets you build type safe SQL queries through its fluent API.
+
+* [HikariCP](http://brettwooldridge.github.io/HikariCP/). The fastest Java connection pooler.
+
+There are also many other Java libraries used by ToroDB like [ThreeTen](http://www.threeten.org/), [Guava](https://github.com/google/guava), [Guice](https://github.com/google/guice), [Findbugs](http://findbugs.sourceforge.net/), [jCommander](http://jcommander.org/), [Jackson](http://wiki.fasterxml.com/JacksonHome) and some others. We also use [Travis](https://travis-ci.org/) for CI tests.
+
+ToroDB has the deepest gratitude to all the above projects, which are great components, and to every other bit of open source that directly or indirectly helps build or run ToroDB.
+
+## Which indexes are created?
+
+ToroDB Stampede doesn't support all index types. Some indexes are supported or partially supported, and others are skipped.
+
+ * **Single field indexes**: Are fully supported.
+ * **Compound indexes**: Are not supported and are not created.
+ * **Multikey indexes**: The only multikey indexes created in ToroDB Stampede are those whose field(s) are in an embedded document. Multikey indexes over scalar values of an array are not created.
+ * **Text indexes**: Are not supported and are not created.
+ * **2dsphere indexes**: Are not supported and are not created.
+ * **2d indexes**: Are not supported and are not created.
+ * **Hashed indexes**: Are not supported and are not created.
+
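+For example (hypothetical collection and field names), of the following index definitions only the first one would be created by ToroDB Stampede:
+
+```
+db.test.createIndex({"a": 1})          // single field index: created
+db.test.createIndex({"a": 1, "b": 1})  // compound index: skipped
+db.test.createIndex({"a": "text"})     // text index: skipped
+```
+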
+Any created index can be explicitly [excluded in the configuration](installation/configuration.md#exclude-a-mongodb-index).
+
+## The command wget is not found in macOS
+
+By default macOS does not ship the wget tool in the terminal; if you want to use it, [Homebrew](http://brew.sh) can be used.
+
+Once Homebrew is installed, wget can be installed with `brew install wget`.
+
+## No pg_hba.conf entry
+
+Depending on the running Linux distribution and PostgreSQL installation, the error below could appear.
+
+```
+FATAL: no pg_hba.conf entry for host "...", user "...", database "...", SSL off
+```
+
+This happens because some installations of PostgreSQL are configured with strict security policies, so PostgreSQL rejects host connections through TCP. The `pg_hba.conf` file (usually located in PostgreSQL's data directory or configuration directory) must be edited with a rule that allows access to the database for the ToroDB Stampede user.
+
+```
+ host torod torodb 127.0.0.1/32 md5
+ host torod torodb ::1/128 md5
+```
+
+__Make sure that the new rules precede any other rule for the same host that applies to all users (e.g. 127.0.0.1/32). For more information on `pg_hba.conf` refer to the [official PostgreSQL documentation](https://www.postgresql.org/docs/current/static/auth-pg-hba-conf.html)__.
diff --git a/stampede/documentation/docs/glossary.md b/documentation/docs/glossary.md
similarity index 100%
rename from stampede/documentation/docs/glossary.md
rename to documentation/docs/glossary.md
diff --git a/stampede/documentation/docs/how-to-use.md b/documentation/docs/how-to-use.md
similarity index 92%
rename from stampede/documentation/docs/how-to-use.md
rename to documentation/docs/how-to-use.md
index 2952adbd..4fdbb6ec 100644
--- a/stampede/documentation/docs/how-to-use.md
+++ b/documentation/docs/how-to-use.md
@@ -4,10 +4,10 @@ To understand better how the JSON document to relational storage mapping algorit
Given that ToroDB Stampede is running and all its requisites are met, the dataset will be imported into MongoDB to be replicated in PostgreSQL. This is done with the following commands.
-```
-$ wget https://www.dropbox.com/s/570d4tyt4hpsn03/primer-dataset.json?dl=0
+```no-highlight
+wget https://www.torodb.com/download/primer-dataset.json
-$ mongoimport -d stampede -c primer primer-dataset.json
+mongoimport -d stampede -c primer primer-dataset.json
```
The import was done with database `stampede` and collection `primer`; this is important because it determines the schema and table names created in the relational storage. In PostgreSQL the replication is done in the `torod` database, schema `stampede`, with one root table `primer` and some associated tables named like `primer_*`.
@@ -54,7 +54,7 @@ As stated above, the root of the document is mapped to a table with the name use
Each element of the root level is mapped to a different column of the table, either a scalar or a subdocument. The next chapter contains the different datatypes that can be created in the relational schema. All of them are indicated as a postfix of the column name, for example `cuisine` key is created as `cuisine_s` because it contains string values.
-```
+```no-highlight
did | address_e | restaurant_id_s | name_s | cuisine_s | _id_x | borough_s | grades_e
-----+-----------+-----------------+----------------------------------------------------------------------------------------------------+------------------------------------------------------------------+----------------------------+---------------+----------
0 | f | 40384115 | Phil & Sons Restaurant & Pizzeria | Pizza/Italian | \x580f12efbe6e3fff2237caef | Queens | t
@@ -69,7 +69,7 @@ did | address_e | restaurant_id_s |
#### primer_address
-```
+```no-highlight
did | rid | seq | zipcode_s | coord_e | street_s | building_s
-----+-------+-----+-----------+---------+----------------------------------------+------------
0 | 0 | | 11355 | t | Main Street | 57-29
@@ -84,9 +84,9 @@ did | rid | seq | zipcode_s | coord_e | street_s
#### primer_address_coord
-The table `primer_address_coord` is a special case, like `primer_grades`, because those paths contain an array. That is the reason why a column `seq` is used in those tables, indicating the position of the element in the original arrays. To understand better the metadata columns it is recommended to read the chapter [metada](how-to-use.md#metadata).
+The table `primer_address_coord` is a special case, like `primer_grades`, because those paths contain an array. That is the reason why a column `seq` is used in those tables, indicating the position of the element in the original arrays. To understand better the metadata columns it is recommended to read the chapter [metadata](how-to-use.md#metadata).
-```
+```no-highlight
did | rid | pid | seq | v_d
-----+-------+-------+-----+--------------
0 | 0 | 0 | 0 | -73.825679
@@ -109,7 +109,7 @@ did | rid | pid | seq | v_d
#### primer_grades
-```
+```no-highlight
did | rid | seq | date_t | score_i | grade_s | score_n
-----+-------+-----+------------------------+---------+----------------+---------
0 | 0 | 0 | 2014-08-21 02:00:00+02 | 6 | A |
@@ -163,19 +163,28 @@ The different data types used by ToroDB Stampede are represented in the table be
| Postfix | What does it mean? |
|---------|--------------------|
+| _a | This represents MongoDB's MAX_KEY type, stored with a true value. |
| _b | Boolean value, stored as a boolean in PostgreSQL. |
| _c | A date (with time) value in format ISO-8601, stored with PostgreSQL type date. |
| _d | A 64-bit IEEE 754 floating point, stored with PostgreSQL type double precision. |
| _e | A child element, it can be an object or an array, stored with PostgreSQL type boolean with a value of false to indicate a child object and true to indicate a child array. |
+| _g | A PostgreSQL jsonb type, composed of two strings meaning the pattern and the evaluation options for a RegEx in MongoDB's style. |
| _i | A 32-bit signed two's complement integer, stored with PostgreSQL type integer. |
+| _j | This represents the MONGO_JAVASCRIPT type, stored with PostgreSQL type character varying. |
+| _k | This represents MongoDB's MIN_KEY type, stored with a false value. |
| _l | A 64-bit signed two's complement integer, stored with PostgreSQL type bigint. |
-| _n | A null value, stored with PostgreSQL type boolean (nullable). It cannot take value false, just true or null. When the value is true means the JSON document has value null for that path, when it is null it means the path has another value or does not exist for that document. |
| _m | A time value in format ISO-8601, stored with PostgreSQL type time. |
+| _n | A null value, stored with PostgreSQL type boolean (nullable). It cannot take value false, just true or null. When the value is true means the JSON document has value null for that path, when it is null it means the path has another value or does not exist for that document. |
+| _p | This represents the MONGO_DB_POINTER type, and it is stored as a PostgreSQL jsonb, composed of two strings meaning the namespace and the objectId. |
+| _q | This represents MongoDB's Decimal128 type. It's stored as a PostgreSQL type containing a numeric value and three booleans that specify whether the value is or isn't infinite, NaN or negative zero. |
| _r | Binary object, it is an array of bytes stored in PostgreSQL as bytea. |
| _s | An array of UTF-8 characters representing a text, stored with PostgreSQL type character varying. |
| _t | Number of milliseconds from 1970-01-01T00:00:00Z, stored with PostgreSQL type timestamptz. |
-| _x | This represent the MONGO_OBJECT_ID and it is stored as a PostgreSQL bytea. |
-| _y | This represent the MONGO_TIMESTAMP and it is stored as a PostgreSQL composite type formed by an integer column secs and an integer column counter. |
+| _u | This represents the undefined type, stored with a true value. |
+| _w | This represents the MONGO_JAVASCRIPT_WITH_SCOPE type, stored with PostgreSQL type jsonb. |
+| _x | This represents the MONGO_OBJECT_ID and it is stored as a PostgreSQL bytea. |
+| _y | This represents the MONGO_TIMESTAMP and it is stored as a PostgreSQL composite type formed by an integer column secs and an integer column counter. |
+| _z | This represents the DEPRECATED type. We assign a String to represent it, so it is stored with PostgreSQL type character varying. |
__Notes about MONGO_OBJECT_ID__: ObjectIds are small, likely unique, fast to generate, and ordered. ObjectId values consist of 12 bytes, where the first four bytes are a timestamp that reflects the ObjectId’s creation, specifically:
@@ -186,11 +195,11 @@ __Notes about MONGO_OBJECT_ID__: ObjectIds are small, likely unique, fast to gen
### Data conflict resolution
-Because the JSON documents nature, it can happen that the same path contains different data types or even in some documents the path doesn't exist. That is not a problem for the JSON document but it is for a relational storage where each column should have an associated data type.
+Because of the nature of JSON documents, it can happen that the same path contains different data types, or that in some documents the path doesn't exist. That is not a problem for the JSON document but it is for a relational storage where each column should have an associated data type.
To solve this problem in ToroDB Stampede, each data type has a different column. For example, in the `primer_grades` table there are two different columns for the `score` key. One is `score_i` that represents the integer values and another one is `score_n` that represents when that value contains null in the original document (because it is mandatory to detect when null value was given and when the path was not given).
-```
+```no-highlight
did | rid | seq | date_t | score_i | grade_s | score_n
------+-------+-----+------------------------+---------+----------------+---------
0 | 0 | 0 | 2014-08-21 02:00:00+02 | 6 | A |
@@ -259,7 +268,7 @@ The metadata columns in the data tables are not enough to keep the data integrit
Table `database` stores the name given by the user to the database in MongoDB, which is stored in a schema in PostgreSQL. Because PostgreSQL has limits on database names it is dereferenced here, but usually the values are the same unless a very large name is used.
-```
+```no-highlight
# select * from database;
name | identifier
@@ -271,7 +280,7 @@ Table `database` stores the name given by the user to the database in MongoDB, t
Among the name of the database one, collection name was given in MongoDB layer, so it is stored in the table `collection` dereferencing it in the same way.
-```
+```no-highlight
# select * from collection;
database | name | identifier
@@ -285,7 +294,7 @@ As stated above, the name of the table for the root element is the same one used
With larger paths, like `address.coord`, the table ref will be the composition of the path, so `{address,coord}`. And the table identifier will be the concatenation of the dereferenced names of collection and path identifiers `primer_address_coord`.
-```
+```no-highlight
# select * from doc_part;
database | collection | table_ref | identifier | last_rid
@@ -300,7 +309,7 @@ With larger paths, like `address.coord`, the table ref will be the composition o
`field` table stores the data type of each column and its identifier. For a given combination of `database, collection, table_ref`, the used name of the column is stored and the data type associated. This data type can be either a scalar value, like `string` or `double`, or a `child` type (this means an associated table exists)
-```
+```no-highlight
# select * from field;
database | collection | table_ref | name | type | identifier
@@ -328,7 +337,7 @@ With larger paths, like `address.coord`, the table ref will be the composition o
In the given example, the only row in `scalar` table is related to the path `address.coord` with type `double`. This means that column `v_d` in the table `primer_address_coord` is a `double`.
-```
+```no-highlight
# select * from scalar;
database | collection | table_ref | type | identifier
@@ -342,7 +351,7 @@ The data in the relational storage can be queried like any other relational data
For example, the name of all bakeries in the ZIP code 10462, could be:
-```
+```no-highlight
select p.name_s from primer p, primer_address pa
where
p.cuisine_s = 'Bakery'
@@ -350,7 +359,7 @@ where
and pa.zipcode_s = '10462'
```
-```
+```no-highlight
# select p.name_s from primer p, primer_address pa where p.cuisine_s = 'Bakery' and p.did = pa.did and pa.zipcode_s = '10462';
name_s
@@ -366,7 +375,7 @@ where
One of the advantages of having the data in a relational format is the ability to execute complex queries in a fast and efficient way. For example, the average score of each bakery could be added to the previous query with just a few lines.
-```
+```no-highlight
select p.name_s, avg(pg.score_i)
from primer p, primer_address pa, primer_grades pg
where
@@ -377,7 +386,7 @@ where
group by p.name_s
```
-```
+```no-highlight
# select p.name_s, avg(pg.score_i) from primer p, primer_address pa, primer_grades pg where p.cuisine_s = 'Bakery' and p.did = pa.did and pa.zipcode_s = '10462' and pg.did = p.did group by p.name_s;
name_s | avg
@@ -393,7 +402,7 @@ group by p.name_s
And another filter can be applied with a few more lines, keeping the query very simple and the execution time responsive.
-```
+```no-highlight
select p.name_s, avg(pg.score_i)
from primer p, primer_address pa, primer_grades pg
where
@@ -405,7 +414,7 @@ group by p.name_s
having avg(pg.score_i) > 10
```
-```
+```no-highlight
# select p.name_s, avg(pg.score_i) from primer p, primer_address pa, primer_grades pg where p.cuisine_s = 'Bakery' and p.did = pa.did and pa.zipcode_s = '10462' and pg.did = p.did group by p.name_s having avg(pg.score_i) > 10;
name_s | avg
diff --git a/stampede/documentation/docs/images/pid_reference.jpeg b/documentation/docs/images/pid_reference.jpeg
similarity index 100%
rename from stampede/documentation/docs/images/pid_reference.jpeg
rename to documentation/docs/images/pid_reference.jpeg
diff --git a/stampede/documentation/docs/images/tables_distribution.jpeg b/documentation/docs/images/tables_distribution.jpeg
similarity index 100%
rename from stampede/documentation/docs/images/tables_distribution.jpeg
rename to documentation/docs/images/tables_distribution.jpeg
diff --git a/stampede/documentation/docs/images/toro_stampede_mapping.jpg b/documentation/docs/images/toro_stampede_mapping.jpg
similarity index 100%
rename from stampede/documentation/docs/images/toro_stampede_mapping.jpg
rename to documentation/docs/images/toro_stampede_mapping.jpg
diff --git a/stampede/documentation/docs/images/toro_stampede_structure.jpg b/documentation/docs/images/toro_stampede_structure.jpg
similarity index 100%
rename from stampede/documentation/docs/images/toro_stampede_structure.jpg
rename to documentation/docs/images/toro_stampede_structure.jpg
diff --git a/stampede/documentation/docs/index.md b/documentation/docs/index.md
similarity index 100%
rename from stampede/documentation/docs/index.md
rename to documentation/docs/index.md
diff --git a/documentation/docs/installation/binaries.md b/documentation/docs/installation/binaries.md
new file mode 100644
index 00000000..57f8dfee
--- /dev/null
+++ b/documentation/docs/installation/binaries.md
@@ -0,0 +1,85 @@
+Installation with binaries
+
+One of the recommended ways to use ToroDB Stampede is through the binary distribution. This means that a precompiled distribution is downloaded and then executed using command-line tools.
+
+## Linux/macOS
+
+Given that the [previous requirements](previous-requirements.md) are met and the default configuration is used, to launch ToroDB Stampede download the distribution from the following [link](https://www.torodb.com/download/torodb-stampede-1.0.0-beta2.tar.bz2), then extract and execute it.
+
+```no-highlight
+wget https://www.torodb.com/download/torodb-stampede-1.0.0-beta2.tar.bz2
+
+tar xjf torodb-stampede-1.0.0-beta2.tar.bz2
+
+export TOROHOME="$(pwd)/torodb-stampede-1.0.0-beta2"
+
+"$TOROHOME/bin/torodb-stampede"
+```
+
+### Configure as a Linux systemd service
+
+You can install ToroDB Stampede as a systemd service with the following steps:
+
+```no-highlight
+sudo ln -s "$TOROHOME/bin/torodb-stampede" /usr/bin/.
+
+sudo useradd -M -d "$TOROHOME" torodb
+
+sudo cp "$TOROHOME/systemd/torodb-stampede.service.sample" /lib/systemd/system/torodb-stampede.service
+```
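+
+The shipped `torodb-stampede.service.sample` file is the authoritative unit definition; as a hypothetical minimal sketch, a unit along these lines would run ToroDB Stampede under the `torodb` user:
+
+```no-highlight
+[Unit]
+Description=ToroDB Stampede
+After=network.target postgresql.service
+
+[Service]
+User=torodb
+ExecStart=/usr/bin/torodb-stampede
+Restart=on-failure
+
+[Install]
+WantedBy=multi-user.target
+```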
+
+#### Manage systemd service
+
+##### Starting the service
+
+Make sure you have enabled the ToroDB Stampede service. To enable the service, run:
+
+```no-highlight
+sudo systemctl enable torodb-stampede
+```
+
+To start the service run:
+
+```no-highlight
+sudo systemctl start torodb-stampede
+```
+
+##### Stopping the service
+
+To stop ToroDB Stampede service:
+
+```no-highlight
+sudo systemctl stop torodb-stampede
+```
+
+##### Accessing logs
+
+To view logs of ToroDB Stampede service:
+
+```no-highlight
+sudo journalctl --no-pager -u torodb-stampede
+```
+
+To follow the logs:
+
+```no-highlight
+sudo journalctl --no-pager -u torodb-stampede -f
+```
+
+To view all logs:
+
+```no-highlight
+sudo journalctl --no-tail --no-pager -u torodb-stampede
+```
+
+
+## Windows
+
+Given that the [previous requirements](previous-requirements.md#create-toropass-file) are met, the steps needed to launch ToroDB Stampede are:
+
+* Download the distribution from the following [link](https://www.torodb.com/download/torodb-stampede-1.0.0-beta2.zip).
+* Uncompress the downloaded Zip file in the final ToroDB Stampede directory (`%TOROHOME%`).
+* Execute the command `C:\>%TOROHOME%\bin\torodb-stampede` or simply double-click the `torodb-stampede.bat` file located in the `bin` folder.
+
+
+
diff --git a/documentation/docs/installation/configuration.md b/documentation/docs/installation/configuration.md
new file mode 100644
index 00000000..b169ead3
--- /dev/null
+++ b/documentation/docs/installation/configuration.md
@@ -0,0 +1,301 @@
+Configuration
+
+ToroDB Stampede can be launched with custom configuration options. There are two ways to do it: using command-line modifiers or using a configuration file. The recommended way is the configuration file because it is more versatile and self-documenting.
+
+To use the configuration file, the `-c` parameter should be specified.
+
+```no-highlight
+torodb-stampede -c myconfiguration.yml
+```
+
+You can also check the configuration used by ToroDB Stampede using the `-l` parameter.
+
+```no-highlight
+torodb-stampede -l
+```
+
+The previous sections cover the basic configuration of the system, but it is highly probable that some specific configuration must be done for production environments.
+
+## Custom PostgreSQL connection
+
+By default ToroDB Stampede connects to PostgreSQL using the following configuration:
+
+```yaml
+backend:
+  postgres:
+    host: localhost
+    port: 5432
+    database: torod
+    user: torodb
+    toropassFile: "~/.toropass"
+    applicationName: "toro"
+    ssl: false
+```
+
+You may change this configuration depending on your requirements.
+You can enable the SSL connection by setting `ssl: true` in the configuration file.
+To provide the PostgreSQL user's password that ToroDB Stampede will use to connect to PostgreSQL,
+you can specify the `--ask-for-password` parameter to make ToroDB Stampede prompt for the password while starting up,
+or you can create a PostgreSQL credentials configuration file `~/.toropass` using the `.pgpass` file format.
+The right format is one or more lines formatted as `<host>:<port>:<database>:<user>:<password>`.
+
+```no-highlight
+echo "localhost:5432:torod:torodb:torodb" > ~/.toropass
+chmod 400 ~/.toropass
+```
+
+You may change the `.toropass` path using the `toropassFile` parameter in ToroDB Stampede configuration file. For example:
+
+```yaml
+backend:
+  postgres:
+    host: localhost
+    port: 5432
+    database: torod
+    user: torodb
+    toropassFile: /secret/mytoropass
+    applicationName: "toro"
+    ssl: false
+```
+
+## Backend connection pooling
+
+By default ToroDB Stampede uses a connection pool with the following configuration:
+
+```yaml
+backend:
+  pool:
+    connectionPoolTimeout: 10000
+    connectionPoolSize: 30
+```
+
+You may tune those parameters at will. The only constraint is that `connectionPoolSize` has to be at least 20.
+
+## Custom MongoDB connection
+
+ToroDB Stampede will connect to MongoDB using no authentication and no SSL connection by default. You can set up the connection to MongoDB using `auth` and `ssl` sections in ToroDB Stampede configuration.
+
+For example, to connect using the `cr` or `scram_sha1` authentication mode with simple SSL support, you may use the following configuration:
+
+```yaml
+replication:
+  replSetName: rs1
+  syncSource: localhost:27017
+  auth:
+    mode: negotiate
+    user: mymongouser
+    source: mymongosource
+  ssl:
+    enabled: true
+    allowInvalidHostnames: false
+    caFile: mycafile.pem
+```
+
+## Filtering replication
+
+By default ToroDB Stampede replicates all databases and collections available in your MongoDB.
+You can specify filters that include a single database, include only some collections,
+exclude a whole database, or exclude some collections by changing the ToroDB Stampede configuration.
+Exclusions always override inclusions, so if you exclude something the exclusion will prevail.
+
+!!! info
+ Let's assume for our examples that you have two databases, *films* and *music*, and each one has two collections, *title* and *performer*.
+
+### Include only a MongoDB database or collection
+
+In the replication section of the yml config file add an include item with the database to include:
+
+```yaml
+replication:
+  replSetName: rs1
+  syncSource: localhost:27017
+  include:
+    <database>: "*"
+```
+
+or if you want to include just some collections:
+
+```yaml
+replication:
+  replSetName: rs1
+  syncSource: localhost:27017
+  include:
+    <database>:
+      - <collection 1>
+      - <collection 2>
+```
+
+If you want to include only the collection *performer* from the *film* database, you should write:
+
+```yaml
+replication:
+  replSetName: rs1
+  syncSource: localhost:27017
+  include:
+    film: "performer"
+```
+
+!!! danger "Inclusion removal"
+ If you stop ToroDB Stampede, remove an inclusion, and restart ToroDB Stampede, the replication process will replicate new operations on this database/collection
+ without having replicated the pre-existing data from the previously non-included database/collection, reaching an inconsistent state.
+
+ It is recommended to delete the ToroDB Stampede database and restart the whole replication process from scratch.
+
+### Include only a MongoDB collection and a specific index inside that collection
+
+Sometimes you may want to be sure that only specific indexes created in MongoDB are replicated by ToroDB Stampede.
+MongoDB indexes can be explicitly included in ToroDB Stampede, allowing you to save disk space and avoid useless indexes. You just need to add the index name in the include section.
+
+```yaml
+replication:
+ replSetName: rs1
+ syncSource: localhost:27017
+ include:
+    <database>:
+      <collection>:
+        - name: <index>
+```
+
+If you want to include only the collection *performer* from the *film* database with the index called *city*, you should write:
+
+```yaml
+replication:
+ replSetName: rs1
+ syncSource: localhost:27017
+ include:
+ film:
+ performer:
+ - name: "city"
+```
+
+!!! danger "Inclusion removal"
+    If you stop ToroDB Stampede, remove an inclusion, and restart ToroDB Stampede, the replication process will not create the indexes that were previously filtered out.
+    ToroDB Stampede only creates indexes during the initial recovery process and when a create index command is found in the oplog replication process.
+
+### Exclude a MongoDB database or collection
+
+In the replication section of the yml config file add an exclude item with the database to exclude:
+
+```yaml
+replication:
+ replSetName: rs1
+ syncSource: localhost:27017
+ exclude:
+    <database>: "*"
+```
+
+or if you want to exclude just some collections:
+
+```yaml
+replication:
+ replSetName: rs1
+ syncSource: localhost:27017
+ exclude:
+    <database>:
+      - <collection1>
+      - <collection2>
+```
+
+To exclude the whole *music* database, but only the *performer* collection from the *film* database, you should write:
+
+```yaml
+replication:
+ replSetName: rs1
+ syncSource: localhost:27017
+ exclude:
+ music: "*"
+ film:
+ - performer
+```
+
+In this case the only collection replicated is *title* from the *film* database.
+
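+You can check the result from PostgreSQL: each replicated MongoDB database is stored in a schema with the same name (as the quickstart shows), so listing the tables of the *film* schema should only show tables belonging to *title*:
+
+```no-highlight
+psql -U torodb torod
+> set schema 'film'
+> \dt
+```
+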
+!!! danger "Exclusion removal"
+    If you stop ToroDB Stampede, remove an exclusion, and restart ToroDB Stampede, the replication process will replicate new operations on this database/collection
+    without replicating its pre-existing data, reaching an inconsistent state.
+
+    It is recommended to delete the ToroDB Stampede database and restart the whole replication process from scratch.
+
+### Exclude a MongoDB index
+
+Some indexes created in MongoDB for OLTP operations can be useless for OLAP and analytics operations.
+MongoDB indexes can be excluded in ToroDB Stampede, allowing you to save disk space. You just need to add the index name in the exclude section.
+
+```yaml
+replication:
+ replSetName: rs1
+ syncSource: localhost:27017
+ exclude:
+    <database>:
+      <collection>:
+        - name: <index>
+```
+
+If you want to exclude the index called *city* on the collection *performer* from the *film* database, you should write:
+
+```yaml
+replication:
+ replSetName: rs1
+ syncSource: localhost:27017
+ exclude:
+ film:
+ performer:
+ - name: city
+```
+
+Any index not supported by ToroDB Stampede (text, 2dsphere, 2d, hashed, ...) is ignored and not created in the relational database, so you don't need to exclude it.
+
+!!! danger "Exclusion removal"
+    If you stop ToroDB Stampede, remove an exclusion, and restart ToroDB Stampede, the replication process will not create the previously excluded indexes.
+    ToroDB Stampede only creates indexes during the initial recovery process and when a create index command is found in the oplog replication process.
+
+### Include only a MongoDB database but not a specific collection
+
+You can combine the include and exclude sections to indicate that only a particular database has to be included, while excluding a particular collection of that database.
+
+If you want to include only the database called *film* but not the specific collection *performer* from the same *film* database, you should write:
+
+```yaml
+replication:
+ replSetName: rs1
+ syncSource: localhost:27017
+ include:
+ film: "*"
+ exclude:
+ film: "performer"
+```
+
+### Include only a MongoDB collection in a database but not a specific index inside that collection
+
+You can combine the include and exclude sections to indicate that a particular collection has to be included with all its indexes except one.
+
+If you want to include only the collection *performer* from the *film* database but not the index called *city*, you should write:
+
+```yaml
+replication:
+ replSetName: rs1
+ syncSource: localhost:27017
+ include:
+ film:
+ performer: "*"
+ exclude:
+ film:
+ performer:
+ - name: "city"
+```
+
+## Replicate from a MongoDB Sharded Cluster
+
+In the replication section of the yml config file add a shards item with a list of shard configurations, one for each shard:
+
+```yaml
+replication:
+ shards:
+ - replSetName: shard1
+ syncSource: localhost:27020
+ - replSetName: shard2
+ syncSource: localhost:27030
+ - replSetName: shard3
+ syncSource: localhost:27040
+```
diff --git a/documentation/docs/installation/deb.md b/documentation/docs/installation/deb.md
new file mode 100644
index 00000000..63d69436
--- /dev/null
+++ b/documentation/docs/installation/deb.md
@@ -0,0 +1,57 @@
+# Installation for Ubuntu/Debian
+
+ToroDB Stampede can be installed from a PPA repository in two flavours:
+
+* torodb-stampede: in this package ToroDB Stampede comes without the backend dependency. This package is used when you have PostgreSQL installed on a different machine.
+* torodb-stampede-postgres: in this package ToroDB Stampede comes with a PostgreSQL dependency. This package is handy if you want to minimize configuration steps and do not need to run ToroDB Stampede and the PostgreSQL server on separate machines.
+
+## Install package torodb-stampede
+
+Just run:
+
+```
+sudo add-apt-repository -y ppa:8kdata
+sudo apt update
+sudo apt install torodb-stampede
+```
+
+Then, to set up ToroDB Stampede, run the interactive setup script as the root user:
+
+```
+sudo torodb-stampede-setup
+```
+
+You will be prompted to provide superuser credentials (if you didn't create ToroDB's database and user yourself), ToroDB's user credentials, and MongoDB credentials.
+
+!!! info "Manage ToroDB Stampede service"
+    To manage the ToroDB Stampede service, please refer to the [manage systemd service section](binaries#manage-systemd-service).
+
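+Once the setup has finished you can quickly check that the service is running; this sketch assumes the default systemd unit name installed by the package:
+
+```
+sudo systemctl status torodb-stampede
+```
+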
+## Install package torodb-stampede-postgres
+
+Just run:
+
+```
+sudo add-apt-repository -y ppa:8kdata
+sudo apt update
+sudo apt install torodb-stampede-postgres
+```
+
+Then, to set up ToroDB Stampede, run the interactive setup script as the root user:
+
+```
+sudo torodb-stampede-setup
+```
+
+You will be prompted to provide MongoDB credentials.
+
+!!! info "Manage ToroDB Stampede service"
+    To manage the ToroDB Stampede service, please refer to the [manage systemd service section](binaries#manage-systemd-service).
+
+## Nightly build packages
+
+To install the latest unstable nightly build packages, use the ppa-dev repository:
+
+```
+sudo add-apt-repository -y ppa:8kdata/ppa-dev
+sudo apt update
+sudo apt install torodb-stampede
+```
diff --git a/stampede/documentation/docs/installation/docker.md b/documentation/docs/installation/docker.md
similarity index 67%
rename from stampede/documentation/docs/installation/docker.md
rename to documentation/docs/installation/docker.md
index 1baba671..46aa7fa8 100644
--- a/stampede/documentation/docs/installation/docker.md
+++ b/documentation/docs/installation/docker.md
@@ -8,15 +8,15 @@ ToroDB Stampede can be tested in a Docker container in two different ways. First
If a `.toropass` file is created, the docker containers can be launched with the command below.
```no-highlight
-$ docker run -ti -v `realpath <path to your .toropass>`:/root/.toropass torodb/stampede
+docker run -ti -v `realpath <path to your .toropass>`:/root/.toropass torodb/stampede
```
Otherwise, it is enough to set the environment variable `TORODB_BACKEND_PASSWORD`.
```no-highlight
-$ TORODB_BACKEND_PASSWORD="<password>"
+export TORODB_BACKEND_PASSWORD="<password>"
-$ docker run -ti torodb/stampede
+docker run -ti -e TORODB_BACKEND_PASSWORD torodb/stampede
```
### With Docker Compose
@@ -24,9 +24,9 @@ $ docker run -ti torodb/stampede
The docker compose file must be downloaded and executed.
```no-highlight
-$ wget https://raw.githubusercontent.com/torodb/torodb/master/stampede/main/src/main/dist/docker/compose/torodb-stampede-fullstack/docker-compose.yml
+wget https://raw.githubusercontent.com/torodb/stampede/master/main/src/main/dist/docker/compose/torodb-stampede-fullstack/docker-compose.yml
-$ docker-compose up
+docker-compose up
```
## From source code
@@ -36,13 +36,13 @@ $ docker-compose up
The source code contains some Maven tasks that can build the right artifacts to execute ToroDB Stampede and its dependencies in Docker containers.
```no-highlight
-$ mvn clean package -P prod,docker -Ddocker.skipbase=false
+mvn clean package -P prod,docker -Ddocker.skipbase=false
-$ mvn -f stampede/main/pom.xml -P docker-stampede-fullstack docker:run -Ddocker.follow
+mvn -f stampede/main/pom.xml -P docker-stampede-fullstack docker:run -Ddocker.follow
```
Sometimes, errors can appear due to the Docker cache. If that happens, the cache can be disabled using command options, as is done in the next example. Usually these errors are related to network connection timeouts.
```no-highlight
-$ mvn clean package -P prod,docker -Ddocker.skipbase=false -Ddocker.nocache=true
+mvn clean package -P prod,docker -Ddocker.skipbase=false -Ddocker.nocache=true
```
diff --git a/stampede/documentation/docs/installation/jvm-configuration-tips.md b/documentation/docs/installation/jvm-configuration-tips.md
similarity index 100%
rename from stampede/documentation/docs/installation/jvm-configuration-tips.md
rename to documentation/docs/installation/jvm-configuration-tips.md
diff --git a/stampede/documentation/docs/installation/postgresql-configuration-tips.md b/documentation/docs/installation/postgresql-configuration-tips.md
similarity index 99%
rename from stampede/documentation/docs/installation/postgresql-configuration-tips.md
rename to documentation/docs/installation/postgresql-configuration-tips.md
index f77cd729..b0b2f4de 100644
--- a/stampede/documentation/docs/installation/postgresql-configuration-tips.md
+++ b/documentation/docs/installation/postgresql-configuration-tips.md
@@ -34,6 +34,7 @@ It is the maximum time between automatic WAL checkpoints. A value between 15 and
Setting the value to 1/2 of total memory would be a normal conservative setting, and 3/4 of memory is a more aggressive but still reasonable amount.
+
\ No newline at end of file
diff --git a/stampede/documentation/docs/installation/previous-requirements.md b/documentation/docs/installation/previous-requirements.md
similarity index 52%
rename from stampede/documentation/docs/installation/previous-requirements.md
rename to documentation/docs/installation/previous-requirements.md
index eef48275..2bd16bb9 100644
--- a/stampede/documentation/docs/installation/previous-requirements.md
+++ b/documentation/docs/installation/previous-requirements.md
@@ -11,15 +11,6 @@ ToroDB Stampede's correct operation depends on a number of known dependencies, i
| PostgreSQL | ToroDB Stampede's correct operation relies on the existence of a backend; right now it must be PostgreSQL. | [more info](https://wiki.postgresql.org/wiki/Detailed_installation_guides) |
| Java | ToroDB Stampede has been written in Java so a Java Virtual Machine is required for its execution. | [more info](https://java.com/en/download/help/index_installing.xml) |
-Among the previous dependencies, if we want to compile the source code other requisites are mandatory.
-
-| | Description | External links |
-|-|-------------|----------------|
-| Git | It is the distributed version control system (DVCS) used to keep ToroDB Stampede source code up to date and synchronized between its committers. | [more info](https://git-scm.com/downloads) |
-| Maven | Dependency management and construction tasks has been delegated to Apache Maven, so it is necessary to compile the source code. | [more info](http://maven.apache.org/install.html) |
-| Docker | An open-source project that automates the deployment of Linux applications inside software containers. It allow to run a ToroDB Stampede and to test it in a controlled environment. | [more info](https://docs.docker.com/) |
-| Docker Compose | A tool for defining and running multi-container Docker applications. It allow to run test scenarios like a ToroDB Stampede replicating from a MongoDB and connected to a PostgreSQL. | [more info](https://docs.docker.com/compose/install/) |
-
## Backend setup
### PostgreSQL configuration
@@ -29,29 +20,43 @@ To work properly, the default installation of ToroDB Stampede requires a new use
#### Linux
```no-highlight
-$ createuser -S -R -D -P --interactive torodb
+createuser -S -R -D -P --interactive torodb
-$ createdb -O torodb torod
+createdb -O torodb torod
```
#### macOS/Windows
-In macOS and Windows the user and database can be created using an administration connection with `psql` command.
+In macOS and Windows the user and database can be created using an administration connection with the `psql` command (do not forget to replace `<password>` with the chosen password).
```no-highlight
-> CREATE USER torodb WITH PASSWORD '<password>';
+CREATE USER torodb WITH PASSWORD '<password>';
-> CREATE DATABASE torod OWNER torodb;
+CREATE DATABASE torod OWNER torodb;
```
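+
+As a quick check (also shown in the quickstart), the new role should be able to connect to the new database:
+
+```no-highlight
+psql -U torodb torod
+```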
### Create .toropass file
-The access configuration to the PostgreSQL database will be detailed in the `.toropass` file stored in the home directory. The example assumes local connection with default port is being used, but it can be changed by the user too.
+The access configuration for the PostgreSQL database is detailed in the `.toropass` file stored in the home directory.
+The example assumes a local connection using the default port, but this can be changed by the user.
-#### Linux/macOS/Windows
-
-Create `.toropass` file in the home path with the content below.
+Create a `.toropass` file in the home path with the content below (do not forget to replace `<password>` with the chosen password).
```no-highlight
localhost:5432:torod:torodb:<password>
```
+
+#### Linux/macOS
+
+```no-highlight
+read -s -p "Enter password:" PASSWORD
+echo
+echo "localhost:5432:torod:torodb:$PASSWORD" > "$HOME/.toropass"
+```
+
+#### Windows
+
+```no-highlight
+set PASSWORD=<password>
+echo localhost:5432:torod:torodb:%PASSWORD%>%HOMEDRIVE%%HOMEPATH%\.toropass
+```
diff --git a/documentation/docs/installation/rpm.md b/documentation/docs/installation/rpm.md
new file mode 100644
index 00000000..2945ba04
--- /dev/null
+++ b/documentation/docs/installation/rpm.md
@@ -0,0 +1,88 @@
+# Installation for Fedora/CentOS
+
+ToroDB Stampede can be installed from a COPR repository in two flavours:
+
+* torodb-stampede: in this package ToroDB Stampede comes without the backend dependency. This package is used when you have PostgreSQL installed on a different machine.
+* torodb-stampede-postgres: in this package ToroDB Stampede comes with a PostgreSQL dependency.
+This package is handy if you want to minimize configuration steps and do not need to run ToroDB Stampede and the PostgreSQL server on separate machines.
+
+## Install package torodb-stampede
+
+Just run as the root user:
+
+### For Fedora 21 / CentOS
+
+```no-highlight
+yum -y install yum-plugin-copr
+yum -y copr enable eightkdata/torodb
+yum -y install torodb-stampede
+```
+
+### For Fedora >= 22
+
+```no-highlight
+dnf -y install dnf-plugins-core
+dnf -y copr enable eightkdata/torodb
+dnf -y install torodb-stampede
+```
+
+Then, to set up ToroDB Stampede, run the interactive setup script as the root user:
+
+```no-highlight
+torodb-stampede-setup
+```
+
+You will be prompted to provide superuser credentials (if you didn't create ToroDB's database and user yourself), ToroDB's user credentials, and MongoDB credentials.
+
+!!! info "Manage ToroDB Stampede service"
+    To manage the ToroDB Stampede service, please refer to the [manage systemd service section](binaries#manage-systemd-service).
+
+## Install package torodb-stampede-postgres
+
+Just run as the root user:
+
+### For Fedora 21 / CentOS
+
+```no-highlight
+yum -y install yum-plugin-copr
+yum -y copr enable eightkdata/torodb
+yum -y install torodb-stampede-postgres
+```
+
+### For Fedora >= 22
+
+```no-highlight
+dnf -y install dnf-plugins-core
+dnf -y copr enable eightkdata/torodb
+dnf -y install torodb-stampede-postgres
+```
+
+Then, to set up ToroDB Stampede, run the interactive setup script as the root user:
+
+```no-highlight
+torodb-stampede-setup
+```
+
+You will be prompted to provide MongoDB credentials.
+
+!!! info "Manage ToroDB Stampede service"
+    To manage the ToroDB Stampede service, please refer to the [manage systemd service section](binaries#manage-systemd-service).
+
+## Nightly build packages
+
+To install the latest unstable nightly build packages, use the torodb-dev repository (as root):
+
+### For Fedora 21 / CentOS
+
+```no-highlight
+yum -y install yum-plugin-copr
+yum -y copr enable eightkdata/torodb-dev
+yum -y install torodb-stampede
+```
+
+### For Fedora >= 22
+
+```no-highlight
+dnf -y install dnf-plugins-core
+dnf -y copr enable eightkdata/torodb-dev
+dnf -y install torodb-stampede
+```
diff --git a/documentation/docs/installation/snap.md b/documentation/docs/installation/snap.md
new file mode 100644
index 00000000..576b4c09
--- /dev/null
+++ b/documentation/docs/installation/snap.md
@@ -0,0 +1,53 @@
+# Installation with SNAP
+
+ToroDB Stampede can be installed from the public SNAP store in two flavours:
+
+* torodb-stampede: in this package ToroDB Stampede comes without the backend included, so you will have to provide one.
+* torodb-stampede-postgres: in this package ToroDB Stampede comes with PostgreSQL, so you can start using it with minimal configuration.
+
+## Install package torodb-stampede
+
+Just run:
+
+```
+sudo snap install torodb-stampede
+```
+
+Then, to set up ToroDB Stampede, run the interactive setup script as the root user:
+
+```
+sudo torodb-stampede.setup
+```
+
+You will be prompted to provide superuser credentials (if you didn't create ToroDB's database and user yourself), ToroDB's user credentials, and MongoDB credentials.
+
+!!! info "Manage ToroDB Stampede service"
+    To manage the ToroDB Stampede service, please refer to the [manage systemd service section](binaries#manage-systemd-service).
+    Replace the service name *torodb-stampede* with *snap.torodb-stampede.daemon.service*.
+
+## Install package torodb-stampede-postgres
+
+Just run:
+
+```
+sudo snap install torodb-stampede-postgres
+```
+
+Then, to set up ToroDB Stampede, run the interactive setup script as the root user:
+
+```
+sudo torodb-stampede.setup
+```
+
+You will be prompted to provide MongoDB credentials.
+
+!!! info "Manage ToroDB Stampede service"
+    To manage the ToroDB Stampede service, please refer to the [manage systemd service section](binaries#manage-systemd-service).
+    Replace the service name *torodb-stampede* with *snap.torodb-stampede-postgres.daemon.service*.
+
+## Nightly build packages
+
+To install the latest unstable nightly build packages, add the `--edge` and `--devmode` parameters to the `snap` command:
+
+```
+sudo snap install torodb-stampede --edge --devmode
+```
diff --git a/documentation/docs/installation/source-code.md b/documentation/docs/installation/source-code.md
new file mode 100644
index 00000000..8fa565ba
--- /dev/null
+++ b/documentation/docs/installation/source-code.md
@@ -0,0 +1,76 @@
+# Installation from source code
+
+The installation from the source code is quite similar to the binary installation, but it is necessary to build ToroDB Stampede from the sources first.
+
+In addition to the dependencies listed in the [previous requirements](previous-requirements.md#project-dependencies) section, the following requirements are mandatory to compile the source code.
+
+| | Description | External links |
+|-|-------------|----------------|
+| Git | The distributed version control system (DVCS) used to keep the ToroDB Stampede source code up to date and synchronized between its committers. | [more info](https://git-scm.com/downloads) |
+| Maven | Dependency management and build tasks are delegated to Apache Maven, so it is necessary to compile the source code. | [more info](http://maven.apache.org/install.html) |
+| Docker | An open-source project that automates the deployment of Linux applications inside software containers. It allows running ToroDB Stampede and testing it in a controlled environment. | [more info](https://docs.docker.com/) |
+| Docker Compose | A tool for defining and running multi-container Docker applications. It allows running test scenarios such as ToroDB Stampede replicating from a MongoDB and connected to a PostgreSQL. | [more info](https://docs.docker.com/compose/install/) |
+
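+You can check that these tools are available before starting (each supports a standard version flag):
+
+```no-highlight
+git --version
+mvn --version
+docker --version
+docker-compose --version
+```
+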
+## Linux/macOS
+
+Download source code.
+
+```no-highlight
+cd /tmp
+
+git clone https://github.com/torodb/stampede.git
+```
+
+Compile source code.
+
+```no-highlight
+cd stampede
+
+mvn clean package -P assembler,prod
+```
+
+As explained in the [previous requirements](previous-requirements.md#create-toropass-file) section, create a `.toropass` file in the current user's home directory with the following content (replace `<password>` with the chosen password).
+
+```no-highlight
+echo "localhost:5432:torod:torodb:" > ~/.toropass
+```
+
+Extract and launch ToroDB Stampede (replace `$TOROHOME` with the final ToroDB Stampede directory).
+
+```no-highlight
+cd "$TOROHOME"
+
+tar xjf /tmp/stampede/main/target/dist/torodb-stampede-1.0.0-beta2.tar.bz2
+
+torodb-stampede-1.0.0-beta2/bin/torodb-stampede
+```
+
+## Windows
+
+Download the source code into some temporary directory.
+
+```no-highlight
+C:\tmp\>git clone https://github.com/torodb/stampede.git
+```
+
+Compile source code.
+
+```no-highlight
+C:\tmp\>cd stampede
+
+C:\tmp\stampede>mvn clean package -P assembler,prod
+```
+
+As explained in the [previous requirements](previous-requirements.md#create-toropass-file) section, create a `.toropass` file in the current user's home directory (`%HOMEDRIVE%%HOMEPATH%\.toropass`) with the following content (replace `<password>` with the chosen password).
+
+```no-highlight
+localhost:5432:torod:torodb:<password>
+```
+
+Uncompress the Zip file located at `C:\tmp\stampede\main\target\dist\torodb-stampede-1.0.0-beta2.zip` into the final ToroDB Stampede directory (replace `%TOROHOME%` with the final ToroDB Stampede directory), and then execute the command:
+
+```no-highlight
+C:\>%TOROHOME%\bin\torodb-stampede
+```
+
+or simply double-click the `torodb-stampede.bat` file located in the `bin` folder.
diff --git a/documentation/docs/metrics.md b/documentation/docs/metrics.md
new file mode 100644
index 00000000..1c07e7e7
--- /dev/null
+++ b/documentation/docs/metrics.md
@@ -0,0 +1,29 @@
+# Metrics
+
+ToroDB Stampede exposes multiple metrics using JMX; some of them are custom metrics and others are metrics offered by third-party products like Flexy-pool.
+
+## Flexy-pool metrics
+
+ToroDB Stampede uses Hikari as a connection pool, but it is wrapped with Flexy-pool, so the metrics exposed by Flexy-pool are available through JMX. If a JMX console is used, the following metrics are available.
+
+
+| Name | Description |
+|------|-------------|
+| concurrentConnectionsHistogram | A histogram of the number of concurrent connections. This indicates how many connections are being used at once. |
+| concurrentConnectionRequestsHistogram | A histogram of the number of concurrent connection requests. This indicates how many connections are being requested at once. |
+| connectionAcquireMillis | A time histogram for the target data source connection acquire interval. |
+| connectionLeaseMillis | A time histogram for the connection lease time. The lease time is the duration between the moment a connection is acquired and the time it gets released. |
+| maxPoolSizeHistogram | A histogram of the target pool size. The pool size might change if the IncrementPoolOnTimeoutConnectionAcquiringStrategy is being used. |
+| overallConnectionAcquireMillis | A time histogram for the total connection acquire interval. This is the connectionAcquireMillis plus the time spent by the connection acquire strategies. |
+| overflowPoolSizeHistogram | A histogram of the pool size overflowing. The pool size might overflow if the IncrementPoolOnTimeoutConnectionAcquiringStrategy is being used. |
+| retryAttemptsHistogram | A histogram of the retry attempts number. This is incremented by the RetryConnectionAcquiringStrategy. |
+
+Because ToroDB Stampede uses more than one connection pool, multiple namespaces will be available through the JMX console.
+
+| Namespace | Description |
+|-----------|-------------|
+| com.vladmihalcea.flexypool.metric.codehale.JmxMetricReporter.cursors | Read-only connections used by the system. |
+| com.vladmihalcea.flexypool.metric.codehale.JmxMetricReporter.session | Connections used by the system to perform the replication process from the MongoDB instance. |
+| com.vladmihalcea.flexypool.metric.codehale.JmxMetricReporter.system | Connections used by the system to perform internal operations. |
+
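+As a minimal sketch of how to inspect these MBeans programmatically (instead of using a JMX console), the following standalone Java client lists the Flexy-pool metric names. The JMX port and the fact that remote JMX is enabled are assumptions for the example, not ToroDB defaults:
+
+```java
+import java.util.Set;
+
+import javax.management.MBeanServerConnection;
+import javax.management.ObjectName;
+import javax.management.remote.JMXConnector;
+import javax.management.remote.JMXConnectorFactory;
+import javax.management.remote.JMXServiceURL;
+
+public class ListFlexyPoolMetrics {
+
+  public static void main(String[] args) throws Exception {
+    // Assumes the JVM was started with remote JMX enabled, e.g.
+    // -Dcom.sun.management.jmxremote.port=9010 (no auth/SSL, local testing only)
+    JMXServiceURL url =
+        new JMXServiceURL("service:jmx:rmi:///jndi/rmi://localhost:9010/jmxrmi");
+    try (JMXConnector connector = JMXConnectorFactory.connect(url)) {
+      MBeanServerConnection connection = connector.getMBeanServerConnection();
+      // Match every MBean registered under the Flexy-pool metric domains
+      Set<ObjectName> names = connection.queryNames(
+          new ObjectName("com.vladmihalcea.flexypool.metric.*:*"), null);
+      for (ObjectName name : names) {
+        System.out.println(name);
+      }
+    }
+  }
+}
+```
+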
+More information can be found at the [Flexy-pool project page](https://github.com/vladmihalcea/flexy-pool).
\ No newline at end of file
diff --git a/stampede/documentation/docs/quickstart.md b/documentation/docs/quickstart.md
similarity index 72%
rename from stampede/documentation/docs/quickstart.md
rename to documentation/docs/quickstart.md
index 59efc215..46a4273f 100644
--- a/stampede/documentation/docs/quickstart.md
+++ b/documentation/docs/quickstart.md
@@ -20,29 +20,29 @@ ToroDB Stampede expects some basic configuration for the relational backend. The
These steps can be done with the following commands in a Linux environment:
```no-highlight
-$ sudo -u postgres createuser -S -R -D -P --interactive torodb
+sudo -u postgres createuser -S -R -D -P --interactive torodb
-$ sudo -u postgres createdb -O torodb torod
+sudo -u postgres createdb -O torodb torod
```
The easiest way to check if the database can be used is to connect to it using the new role. If it is accessible, then ToroDB Stampede should be able to replicate using it.
```no-highlight
-$ psql -U torodb torod
+psql -U torodb torod
```
## How to execute ToroDB Stampede binary distribution?
-To execute ToroDB Stampede the binary distribution is necessary and it can be downloaded from [here](https://www.dropbox.com/s/54eyp7jyu8l70aa/torodb-stampede-0.50.0-SNAPSHOT.tar.bz2?dl=0). After download and when file is uncompressed then ToroDB Stampede can be launched using the PostgreSQL connection information.
+To execute ToroDB Stampede the binary distribution is necessary; it can be downloaded from [here](https://www.torodb.com/download/torodb-stampede-1.0.0-beta2.tar.bz2). After the file has been downloaded and uncompressed, ToroDB Stampede can be launched using the PostgreSQL connection information.
The following commands will allow ToroDB Stampede to be launched.
```no-highlight
-$ wget "https://www.dropbox.com/s/54eyp7jyu8l70aa/torodb-stampede-0.50.0-SNAPSHOT.tar.bz2?dl=0" -O torodb-stampede-0.50.0-SNAPSHOT.tar.bz2
+wget "https://www.torodb.com/download/torodb-stampede-1.0.0-beta2.tar.bz2"
-$ tar xjf torodb-stampede-0.50.0-SNAPSHOT.tar.bz2
+tar xjf torodb-stampede-1.0.0-beta2.tar.bz2
-$ torodb-stampede-0.50.0-SNAPSHOT/bin/torodb-stampede --ask-for-password
+torodb-stampede-1.0.0-beta2/bin/torodb-stampede --ask-for-password
```
ToroDB Stampede will ask for the PostgreSQL torodb user's password to be provided. If all goes fine, ToroDB Stampede is up and running and it will be replicating the operations done in MongoDB.
@@ -51,18 +51,18 @@ ToroDB Stampede will ask for the PostgreSQL torodb user's password to be provide
It is easier to understand what ToroDB Stampede does through an example. One dataset will be imported in MongoDB and all data will be available in PostgreSQL thanks to Stampede replication.
-If previous steps are done and ToroDB Stampede is up and running, the dataset can be downloaded from [here](https://www.dropbox.com/s/570d4tyt4hpsn03/primer-dataset.json?dl=0) and the replication done using `mongoimport` command.
+If the previous steps are done and ToroDB Stampede is up and running, the dataset can be downloaded from [here](https://www.torodb.com/download/primer-dataset.json) and imported into MongoDB using the `mongoimport` command.
```no-highlight
-$ wget https://www.dropbox.com/s/570d4tyt4hpsn03/primer-dataset.json?dl=0
+wget https://www.torodb.com/download/primer-dataset.json
-$ mongoimport -d stampede -c primer primer-dataset.json
+mongoimport -d stampede -c primer primer-dataset.json
```
When `mongoimport` has finished and replication is complete, PostgreSQL should have the replicated structure and data stored in the `stampede` schema, because that was the name selected for the database in the `mongoimport` command. Connecting to the PostgreSQL console, the data can be accessed.
```no-highlight
-$ sudo -u torodb psql torod
+sudo -u torodb psql torod
> set schema 'stampede'
```
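+
+To verify the replicated structure, the tables created in the schema can be listed:
+
+```no-highlight
+> \dt
+```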
diff --git a/stampede/documentation/docs/real-app.md b/documentation/docs/real-app.md
similarity index 100%
rename from stampede/documentation/docs/real-app.md
rename to documentation/docs/real-app.md
diff --git a/stampede/documentation/docs/supported-backends.md b/documentation/docs/supported-backends.md
similarity index 100%
rename from stampede/documentation/docs/supported-backends.md
rename to documentation/docs/supported-backends.md
diff --git a/stampede/documentation/mkdocs.yml b/documentation/mkdocs.yml
similarity index 68%
rename from stampede/documentation/mkdocs.yml
rename to documentation/mkdocs.yml
index 7b061872..b96d7263 100644
--- a/stampede/documentation/mkdocs.yml
+++ b/documentation/mkdocs.yml
@@ -8,10 +8,15 @@ pages:
- 'Previous requirements': installation/previous-requirements.md
- 'Installation with Docker': installation/docker.md
- 'Installation with binaries': installation/binaries.md
+ - 'Installation with SNAP': installation/snap.md
+ - 'Installation for Ubuntu/Debian': installation/deb.md
+ - 'Installation for Fedora/CentOS': installation/rpm.md
- 'Installation from source code': installation/source-code.md
- 'Configuration': installation/configuration.md
- 'PostgreSQL configuration tips': installation/postgresql-configuration-tips.md
+ - 'Java Virtual Machine configuration tips': installation/jvm-configuration-tips.md
- 'How to use?': how-to-use.md
+ - 'Metrics': metrics.md
- 'FAQ': faq.md
- 'Glossary': glossary.md
- 'Appendix': appendix.md
@@ -19,3 +24,4 @@ extra_css:
- css/8kdata.css
markdown_extensions:
- admonition:
+google_analytics: ['UA-44578894-3', 'www.torodb.com']
\ No newline at end of file
diff --git a/engine/README.md b/engine/README.md
deleted file mode 100644
index 093bb8cf..00000000
--- a/engine/README.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# ToroDB Engine
-
-ToroDB engine is the core technology used by ToroDB Server and ToroDB Stampede.
diff --git a/engine/backend/common/pom.xml b/engine/backend/common/pom.xml
deleted file mode 100644
index c648716b..00000000
--- a/engine/backend/common/pom.xml
+++ /dev/null
@@ -1,125 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-
-  <parent>
-    <groupId>com.torodb.engine.backend</groupId>
-    <artifactId>backend-pom</artifactId>
-    <version>0.50.0</version>
-  </parent>
-
-  <artifactId>backend-common</artifactId>
-  <name>ToroDB: Backend common</name>
-  <packaging>jar</packaging>
-
-  <dependencies>
-    <dependency>
-      <groupId>org.apache.logging.log4j</groupId>
-      <artifactId>log4j-api</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>com.google.guava</groupId>
-      <artifactId>guava</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>com.google.code.findbugs</groupId>
-      <artifactId>annotations</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.jooq</groupId>
-      <artifactId>jooq</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>com.zaxxer</groupId>
-      <artifactId>HikariCP</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>com.torodb.engine.kvdocument</groupId>
-      <artifactId>kvdocument-core</artifactId>
-      <version>${project.version}</version>
-    </dependency>
-    <dependency>
-      <groupId>com.torodb.engine</groupId>
-      <artifactId>metainfo-cache</artifactId>
-      <version>${project.version}</version>
-    </dependency>
-    <dependency>
-      <groupId>com.torodb.engine</groupId>
-      <artifactId>core</artifactId>
-      <version>${project.version}</version>
-    </dependency>
-    <dependency>
-      <groupId>javax.json</groupId>
-      <artifactId>javax.json-api</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>javax.inject</groupId>
-      <artifactId>javax.inject</artifactId>
-    </dependency>
-
-    <dependency>
-      <groupId>junit</groupId>
-      <artifactId>junit</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.mockito</groupId>
-      <artifactId>mockito-core</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>com.torodb.engine.kvdocument</groupId>
-      <artifactId>json-converter</artifactId>
-      <version>${project.version}</version>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>com.torodb.engine</groupId>
-      <artifactId>d2r</artifactId>
-      <version>${project.version}</version>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>com.torodb.engine</groupId>
-      <artifactId>d2r</artifactId>
-      <version>${project.version}</version>
-      <type>test-jar</type>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>com.google.inject</groupId>
-      <artifactId>guice</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.logging.log4j</groupId>
-      <artifactId>log4j-core</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>com.torodb.engine</groupId>
-      <artifactId>concurrent</artifactId>
-      <version>${project.version}</version>
-    </dependency>
-    <dependency>
-      <groupId>com.google.inject.extensions</groupId>
-      <artifactId>guice-assistedinject</artifactId>
-    </dependency>
-  </dependencies>
-
-  <build>
-    <plugins>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-jar-plugin</artifactId>
-        <version>2.6</version>
-        <executions>
-          <execution>
-            <goals>
-              <goal>test-jar</goal>
-            </goals>
-          </execution>
-        </executions>
-      </plugin>
-    </plugins>
-  </build>
-</project>
diff --git a/engine/backend/common/src/main/java/com/torodb/backend/AbstractCursor.java b/engine/backend/common/src/main/java/com/torodb/backend/AbstractCursor.java
deleted file mode 100644
index 8cc2725a..00000000
--- a/engine/backend/common/src/main/java/com/torodb/backend/AbstractCursor.java
+++ /dev/null
@@ -1,101 +0,0 @@
-/*
- * ToroDB
- * Copyright © 2014 8Kdata Technology (www.8kdata.com)
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-package com.torodb.backend;
-
-import com.torodb.backend.ErrorHandler.Context;
-import com.torodb.core.cursors.Cursor;
-
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.util.ArrayList;
-import java.util.List;
-
-import javax.annotation.Nonnull;
-
-public abstract class AbstractCursor<T> implements Cursor<T> {
-
- public final ErrorHandler errorHandler;
- public final ResultSet resultSet;
- public boolean movedNext = false;
- public boolean hasNext = false;
-
- public AbstractCursor(@Nonnull ErrorHandler errorHandler, @Nonnull ResultSet resultSet) {
- this.errorHandler = errorHandler;
- this.resultSet = resultSet;
- }
-
- @Override
- public boolean hasNext() {
- try {
- if (!movedNext) {
- hasNext = resultSet.next();
- movedNext = true;
- }
-
- return hasNext;
- } catch (SQLException ex) {
- throw errorHandler.handleException(Context.FETCH, ex);
- }
- }
-
- @Override
- public T next() {
- try {
- hasNext();
- movedNext = false;
-
- return read(resultSet);
- } catch (SQLException ex) {
- throw errorHandler.handleException(Context.FETCH, ex);
- }
- }
-
- protected abstract T read(ResultSet resultSet) throws SQLException;
-
- @Override
- public void close() {
- try {
- resultSet.close();
- } catch (SQLException ex) {
- throw errorHandler.handleException(Context.FETCH, ex);
- }
- }
-
- @Override
-  public List<T> getNextBatch(final int maxSize) {
-    List<T> batch = new ArrayList<>();
-
- for (int index = 0; index < maxSize && hasNext(); index++) {
- batch.add(next());
- }
-
- return batch;
- }
-
- @Override
-  public List<T> getRemaining() {
-    List<T> batch = new ArrayList<>();
-
- while (hasNext()) {
- batch.add(next());
- }
-
- return batch;
- }
-}
diff --git a/engine/backend/common/src/main/java/com/torodb/backend/AbstractDataTypeProvider.java b/engine/backend/common/src/main/java/com/torodb/backend/AbstractDataTypeProvider.java
deleted file mode 100644
index 93f15094..00000000
--- a/engine/backend/common/src/main/java/com/torodb/backend/AbstractDataTypeProvider.java
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * ToroDB
- * Copyright © 2014 8Kdata Technology (www.8kdata.com)
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-package com.torodb.backend;
-
-import com.google.common.collect.ImmutableMap;
-import com.torodb.backend.converters.jooq.DataTypeForKv;
-import com.torodb.core.transaction.metainf.FieldType;
-
-import javax.inject.Singleton;
-
-/**
- *
- */
-@Singleton
-public abstract class AbstractDataTypeProvider implements DataTypeProvider {
-
-  private final ImmutableMap<FieldType, DataTypeForKv<?>> dataTypes;
-
-  protected AbstractDataTypeProvider(ImmutableMap<FieldType, DataTypeForKv<?>> dataTypes) {
-    this.dataTypes = ImmutableMap.<FieldType, DataTypeForKv<?>>builder()
-        .putAll(dataTypes)
-        .build();
-
- //Check that all data types are specified or throw IllegalArgumentException
- for (FieldType fieldType : FieldType.values()) {
- getDataType(fieldType);
- }
- }
-
- @Override
-  public DataTypeForKv<?> getDataType(FieldType type) {
-    DataTypeForKv<?> dataType = dataTypes.get(type);
- if (dataType == null) {
- throw new IllegalArgumentException("It is not defined how to map elements of type " + type
- + " to SQL");
- }
- return dataType;
- }
-}
diff --git a/engine/backend/common/src/main/java/com/torodb/backend/AbstractDbBackendService.java b/engine/backend/common/src/main/java/com/torodb/backend/AbstractDbBackendService.java
deleted file mode 100644
index adcc87c6..00000000
--- a/engine/backend/common/src/main/java/com/torodb/backend/AbstractDbBackendService.java
+++ /dev/null
@@ -1,264 +0,0 @@
-/*
- * ToroDB
- * Copyright © 2014 8Kdata Technology (www.8kdata.com)
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-package com.torodb.backend;
-
-import com.google.common.base.Preconditions;
-import com.torodb.backend.ErrorHandler.Context;
-import com.torodb.core.annotations.TorodbIdleService;
-import com.torodb.core.services.IdleTorodbService;
-import com.zaxxer.hikari.HikariConfig;
-import com.zaxxer.hikari.HikariDataSource;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
-
-import java.sql.Connection;
-import java.sql.SQLException;
-import java.util.concurrent.ThreadFactory;
-
-import javax.annotation.Nonnull;
-import javax.sql.DataSource;
-
-/**
- *
- */
-public abstract class AbstractDbBackendService<ConfigurationT extends DbBackendConfiguration>
-    extends IdleTorodbService implements DbBackendService {
-
- private static final Logger LOGGER = LogManager.getLogger(AbstractDbBackendService.class);
-
- public static final int SYSTEM_DATABASE_CONNECTIONS = 1;
- public static final int MIN_READ_CONNECTIONS_DATABASE = 1;
- public static final int MIN_SESSION_CONNECTIONS_DATABASE = 2;
- public static final int MIN_CONNECTIONS_DATABASE = SYSTEM_DATABASE_CONNECTIONS
- + MIN_READ_CONNECTIONS_DATABASE
- + MIN_SESSION_CONNECTIONS_DATABASE;
-
- private final ConfigurationT configuration;
- private final ErrorHandler errorHandler;
- private HikariDataSource writeDataSource;
- private HikariDataSource systemDataSource;
- private HikariDataSource readOnlyDataSource;
- /**
- * Global state variable for data import mode. If true data import mode is enabled, data import
- * mode is otherwise disabled. Indexes will not be created while data import mode is enabled. When
- * this mode is enabled importing data will be faster.
- */
- private volatile boolean dataImportMode;
-
- /**
- * Configure the backend. The contract specifies that any subclass must call initialize() method
- * after properly constructing the object.
- *
- * @param threadFactory the thread factory that will be used to create the startup and shutdown
- * threads
- * @param configuration
- * @param errorHandler
- */
- public AbstractDbBackendService(@TorodbIdleService ThreadFactory threadFactory,
- ConfigurationT configuration, ErrorHandler errorHandler) {
- super(threadFactory);
- this.configuration = configuration;
- this.errorHandler = errorHandler;
- this.dataImportMode = false;
-
- int connectionPoolSize = configuration.getConnectionPoolSize();
- int reservedReadPoolSize = configuration.getReservedReadPoolSize();
- Preconditions.checkState(
- connectionPoolSize >= MIN_CONNECTIONS_DATABASE,
- "At least " + MIN_CONNECTIONS_DATABASE
- + " total connections with the backend SQL database are required"
- );
- Preconditions.checkState(
- reservedReadPoolSize >= MIN_READ_CONNECTIONS_DATABASE,
- "At least " + MIN_READ_CONNECTIONS_DATABASE + " read connection(s) is(are) required"
- );
- Preconditions.checkState(
- connectionPoolSize - reservedReadPoolSize >= MIN_SESSION_CONNECTIONS_DATABASE,
- "Reserved read connections must be lower than total connections minus "
- + MIN_SESSION_CONNECTIONS_DATABASE
- );
- }
-
- @Override
- protected void startUp() throws Exception {
- int reservedReadPoolSize = configuration.getReservedReadPoolSize();
-
- writeDataSource = createPooledDataSource(
- configuration, "session",
- configuration.getConnectionPoolSize() - reservedReadPoolSize - SYSTEM_DATABASE_CONNECTIONS,
- getCommonTransactionIsolation(),
- false
- );
- systemDataSource = createPooledDataSource(
- configuration, "system",
- SYSTEM_DATABASE_CONNECTIONS,
- getSystemTransactionIsolation(),
- false);
- readOnlyDataSource = createPooledDataSource(
- configuration, "cursors",
- reservedReadPoolSize,
- getGlobalCursorTransactionIsolation(),
- true);
- }
-
- @Override
- @SuppressFBWarnings(value = "UWF_FIELD_NOT_INITIALIZED_IN_CONSTRUCTOR",
- justification =
- "Object lifecyle is managed as a Service. Datasources are initialized in setup method")
- protected void shutDown() throws Exception {
- writeDataSource.close();
- systemDataSource.close();
- readOnlyDataSource.close();
- }
-
- @Nonnull
- protected abstract TransactionIsolationLevel getCommonTransactionIsolation();
-
- @Nonnull
- protected abstract TransactionIsolationLevel getSystemTransactionIsolation();
-
- @Nonnull
- protected abstract TransactionIsolationLevel getGlobalCursorTransactionIsolation();
-
- private HikariDataSource createPooledDataSource(
- ConfigurationT configuration, String poolName, int poolSize,
- TransactionIsolationLevel transactionIsolationLevel,
- boolean readOnly
- ) {
- HikariConfig hikariConfig = new HikariConfig();
-
- // Delegate database-specific setting of connection parameters and any specific configuration
- hikariConfig.setDataSource(getConfiguredDataSource(configuration, poolName));
-
- // Apply ToroDB-specific datasource configuration
- hikariConfig.setConnectionTimeout(configuration.getConnectionPoolTimeout());
- hikariConfig.setPoolName(poolName);
- hikariConfig.setMaximumPoolSize(poolSize);
- hikariConfig.setTransactionIsolation(transactionIsolationLevel.name());
- hikariConfig.setReadOnly(readOnly);
- /*
- * TODO: implement to add metric support. See
- * https://github.com/brettwooldridge/HikariCP/wiki/Codahale-Metrics
- * hikariConfig.setMetricRegistry(...);
- */
-
- LOGGER.info("Created pool {} with size {} and level {}", poolName, poolSize,
- transactionIsolationLevel.name());
-
- return new HikariDataSource(hikariConfig);
- }
-
- protected abstract DataSource getConfiguredDataSource(ConfigurationT configuration,
- String poolName);
-
- @Override
- public void disableDataInsertMode() {
- this.dataImportMode = false;
- }
-
- @Override
- public void enableDataInsertMode() {
- this.dataImportMode = true;
- }
-
- @Override
- public DataSource getSessionDataSource() {
- checkState();
-
- return writeDataSource;
- }
-
- @Override
- public DataSource getSystemDataSource() {
- checkState();
-
- return systemDataSource;
- }
-
- @Override
- public DataSource getGlobalCursorDatasource() {
- checkState();
-
- return readOnlyDataSource;
- }
-
- protected void checkState() {
- if (!isRunning()) {
- throw new IllegalStateException("The " + serviceName() + " is not running");
- }
- }
-
- @Override
- public long getDefaultCursorTimeout() {
- return configuration.getCursorTimeout();
- }
-
- @Override
- public boolean isOnDataInsertMode() {
- return dataImportMode;
- }
-
- @Override
- public boolean includeForeignKeys() {
- return configuration.includeForeignKeys();
- }
-
- protected void postConsume(Connection connection, boolean readOnly) throws SQLException {
- connection.setReadOnly(readOnly);
- if (!connection.isValid(500)) {
- throw new RuntimeException("DB connection is not valid");
- }
- connection.setAutoCommit(false);
- }
-
- private Connection consumeConnection(DataSource ds, boolean readOnly) {
- checkState();
-
- try {
- Connection c = ds.getConnection();
- postConsume(c, readOnly);
-
- return c;
- } catch (SQLException ex) {
- throw errorHandler.handleException(Context.GET_CONNECTION, ex);
- }
- }
-
- @Override
- public Connection createSystemConnection() {
- checkState();
-
- return consumeConnection(systemDataSource, false);
- }
-
- @Override
- public Connection createReadOnlyConnection() {
- checkState();
-
- return consumeConnection(readOnlyDataSource, true);
- }
-
- @Override
- public Connection createWriteConnection() {
- checkState();
-
- return consumeConnection(writeDataSource, false);
- }
-}
diff --git a/engine/backend/common/src/main/java/com/torodb/backend/AbstractErrorHandler.java b/engine/backend/common/src/main/java/com/torodb/backend/AbstractErrorHandler.java
deleted file mode 100644
index 5dce399a..00000000
--- a/engine/backend/common/src/main/java/com/torodb/backend/AbstractErrorHandler.java
+++ /dev/null
@@ -1,192 +0,0 @@
-/*
- * ToroDB
- * Copyright © 2014 8Kdata Technology (www.8kdata.com)
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-package com.torodb.backend;
-
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableSet;
-import com.torodb.backend.exceptions.BackendException;
-import com.torodb.core.exceptions.ToroRuntimeException;
-import com.torodb.core.exceptions.user.UserException;
-import com.torodb.core.transaction.RollbackException;
-import org.jooq.exception.DataAccessException;
-
-import java.sql.SQLException;
-import java.util.Optional;
-import java.util.function.Function;
-
-import javax.inject.Singleton;
-
-/**
- *
- */
-@Singleton
-public abstract class AbstractErrorHandler implements ErrorHandler {
-
-  private final ImmutableList<RollbackRule> rollbackRules;
-  private final ImmutableList<UserRule> userRules;
-
- protected AbstractErrorHandler(Rule... rules) {
-    ImmutableList.Builder<RollbackRule> rollbackRulesBuilder =
-        ImmutableList.builder();
-    ImmutableList.Builder<UserRule> userRulesBuilder =
-        ImmutableList.builder();
-
- for (Rule rule : rules) {
- if (rule instanceof RollbackRule) {
- rollbackRulesBuilder.add((RollbackRule) rule);
- } else if (rule instanceof UserRule) {
- userRulesBuilder.add((UserRule) rule);
- }
- }
-
- this.rollbackRules = rollbackRulesBuilder.build();
- this.userRules = userRulesBuilder.build();
- }
-
- @Override
- public ToroRuntimeException handleException(Context context, SQLException sqlException) throws
- RollbackException {
- try {
- return handleUserException(context, sqlException);
- } catch (UserException userException) {
- return new BackendException(context, sqlException);
- }
- }
-
- @Override
- public ToroRuntimeException handleException(Context context,
- DataAccessException dataAccessException) throws RollbackException {
- try {
- return handleUserException(context, dataAccessException);
- } catch (UserException userException) {
- return new BackendException(context, dataAccessException);
- }
- }
-
- @Override
- public ToroRuntimeException handleUserException(Context context, SQLException sqlException) throws
- UserException, RollbackException {
- if (applyToUserRule(context, sqlException.getSQLState())) {
- throw createUserException(context, sqlException.getSQLState(), new BackendException(context,
- sqlException));
- }
-
- if (applyToRollbackRule(context, sqlException.getSQLState())) {
- throw new RollbackException(sqlException);
- }
-
- return new BackendException(context, sqlException);
- }
-
- @Override
- public ToroRuntimeException handleUserException(Context context,
- DataAccessException dataAccessException) throws UserException, RollbackException {
- if (applyToUserRule(context, dataAccessException.sqlState())) {
- throw createUserException(context, dataAccessException.sqlState(), new BackendException(
- context, dataAccessException));
- }
-
- if (applyToRollbackRule(context, dataAccessException.sqlState())) {
- throw new RollbackException(dataAccessException);
- }
-
- return new BackendException(context, dataAccessException);
- }
-
- private boolean applyToRollbackRule(Context context, String sqlState) {
- return rollbackRules.stream()
- .anyMatch(r ->
- r.getSqlCode().equals(sqlState) && (r.getContexts().isEmpty() || r.getContexts()
- .contains(context)));
- }
-
- private boolean applyToUserRule(Context context, String sqlState) {
- return userRules.stream()
- .anyMatch(r ->
- r.getSqlCode().equals(sqlState) && (r.getContexts().isEmpty() || r.getContexts()
- .contains(context)));
- }
-
- private UserException createUserException(Context context, String sqlState,
- BackendException backendException) {
-    Optional<UserRule> userRule = userRules.stream()
- .filter(r ->
- r.getSqlCode().equals(sqlState) && (r.getContexts().isEmpty() || r.getContexts()
- .contains(context)))
- .findFirst();
- if (userRule.isPresent()) {
- return userRule.get().translate(backendException);
- }
-
- throw new IllegalArgumentException("User exception not found for context " + context
- + " and sqlState " + sqlState);
- }
-
- protected static Rule rollbackRule(String sqlCode, Context... contexts) {
- return new RollbackRule(sqlCode, contexts);
- }
-
- protected static Rule userRule(
-      String sqlCode, Function<BackendException, UserException> translateFunction,
- Context... contexts) {
- return new UserRule(sqlCode, contexts, translateFunction);
- }
-
- protected abstract static class Rule {
-
- private final String sqlCode;
-    private final ImmutableSet<Context> contexts;
-
- private Rule(String code, Context[] contexts) {
- this.sqlCode = code;
- this.contexts = ImmutableSet.copyOf(contexts);
- }
-
- public String getSqlCode() {
- return sqlCode;
- }
-
-    public ImmutableSet<Context> getContexts() {
- return contexts;
- }
- }
-
- protected static class RollbackRule extends Rule {
-
- private RollbackRule(String code, Context[] contexts) {
- super(code, contexts);
- }
- }
-
- protected static class UserRule extends Rule {
-
-    private final Function<BackendException, UserException> translateFunction;
-
- private UserRule(String code, Context[] contexts,
-        Function<BackendException, UserException> translateFunction) {
- super(code, contexts);
-
- this.translateFunction = translateFunction;
- }
-
- public UserException translate(BackendException backendException) {
- return translateFunction.apply(backendException);
- }
- }
-}
diff --git a/engine/backend/common/src/main/java/com/torodb/backend/AbstractIdentifierConstraints.java b/engine/backend/common/src/main/java/com/torodb/backend/AbstractIdentifierConstraints.java
deleted file mode 100644
index e878f783..00000000
--- a/engine/backend/common/src/main/java/com/torodb/backend/AbstractIdentifierConstraints.java
+++ /dev/null
@@ -1,159 +0,0 @@
-/*
- * ToroDB
- * Copyright © 2014 8Kdata Technology (www.8kdata.com)
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-package com.torodb.backend;
-
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.ImmutableSet;
-import com.google.common.collect.Maps;
-import com.torodb.backend.meta.TorodbSchema;
-import com.torodb.backend.tables.MetaDocPartTable.DocPartTableFields;
-import com.torodb.core.backend.IdentifierConstraints;
-import com.torodb.core.exceptions.SystemException;
-import com.torodb.core.transaction.metainf.FieldType;
-
-import java.util.HashSet;
-import java.util.Set;
-
-import javax.annotation.Nonnull;
-import javax.inject.Singleton;
-
-@Singleton
-public abstract class AbstractIdentifierConstraints implements IdentifierConstraints {
-
- private static final char SEPARATOR = '_';
- private static final char ARRAY_DIMENSION_SEPARATOR = '$';
-
-  private final ImmutableMap<FieldType, Character> fieldTypeIdentifiers;
-  private final ImmutableMap<FieldType, String> scalarFieldTypeIdentifiers;
-  private final ImmutableSet<String> restrictedSchemaNames;
-  private final ImmutableSet<String> restrictedColumnNames;
-
-  protected AbstractIdentifierConstraints(ImmutableSet<String> restrictedSchemaNames,
-      ImmutableSet<String> restrictedColumnNames) {
-    this.fieldTypeIdentifiers = Maps.immutableEnumMap(ImmutableMap.<FieldType, Character>builder()
- .put(FieldType.BINARY, 'r') // [r]aw bytes
- .put(FieldType.BOOLEAN, 'b') // [b]oolean
- .put(FieldType.DOUBLE, 'd') // [d]ouble
- .put(FieldType.INSTANT, 't') // [t]imestamp
- .put(FieldType.INTEGER, 'i') // [i]nteger
- .put(FieldType.LONG, 'l') // [l]ong
- .put(FieldType.NULL, 'n') // [n]ull
- .put(FieldType.STRING, 's') // [s]tring
- .put(FieldType.CHILD, 'e') // child [e]lement
-
- // Mongo types
- .put(FieldType.MONGO_OBJECT_ID, 'x')
- .put(FieldType.MONGO_TIME_STAMP, 'y')
- // No-Mongo types
- .put(FieldType.DATE, 'c') // [c]alendar
- .put(FieldType.TIME, 'm') // ti[m]e
-
- .build());
-
-    ImmutableMap.Builder<FieldType, String> scalarFieldTypeIdentifiersBuilder =
-        ImmutableMap.builder();
-    Set<Character> fieldTypeIdentifierSet = new HashSet<>();
- for (FieldType fieldType : FieldType.values()) {
- if (!this.fieldTypeIdentifiers.containsKey(fieldType)) {
- throw new SystemException("FieldType " + fieldType
- + " has not been mapped to an identifier.");
- }
-
- char identifier = this.fieldTypeIdentifiers.get(fieldType);
-
- if ((identifier < 'a' || identifier > 'z') && (identifier < '0' || identifier > '9')) {
- throw new SystemException("FieldType " + fieldType + " has an unallowed identifier "
- + identifier);
- }
-
- if (fieldTypeIdentifierSet.contains(identifier)) {
- throw new SystemException("FieldType " + fieldType + " identifier "
- + identifier + " was used by another FieldType.");
- }
-
- fieldTypeIdentifierSet.add(identifier);
-
- scalarFieldTypeIdentifiersBuilder.put(fieldType, DocPartTableFields.SCALAR.fieldName
- + SEPARATOR + identifier);
- }
-
- this.scalarFieldTypeIdentifiers = Maps.immutableEnumMap(scalarFieldTypeIdentifiersBuilder
- .build());
-
-    this.restrictedSchemaNames = ImmutableSet.<String>builder()
- .add(TorodbSchema.IDENTIFIER)
- .addAll(restrictedSchemaNames)
- .build();
-
-    this.restrictedColumnNames = ImmutableSet.<String>builder()
- .add(DocPartTableFields.DID.fieldName)
- .add(DocPartTableFields.RID.fieldName)
- .add(DocPartTableFields.PID.fieldName)
- .add(DocPartTableFields.SEQ.fieldName)
- .addAll(scalarFieldTypeIdentifiers.values())
- .addAll(restrictedColumnNames)
- .build();
- }
-
- @Override
- public char getSeparator() {
- return SEPARATOR;
- }
-
- @Override
- public char getArrayDimensionSeparator() {
- return ARRAY_DIMENSION_SEPARATOR;
- }
-
- @Override
- public boolean isAllowedSchemaIdentifier(@Nonnull String schemaName) {
- return !restrictedSchemaNames.contains(schemaName);
- }
-
- @Override
- public boolean isAllowedTableIdentifier(@Nonnull String columnName) {
- return true;
- }
-
- @Override
- public boolean isAllowedColumnIdentifier(@Nonnull String columnName) {
- return !restrictedColumnNames.contains(columnName);
- }
-
- @Override
- public boolean isAllowedIndexIdentifier(@Nonnull String indexName) {
- return true;
- }
-
- @Override
- public boolean isSameIdentifier(@Nonnull String leftIdentifier, @Nonnull String rightIdentifier) {
- return leftIdentifier.equals(rightIdentifier);
- //leftIdentifier.toLowerCase(Locale.US).equals(rightIdentifier.toLowerCase(Locale.US));
- }
-
- @Override
- public char getFieldTypeIdentifier(FieldType fieldType) {
- return fieldTypeIdentifiers.get(fieldType);
- }
-
- @Override
- public String getScalarIdentifier(FieldType fieldType) {
- return scalarFieldTypeIdentifiers.get(fieldType);
- }
-}
diff --git a/engine/backend/common/src/main/java/com/torodb/backend/AbstractMetaDataReadInterface.java b/engine/backend/common/src/main/java/com/torodb/backend/AbstractMetaDataReadInterface.java
deleted file mode 100644
index 9bce79c0..00000000
--- a/engine/backend/common/src/main/java/com/torodb/backend/AbstractMetaDataReadInterface.java
+++ /dev/null
@@ -1,242 +0,0 @@
-/*
- * ToroDB
- * Copyright © 2014 8Kdata Technology (www.8kdata.com)
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-package com.torodb.backend;
-
-import com.google.common.base.Preconditions;
-import com.torodb.backend.ErrorHandler.Context;
-import com.torodb.backend.tables.KvTable;
-import com.torodb.backend.tables.MetaDocPartTable;
-import com.torodb.backend.tables.records.KvRecord;
-import com.torodb.backend.tables.records.MetaDatabaseRecord;
-import com.torodb.core.TableRef;
-import com.torodb.core.backend.MetaInfoKey;
-import com.torodb.core.transaction.metainf.MetaCollection;
-import com.torodb.core.transaction.metainf.MetaDatabase;
-import com.torodb.core.transaction.metainf.MetaDocPart;
-import com.torodb.core.transaction.metainf.MetaIdentifiedDocPartIndex;
-import com.torodb.core.transaction.metainf.MetaIndex;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
-import org.jooq.Condition;
-import org.jooq.DSLContext;
-import org.jooq.Record;
-import org.jooq.Record1;
-import org.jooq.Result;
-
-import java.util.Collection;
-import java.util.Iterator;
-import java.util.Optional;
-import java.util.stream.Stream;
-
-import javax.annotation.Nonnull;
-import javax.inject.Inject;
-import javax.inject.Singleton;
-
-/**
- *
- */
-@Singleton
-@SuppressFBWarnings("SQL_PREPARED_STATEMENT_GENERATED_FROM_NONCONSTANT_STRING")
-public abstract class AbstractMetaDataReadInterface implements MetaDataReadInterface {
-
- private final MetaDocPartTable<?, ?> metaDocPartTable;
- private final SqlHelper sqlHelper;
-
- @Inject
- public AbstractMetaDataReadInterface(MetaDocPartTable<?, ?> metaDocPartTable,
- SqlHelper sqlHelper) {
- this.metaDocPartTable = metaDocPartTable;
- this.sqlHelper = sqlHelper;
- }
-
- @Override
- public long getDatabaseSize(
- @Nonnull DSLContext dsl,
- @Nonnull MetaDatabase database
- ) {
- String statement = getReadSchemaSizeStatement(database.getIdentifier());
- Result<Record> result = sqlHelper.executeStatementWithResult(dsl, statement, Context.FETCH,
- ps -> {
- ps.setString(1, database.getName());
- }
- );
-
- if (result.isEmpty()) {
- return 0;
- }
-
- Long resultSize = result.get(0).into(Long.class);
-
- if (resultSize == null) {
- return 0;
- }
-
- return resultSize;
- }
-
- protected abstract String getReadSchemaSizeStatement(String databaseName);
-
- @Override
- public long getCollectionSize(
- @Nonnull DSLContext dsl,
- @Nonnull MetaDatabase database,
- @Nonnull MetaCollection collection
- ) {
- String statement = getReadCollectionSizeStatement();
- return sqlHelper.executeStatementWithResult(dsl, statement, Context.FETCH,
- ps -> {
- ps.setString(1, database.getName());
- ps.setString(2, database.getIdentifier());
- ps.setString(3, collection.getName());
- })
- .get(0)
- .into(Long.class);
- }
-
- protected abstract String getReadCollectionSizeStatement();
-
- @Override
- public long getDocumentsSize(
- @Nonnull DSLContext dsl,
- @Nonnull MetaDatabase database,
- @Nonnull MetaCollection collection
- ) {
- String statement = getReadDocumentsSizeStatement();
- return sqlHelper.executeStatementWithResult(dsl, statement, Context.FETCH,
- ps -> {
- ps.setString(1, database.getName());
- ps.setString(2, database.getIdentifier());
- ps.setString(3, collection.getName());
- })
- .get(0)
- .into(Long.class);
- }
-
- protected abstract String getReadDocumentsSizeStatement();
-
- @Override
- public Long getIndexSize(
- @Nonnull DSLContext dsl, @Nonnull MetaDatabase database,
- @Nonnull MetaCollection collection, @Nonnull String indexName) {
- long result = 0;
- MetaIndex index = collection.getMetaIndexByName(indexName);
- Iterator<TableRef> tableRefIterator = index.streamTableRefs().iterator();
- while (tableRefIterator.hasNext()) {
- TableRef tableRef = tableRefIterator.next();
- MetaDocPart docPart = collection.getMetaDocPartByTableRef(tableRef);
- Iterator<? extends MetaIdentifiedDocPartIndex> docPartIndexIterator = docPart.streamIndexes()
- .iterator();
- while (docPartIndexIterator.hasNext()) {
- MetaIdentifiedDocPartIndex docPartIndex = docPartIndexIterator.next();
- if (index.isCompatible(docPart, docPartIndex)) {
- long relatedIndexCount = collection.streamContainedMetaIndexes()
- .filter(i -> i.isCompatible(docPart, docPartIndex)).count();
- String statement = getReadIndexSizeStatement(database.getIdentifier(),
- docPart.getIdentifier(), docPartIndex.getIdentifier());
- result += sqlHelper.executeStatementWithResult(dsl, statement, Context.FETCH)
- .get(0).into(Long.class) / relatedIndexCount;
- }
- }
- }
- return result;
- }
-
- protected abstract String getReadIndexSizeStatement(
- String schemaName, String tableName, String indexName);
-
- @Override
- public Collection<InternalField<?>> getInternalFields(MetaDocPart metaDocPart) {
- TableRef tableRef = metaDocPart.getTableRef();
- return getInternalFields(tableRef);
- }
-
- @Override
- public Collection<InternalField<?>> getInternalFields(TableRef tableRef) {
- if (tableRef.isRoot()) {
- return metaDocPartTable.ROOT_FIELDS;
- } else if (tableRef.getParent().get().isRoot()) {
- return metaDocPartTable.FIRST_FIELDS;
- }
- return metaDocPartTable.FIELDS;
- }
-
- @Override
- public Collection<InternalField<?>> getPrimaryKeyInternalFields(TableRef tableRef) {
- if (tableRef.isRoot()) {
- return metaDocPartTable.PRIMARY_KEY_ROOT_FIELDS;
- } else if (tableRef.getParent().get().isRoot()) {
- return metaDocPartTable.PRIMARY_KEY_FIRST_FIELDS;
- }
- return metaDocPartTable.PRIMARY_KEY_FIELDS;
- }
-
- @Override
- public Collection<InternalField<?>> getReferenceInternalFields(TableRef tableRef) {
- Preconditions.checkArgument(!tableRef.isRoot());
- if (tableRef.getParent().get().isRoot()) {
- return metaDocPartTable.REFERENCE_FIRST_FIELDS;
- }
- return metaDocPartTable.REFERENCE_FIELDS;
- }
-
- @Override
- public Collection<InternalField<?>> getForeignInternalFields(TableRef tableRef) {
- Preconditions.checkArgument(!tableRef.isRoot());
- TableRef parentTableRef = tableRef.getParent().get();
- if (parentTableRef.isRoot()) {
- return metaDocPartTable.FOREIGN_ROOT_FIELDS;
- } else if (parentTableRef.getParent().get().isRoot()) {
- return metaDocPartTable.FOREIGN_FIRST_FIELDS;
- }
- return metaDocPartTable.FOREIGN_FIELDS;
- }
-
- @Override
- public Collection<InternalField<?>> getReadInternalFields(MetaDocPart metaDocPart) {
- TableRef tableRef = metaDocPart.getTableRef();
- return getReadInternalFields(tableRef);
- }
-
- @Override
- public Collection<InternalField<?>> getReadInternalFields(TableRef tableRef) {
- if (tableRef.isRoot()) {
- return metaDocPartTable.READ_ROOT_FIELDS;
- } else if (tableRef.getParent().get().isRoot()) {
- return metaDocPartTable.READ_FIRST_FIELDS;
- }
- return metaDocPartTable.READ_FIELDS;
- }
-
- @Override
- public Optional<String> readKv(DSLContext dsl, MetaInfoKey key) {
- KvTable<KvRecord> kvTable = getKvTable();
- Condition c = kvTable.KEY.eq(key.getKeyName());
-
- return dsl.select(kvTable.VALUE)
- .from(kvTable)
- .where(c)
- .fetchOptional()
- .map(Record1::value1);
- }
-
- @Override
- public Stream<MetaDatabaseRecord> readMetaDatabaseTable(DSLContext dsl) {
- return dsl.selectFrom(getMetaDatabaseTable())
- .fetchStream();
- }
-}
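
getIndexSize in the file deleted above splits the physical size of each doc part index evenly among the logical indexes compatible with it, so a physical index shared by several logical indexes is never double counted. The accounting rule in isolation, reduced to plain numbers (illustrative names, not the ToroDB API):

final class IndexSizeSketch {
  // physicalSizes[i]: bytes of physical index i;
  // compatibleLogicalIndexes[i]: how many logical indexes can use it.
  static long attributedSize(long[] physicalSizes, int[] compatibleLogicalIndexes) {
    long result = 0;
    for (int i = 0; i < physicalSizes.length; i++) {
      // Each logical index is charged an equal share of the physical index.
      result += physicalSizes[i] / compatibleLogicalIndexes[i];
    }
    return result;
  }

  public static void main(String[] args) {
    // One 10 KiB physical index shared by 2 logical indexes -> 5 KiB each.
    System.out.println(attributedSize(new long[]{10_240}, new int[]{2})); // 5120
  }
}
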
diff --git a/engine/backend/common/src/main/java/com/torodb/backend/AbstractMetaDataWriteInterface.java b/engine/backend/common/src/main/java/com/torodb/backend/AbstractMetaDataWriteInterface.java
deleted file mode 100644
index 3d153992..00000000
--- a/engine/backend/common/src/main/java/com/torodb/backend/AbstractMetaDataWriteInterface.java
+++ /dev/null
@@ -1,554 +0,0 @@
-/*
- * ToroDB
- * Copyright © 2014 8Kdata Technology (www.8kdata.com)
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-package com.torodb.backend;
-
-import com.torodb.backend.ErrorHandler.Context;
-import com.torodb.backend.tables.KvTable;
-import com.torodb.backend.tables.MetaCollectionTable;
-import com.torodb.backend.tables.MetaDatabaseTable;
-import com.torodb.backend.tables.MetaDocPartIndexColumnTable;
-import com.torodb.backend.tables.MetaDocPartIndexTable;
-import com.torodb.backend.tables.MetaDocPartTable;
-import com.torodb.backend.tables.MetaFieldTable;
-import com.torodb.backend.tables.MetaIndexFieldTable;
-import com.torodb.backend.tables.MetaIndexTable;
-import com.torodb.backend.tables.MetaScalarTable;
-import com.torodb.core.TableRef;
-import com.torodb.core.backend.MetaInfoKey;
-import com.torodb.core.transaction.metainf.FieldIndexOrdering;
-import com.torodb.core.transaction.metainf.FieldType;
-import com.torodb.core.transaction.metainf.MetaCollection;
-import com.torodb.core.transaction.metainf.MetaDatabase;
-import com.torodb.core.transaction.metainf.MetaDocPart;
-import com.torodb.core.transaction.metainf.MetaDocPartIndexColumn;
-import com.torodb.core.transaction.metainf.MetaField;
-import com.torodb.core.transaction.metainf.MetaIdentifiedDocPartIndex;
-import com.torodb.core.transaction.metainf.MetaIndex;
-import com.torodb.core.transaction.metainf.MetaIndexField;
-import com.torodb.core.transaction.metainf.MetaScalar;
-import org.jooq.Condition;
-import org.jooq.DSLContext;
-import org.jooq.Record1;
-import org.jooq.TableField;
-import org.jooq.conf.ParamType;
-
-import java.util.Optional;
-
-import javax.inject.Singleton;
-
-@Singleton
-public abstract class AbstractMetaDataWriteInterface implements MetaDataWriteInterface {
-
- private final MetaDatabaseTable<?> metaDatabaseTable;
- private final MetaCollectionTable<?> metaCollectionTable;
- private final MetaDocPartTable<?, ?> metaDocPartTable;
- private final MetaFieldTable<?, ?> metaFieldTable;
- private final MetaScalarTable<?, ?> metaScalarTable;
- private final MetaIndexTable<?> metaIndexTable;
- private final MetaIndexFieldTable<?, ?> metaIndexFieldTable;
- private final MetaDocPartIndexTable<?, ?> metaDocPartIndexTable;
- private final MetaDocPartIndexColumnTable<?, ?> metaDocPartIndexColumnTable;
- private final KvTable<?> kvTable;
- private final SqlHelper sqlHelper;
-
- public AbstractMetaDataWriteInterface(MetaDataReadInterface metaDataReadInterface,
- SqlHelper sqlHelper) {
- this.metaDatabaseTable = metaDataReadInterface.getMetaDatabaseTable();
- this.metaCollectionTable = metaDataReadInterface.getMetaCollectionTable();
- this.metaDocPartTable = metaDataReadInterface.getMetaDocPartTable();
- this.metaFieldTable = metaDataReadInterface.getMetaFieldTable();
- this.metaScalarTable = metaDataReadInterface.getMetaScalarTable();
- this.metaIndexTable = metaDataReadInterface.getMetaIndexTable();
- this.metaIndexFieldTable = metaDataReadInterface.getMetaIndexFieldTable();
- this.metaDocPartIndexTable = metaDataReadInterface.getMetaDocPartIndexTable();
- this.metaDocPartIndexColumnTable = metaDataReadInterface.getMetaDocPartIndexColumnTable();
- this.kvTable = metaDataReadInterface.getKvTable();
- this.sqlHelper = sqlHelper;
- }
-
- @Override
- public void createMetaDatabaseTable(DSLContext dsl) {
- String schemaName = metaDatabaseTable.getSchema().getName();
- String tableName = metaDatabaseTable.getName();
- String statement = getCreateMetaDatabaseTableStatement(schemaName, tableName);
- sqlHelper.executeStatement(dsl, statement, Context.CREATE_TABLE);
- }
-
- protected abstract String getCreateMetaDatabaseTableStatement(String schemaName,
- String tableName);
-
- @Override
- public void createMetaCollectionTable(DSLContext dsl) {
- String schemaName = metaCollectionTable.getSchema().getName();
- String tableName = metaCollectionTable.getName();
- String statement = getCreateMetaCollectionTableStatement(schemaName, tableName);
- sqlHelper.executeStatement(dsl, statement, Context.CREATE_TABLE);
- }
-
- protected abstract String getCreateMetaCollectionTableStatement(String schemaName,
- String tableName);
-
- @Override
- public void createMetaDocPartTable(DSLContext dsl) {
- String schemaName = metaDocPartTable.getSchema().getName();
- String tableName = metaDocPartTable.getName();
- String statement = getCreateMetaDocPartTableStatement(schemaName, tableName);
- sqlHelper.executeStatement(dsl, statement, Context.CREATE_TABLE);
- }
-
- protected abstract String getCreateMetaDocPartTableStatement(String schemaName, String tableName);
-
- @Override
- public void createMetaFieldTable(DSLContext dsl) {
- String schemaName = metaFieldTable.getSchema().getName();
- String tableName = metaFieldTable.getName();
- String statement = getCreateMetaFieldTableStatement(schemaName, tableName);
- sqlHelper.executeStatement(dsl, statement, Context.CREATE_TABLE);
- }
-
- protected abstract String getCreateMetaFieldTableStatement(String schemaName, String tableName);
-
- @Override
- public void createMetaScalarTable(DSLContext dsl) {
- String schemaName = metaScalarTable.getSchema().getName();
- String tableName = metaScalarTable.getName();
- String statement = getCreateMetaScalarTableStatement(schemaName, tableName);
- sqlHelper.executeStatement(dsl, statement, Context.CREATE_TABLE);
- }
-
- protected abstract String getCreateMetaScalarTableStatement(String schemaName, String tableName);
-
- @Override
- public void createMetaIndexTable(DSLContext dsl) {
- String schemaName = metaIndexTable.getSchema().getName();
- String tableName = metaIndexTable.getName();
- String statement = getCreateMetaIndexTableStatement(schemaName, tableName);
- sqlHelper.executeStatement(dsl, statement, Context.CREATE_TABLE);
- }
-
- protected abstract String getCreateMetaIndexTableStatement(String schemaName, String tableName);
-
- @Override
- public void createMetaIndexFieldTable(DSLContext dsl) {
- String schemaName = metaIndexFieldTable.getSchema().getName();
- String tableName = metaIndexFieldTable.getName();
- String statement = getCreateMetaIndexFieldTableStatement(schemaName, tableName);
- sqlHelper.executeStatement(dsl, statement, Context.CREATE_TABLE);
- }
-
- protected abstract String getCreateMetaIndexFieldTableStatement(String schemaName,
- String tableName);
-
- @Override
- public void createMetaDocPartIndexTable(DSLContext dsl) {
- String schemaName = metaDocPartIndexTable.getSchema().getName();
- String tableName = metaDocPartIndexTable.getName();
- String statement = getCreateMetaDocPartIndexTableStatement(schemaName, tableName);
- sqlHelper.executeStatement(dsl, statement, Context.CREATE_TABLE);
- }
-
- protected abstract String getCreateMetaDocPartIndexTableStatement(String schemaName,
- String tableName);
-
- @Override
- public void createMetaFieldIndexTable(DSLContext dsl) {
- String schemaName = metaDocPartIndexColumnTable.getSchema().getName();
- String tableName = metaDocPartIndexColumnTable.getName();
- String statement = getCreateMetaDocPartIndexColumnTableStatement(schemaName, tableName);
- sqlHelper.executeStatement(dsl, statement, Context.CREATE_TABLE);
- }
-
- protected abstract String getCreateMetaDocPartIndexColumnTableStatement(String schemaName,
- String tableName);
-
- @Override
- public void createKvTable(DSLContext dsl) {
- String schemaName = kvTable.getSchema().getName();
- String tableName = kvTable.getName();
- String statement = getCreateMetainfStatement(schemaName, tableName);
- sqlHelper.executeStatement(dsl, statement, Context.CREATE_TABLE);
- }
-
- protected abstract String getCreateMetainfStatement(String schemaName, String tableName);
-
- @Override
- public void addMetaDatabase(DSLContext dsl, MetaDatabase database) {
- String statement = getAddMetaDatabaseStatement(database.getName(), database.getIdentifier());
- sqlHelper.executeUpdate(dsl, statement, Context.META_INSERT);
- }
-
- @Override
- public void addMetaCollection(DSLContext dsl, MetaDatabase database, MetaCollection collection) {
- String statement = getAddMetaCollectionStatement(database.getName(), collection.getName(),
- collection.getIdentifier());
- sqlHelper.executeUpdate(dsl, statement, Context.META_INSERT);
- }
-
- @Override
- public void addMetaDocPart(DSLContext dsl, MetaDatabase database, MetaCollection collection,
- MetaDocPart docPart) {
- String statement = getAddMetaDocPartStatement(database.getName(), collection.getName(), docPart
- .getTableRef(),
- docPart.getIdentifier());
- sqlHelper.executeUpdate(dsl, statement, Context.META_INSERT);
- }
-
- @Override
- public void addMetaField(DSLContext dsl, MetaDatabase database, MetaCollection collection,
- MetaDocPart docPart, MetaField field) {
- String statement = getAddMetaFieldStatement(database.getName(), collection.getName(), docPart
- .getTableRef(),
- field.getName(), field.getIdentifier(),
- field.getType());
- sqlHelper.executeUpdate(dsl, statement, Context.META_INSERT);
- }
-
- @Override
- public void addMetaScalar(DSLContext dsl, MetaDatabase database, MetaCollection collection,
- MetaDocPart docPart, MetaScalar scalar) {
- String statement = getAddMetaScalarStatement(database.getName(), collection.getName(), docPart
- .getTableRef(),
- scalar.getIdentifier(), scalar.getType());
- sqlHelper.executeUpdate(dsl, statement, Context.META_INSERT);
- }
-
- @Override
- public void addMetaIndex(DSLContext dsl, MetaDatabase database, MetaCollection collection,
- MetaIndex index) {
- String statement = getAddMetaIndexStatement(database.getName(), collection.getName(), index
- .getName(),
- index.isUnique());
- sqlHelper.executeUpdate(dsl, statement, Context.META_INSERT);
- }
-
- @Override
- public void addMetaIndexField(DSLContext dsl, MetaDatabase database, MetaCollection collection,
- MetaIndex index, MetaIndexField field) {
- String statement = getAddMetaIndexFieldStatement(database.getName(), collection.getName(), index
- .getName(),
- field.getPosition(), field.getTableRef(), field.getName(), field.getOrdering());
- sqlHelper.executeUpdate(dsl, statement, Context.META_INSERT);
- }
-
- @Override
- public void addMetaDocPartIndex(DSLContext dsl, MetaDatabase database, MetaCollection collection,
- MetaDocPart docPart, MetaIdentifiedDocPartIndex index) {
- String statement = getAddMetaDocPartIndexStatement(database.getName(), index.getIdentifier(),
- collection.getName(),
- docPart.getTableRef(), index.isUnique());
- sqlHelper.executeUpdate(dsl, statement, Context.META_INSERT);
- }
-
- @Override
- public void addMetaDocPartIndexColumn(DSLContext dsl, MetaDatabase database,
- MetaCollection collection,
- MetaDocPart docPart, MetaIdentifiedDocPartIndex index, MetaDocPartIndexColumn column) {
- String statement = getAddMetaDocPartIndexColumnStatement(database.getName(), index
- .getIdentifier(), column.getPosition(),
- collection.getName(), docPart.getTableRef(), column.getIdentifier(), column.getOrdering());
- sqlHelper.executeUpdate(dsl, statement, Context.META_INSERT);
- }
-
- protected String getAddMetaDatabaseStatement(String databaseName, String databaseIdentifier) {
- String statement = sqlHelper.dsl().insertInto(metaDatabaseTable)
- .set(metaDatabaseTable.newRecord().values(databaseName, databaseIdentifier)).getSQL(
- ParamType.INLINED);
- return statement;
- }
-
- protected String getAddMetaCollectionStatement(String databaseName, String collectionName,
- String collectionIdentifier) {
- String statement = sqlHelper.dsl().insertInto(metaCollectionTable)
- .set(metaCollectionTable.newRecord()
- .values(databaseName, collectionName, collectionIdentifier)).getSQL(ParamType.INLINED);
- return statement;
- }
-
- protected String getAddMetaDocPartStatement(String databaseName, String collectionName,
- TableRef tableRef,
- String docPartIdentifier) {
- String statement = sqlHelper.dsl().insertInto(metaDocPartTable)
- .set(metaDocPartTable.newRecord()
- .values(databaseName, collectionName, tableRef, docPartIdentifier)).getSQL(
- ParamType.INLINED);
- return statement;
- }
-
- protected String getAddMetaFieldStatement(String databaseName, String collectionName,
- TableRef tableRef,
- String fieldName, String fieldIdentifier, FieldType type) {
- String statement = sqlHelper.dsl().insertInto(metaFieldTable)
- .set(metaFieldTable.newRecord()
- .values(databaseName, collectionName, tableRef, fieldName, type, fieldIdentifier))
- .getSQL(ParamType.INLINED);
- return statement;
- }
-
- protected String getAddMetaScalarStatement(String databaseName, String collectionName,
- TableRef tableRef,
- String fieldIdentifier, FieldType type) {
- String statement = sqlHelper.dsl().insertInto(metaScalarTable)
- .set(metaScalarTable.newRecord()
- .values(databaseName, collectionName, tableRef, type, fieldIdentifier)).getSQL(
- ParamType.INLINED);
- return statement;
- }
-
- protected String getAddMetaIndexStatement(String databaseName, String collectionName,
- String indexName, boolean unique) {
- String statement = sqlHelper.dsl().insertInto(metaIndexTable)
- .set(metaIndexTable.newRecord()
- .values(databaseName, collectionName, indexName, unique)).getSQL(ParamType.INLINED);
- return statement;
- }
-
- protected String getAddMetaIndexFieldStatement(String databaseName, String collectionName,
- String indexName,
- int position, TableRef tableRef, String fieldName, FieldIndexOrdering ordering) {
- String statement = sqlHelper.dsl().insertInto(metaIndexFieldTable)
- .set(metaIndexFieldTable.newRecord()
- .values(
- databaseName,
- collectionName,
- indexName,
- position,
- tableRef,
- fieldName,
- ordering))
- .getSQL(ParamType.INLINED);
- return statement;
- }
-
- protected String getAddMetaDocPartIndexStatement(String databaseName, String indexName,
- String collectionName,
- TableRef tableRef, boolean unique) {
- String statement = sqlHelper.dsl().insertInto(metaDocPartIndexTable)
- .set(metaDocPartIndexTable.newRecord()
- .values(databaseName, indexName, collectionName, tableRef, unique)).getSQL(
- ParamType.INLINED);
- return statement;
- }
-
- protected String getAddMetaDocPartIndexColumnStatement(String databaseName, String indexName,
- int position, String collectionName,
- TableRef tableRef, String columnName, FieldIndexOrdering ordering) {
- String statement = sqlHelper.dsl().insertInto(metaDocPartIndexColumnTable)
- .set(metaDocPartIndexColumnTable.newRecord()
- .values(databaseName, indexName, position, collectionName, tableRef, columnName,
- ordering)).getSQL(ParamType.INLINED);
- return statement;
- }
-
- @Override
- public void deleteMetaDatabase(DSLContext dsl, MetaDatabase database) {
- String statement = getDeleteMetaDatabaseStatement(database.getName());
- sqlHelper.executeUpdate(dsl, statement, Context.META_DELETE);
- }
-
- @Override
- public void deleteMetaCollection(DSLContext dsl, MetaDatabase database,
- MetaCollection collection) {
- String statement = getCascadeDeleteMetaDocPartIndexColumnStatement(database.getName(),
- collection.getName());
- sqlHelper.executeUpdate(dsl, statement, Context.META_DELETE);
- statement = getCascadeDeleteMetaDocPartIndexStatement(database.getName(), collection.getName());
- sqlHelper.executeUpdate(dsl, statement, Context.META_DELETE);
-
- statement = getCascadeDeleteMetaScalarStatement(database.getName(), collection.getName());
- sqlHelper.executeUpdate(dsl, statement, Context.META_DELETE);
- statement = getCascadeDeleteMetaFieldStatement(database.getName(), collection.getName());
- sqlHelper.executeUpdate(dsl, statement, Context.META_DELETE);
- statement = getCascadeDeleteMetaDocPartStatement(database.getName(), collection.getName());
- sqlHelper.executeUpdate(dsl, statement, Context.META_DELETE);
-
- statement = getCascadeDeleteMetaIndexFieldStatement(database.getName(), collection.getName());
- sqlHelper.executeUpdate(dsl, statement, Context.META_DELETE);
- statement = getCascadeDeleteMetaIndexStatement(database.getName(), collection.getName());
- sqlHelper.executeUpdate(dsl, statement, Context.META_DELETE);
-
- statement = getDeleteMetaCollectionStatement(database.getName(), collection.getName());
- sqlHelper.executeUpdate(dsl, statement, Context.META_DELETE);
- }
-
- @Override
- public void deleteMetaIndex(DSLContext dsl, MetaDatabase database, MetaCollection collection,
- MetaIndex index) {
- String statement = getCascadeDeleteMetaIndexFieldStatement(database.getName(), collection
- .getName(), index.getName());
- sqlHelper.executeUpdate(dsl, statement, Context.META_DELETE);
- statement = getDeleteMetaIndexStatement(database.getName(), collection.getName(), index
- .getName());
- sqlHelper.executeUpdate(dsl, statement, Context.META_DELETE);
- }
-
- @Override
- public void deleteMetaDocPartIndex(DSLContext dsl, MetaDatabase database,
- MetaCollection collection, MetaDocPart docPart, MetaIdentifiedDocPartIndex index) {
- String statement = getCascadeDeleteMetaDocPartIndexColumnStatement(database.getName(),
- collection.getName(), index.getIdentifier());
- sqlHelper.executeUpdate(dsl, statement, Context.META_DELETE);
- statement = getDeleteMetaDocPartIndexStatement(database.getName(), collection.getName(), index
- .getIdentifier());
- sqlHelper.executeUpdate(dsl, statement, Context.META_DELETE);
- }
-
- protected String getDeleteMetaDatabaseStatement(String databaseName) {
- String statement = sqlHelper.dsl().deleteFrom(metaDatabaseTable)
- .where(metaDatabaseTable.NAME.eq(databaseName)).getSQL(ParamType.INLINED);
- return statement;
- }
-
- protected String getDeleteMetaCollectionStatement(String databaseName, String collectionName) {
- String statement = sqlHelper.dsl().deleteFrom(metaCollectionTable)
- .where(metaCollectionTable.DATABASE.eq(databaseName)
- .and(metaCollectionTable.NAME.eq(collectionName))).getSQL(ParamType.INLINED);
- return statement;
- }
-
- protected String getCascadeDeleteMetaDocPartStatement(String databaseName,
- String collectionName) {
- String statement = sqlHelper.dsl().deleteFrom(metaDocPartTable)
- .where(metaDocPartTable.DATABASE.eq(databaseName)
- .and(metaDocPartTable.COLLECTION.eq(collectionName))).getSQL(ParamType.INLINED);
- return statement;
- }
-
- protected String getCascadeDeleteMetaFieldStatement(String databaseName, String collectionName) {
- String statement = sqlHelper.dsl().deleteFrom(metaFieldTable)
- .where(metaFieldTable.DATABASE.eq(databaseName)
- .and(metaFieldTable.COLLECTION.eq(collectionName))).getSQL(ParamType.INLINED);
- return statement;
- }
-
- protected String getCascadeDeleteMetaScalarStatement(String databaseName, String collectionName) {
- String statement = sqlHelper.dsl().deleteFrom(metaScalarTable)
- .where(metaScalarTable.DATABASE.eq(databaseName)
- .and(metaScalarTable.COLLECTION.eq(collectionName))).getSQL(ParamType.INLINED);
- return statement;
- }
-
- protected String getCascadeDeleteMetaIndexStatement(String databaseName, String collectionName) {
- String statement = sqlHelper.dsl().deleteFrom(metaIndexTable)
- .where(metaIndexTable.DATABASE.eq(databaseName)
- .and(metaIndexTable.COLLECTION.eq(collectionName))).getSQL(ParamType.INLINED);
- return statement;
- }
-
- protected String getCascadeDeleteMetaIndexFieldStatement(String databaseName,
- String collectionName) {
- String statement = sqlHelper.dsl().deleteFrom(metaIndexFieldTable)
- .where(metaIndexFieldTable.DATABASE.eq(databaseName)
- .and(metaIndexFieldTable.COLLECTION.eq(collectionName))).getSQL(ParamType.INLINED);
- return statement;
- }
-
- protected String getCascadeDeleteMetaIndexFieldStatement(String databaseName,
- String collectionName, String indexName) {
- String statement = sqlHelper.dsl().deleteFrom(metaIndexFieldTable)
- .where(metaIndexFieldTable.DATABASE.eq(databaseName)
- .and(metaIndexFieldTable.COLLECTION.eq(collectionName))
- .and(metaIndexFieldTable.INDEX.eq(indexName))).getSQL(ParamType.INLINED);
- return statement;
- }
-
- protected String getCascadeDeleteMetaDocPartIndexStatement(String databaseName,
- String collectionName) {
- String statement = sqlHelper.dsl().deleteFrom(metaDocPartIndexTable)
- .where(metaDocPartIndexTable.DATABASE.eq(databaseName)).getSQL(ParamType.INLINED);
- return statement;
- }
-
- protected String getCascadeDeleteMetaDocPartIndexColumnStatement(String databaseName,
- String collectionName) {
- String statement = sqlHelper.dsl().deleteFrom(metaDocPartIndexColumnTable)
- .where(metaDocPartIndexColumnTable.DATABASE.eq(databaseName)).getSQL(ParamType.INLINED);
- return statement;
- }
-
- protected String getCascadeDeleteMetaDocPartIndexColumnStatement(String databaseName,
- String collectionName, String indexIdentifier) {
- String statement = sqlHelper.dsl().deleteFrom(metaDocPartIndexColumnTable)
- .where(metaDocPartIndexColumnTable.DATABASE.eq(databaseName)
- .and(metaDocPartIndexColumnTable.INDEX_IDENTIFIER.eq(indexIdentifier))).getSQL(
- ParamType.INLINED);
- return statement;
- }
-
- protected String getDeleteMetaIndexStatement(String databaseName, String collectionName,
- String indexName) {
- String statement = sqlHelper.dsl().deleteFrom(metaIndexTable)
- .where(metaIndexTable.DATABASE.eq(databaseName)
- .and(metaIndexTable.COLLECTION.eq(collectionName))
- .and(metaIndexTable.NAME.eq(indexName))).getSQL(ParamType.INLINED);
- return statement;
- }
-
- protected String getDeleteMetaDocPartIndexStatement(String databaseName, String collectionName,
- String indexIdentifier) {
- String statement = sqlHelper.dsl().deleteFrom(metaDocPartIndexTable)
- .where(metaDocPartIndexTable.DATABASE.eq(databaseName)
- .and(metaDocPartIndexTable.IDENTIFIER.eq(indexIdentifier))).getSQL(ParamType.INLINED);
- return statement;
- }
-
- @Override
- public int consumeRids(DSLContext dsl, MetaDatabase database, MetaCollection collection,
- MetaDocPart docPart, int count) {
- Record1<Integer> lastRid = dsl.select(metaDocPartTable.LAST_RID).from(metaDocPartTable).where(
- metaDocPartTable.DATABASE.eq(database.getName())
- .and(metaDocPartTable.COLLECTION.eq(collection.getName()))
- .and(getTableRefEqCondition(metaDocPartTable.TABLE_REF, docPart.getTableRef())))
- .fetchOne();
- dsl.update(metaDocPartTable).set(metaDocPartTable.LAST_RID, metaDocPartTable.LAST_RID
- .plus(count)).where(
- metaDocPartTable.DATABASE.eq(database.getName())
- .and(metaDocPartTable.COLLECTION.eq(collection.getName()))
- .and(getTableRefEqCondition(metaDocPartTable.TABLE_REF, docPart.getTableRef())))
- .execute();
- return lastRid.value1();
- }
-
- protected abstract Condition getTableRefEqCondition(TableField<?, ?> field, TableRef tableRef);
-
- @Override
- public String writeMetaInfo(DSLContext dsl, MetaInfoKey key, String newValue) {
- Condition c = kvTable.KEY.eq(key.getKeyName());
-
- Optional<String> oldValue = dsl.select(kvTable.VALUE)
- .from(kvTable)
- .where(c)
- .fetchOptional()
- .map(Record1::value1);
-
- if (oldValue.isPresent()) {
- int updatedRows = dsl.update(kvTable)
- .set(kvTable.KEY, key.getKeyName())
- .set(kvTable.VALUE, newValue)
- .where(c)
- .execute();
- assert updatedRows == 1;
- } else {
- int newRows = dsl.insertInto(kvTable, kvTable.KEY, kvTable.VALUE)
- .values(key.getKeyName(), newValue)
- .execute();
- assert newRows == 1;
- }
- return oldValue.orElse(null);
- }
-}
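
writeMetaInfo in the file deleted above is a read-then-write upsert over the kv table: select the current value for the key, UPDATE when a row exists, INSERT otherwise, and return the previous value. The same pattern over plain JDBC, assuming a hypothetical kv(key, value) table and that both statements run inside one transaction (the check-then-act is only safe under that isolation):

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;

final class KvUpsertSketch {
  /** Returns the previous value, or null if the key was absent. */
  static String writeMetaInfo(Connection c, String key, String newValue) throws SQLException {
    String oldValue = null;
    try (PreparedStatement sel = c.prepareStatement(
        "SELECT \"value\" FROM kv WHERE \"key\" = ?")) {
      sel.setString(1, key);
      try (ResultSet rs = sel.executeQuery()) {
        if (rs.next()) {
          oldValue = rs.getString(1);
        }
      }
    }
    // Exactly one of the two statements runs, mirroring the update-or-insert above.
    String sql = (oldValue != null)
        ? "UPDATE kv SET \"value\" = ? WHERE \"key\" = ?"
        : "INSERT INTO kv (\"value\", \"key\") VALUES (?, ?)";
    try (PreparedStatement upsert = c.prepareStatement(sql)) {
      upsert.setString(1, newValue);
      upsert.setString(2, key);
      int rows = upsert.executeUpdate();
      assert rows == 1;
    }
    return oldValue;
  }
}
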
diff --git a/engine/backend/common/src/main/java/com/torodb/backend/AbstractReadInterface.java b/engine/backend/common/src/main/java/com/torodb/backend/AbstractReadInterface.java
deleted file mode 100644
index 0a4c41d5..00000000
--- a/engine/backend/common/src/main/java/com/torodb/backend/AbstractReadInterface.java
+++ /dev/null
@@ -1,422 +0,0 @@
-/*
- * ToroDB
- * Copyright © 2014 8Kdata Technology (www.8kdata.com)
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-package com.torodb.backend;
-
-import com.google.common.collect.ArrayListMultimap;
-import com.google.common.collect.Multimap;
-import com.torodb.backend.ErrorHandler.Context;
-import com.torodb.backend.d2r.ResultSetDocPartResult;
-import com.torodb.backend.tables.MetaDocPartTable.DocPartTableFields;
-import com.torodb.core.TableRef;
-import com.torodb.core.TableRefFactory;
-import com.torodb.core.cursors.Cursor;
-import com.torodb.core.cursors.EmptyCursor;
-import com.torodb.core.cursors.IteratorCursor;
-import com.torodb.core.d2r.DocPartResult;
-import com.torodb.core.transaction.metainf.MetaCollection;
-import com.torodb.core.transaction.metainf.MetaDatabase;
-import com.torodb.core.transaction.metainf.MetaDocPart;
-import com.torodb.core.transaction.metainf.MetaField;
-import com.torodb.kvdocument.values.KvValue;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
-import org.jooq.DSLContext;
-import org.jooq.lambda.Seq;
-import org.jooq.lambda.Unchecked;
-import org.jooq.lambda.tuple.Tuple2;
-
-import java.sql.Connection;
-import java.sql.PreparedStatement;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.stream.Collectors;
-import java.util.stream.Stream;
-
-import javax.annotation.Nonnull;
-import javax.inject.Provider;
-import javax.inject.Singleton;
-
-/**
- *
- */
-@Singleton
-@SuppressFBWarnings("SQL_PREPARED_STATEMENT_GENERATED_FROM_NONCONSTANT_STRING")
-public abstract class AbstractReadInterface implements ReadInterface {
-
- private final MetaDataReadInterface metaDataReadInterface;
- private final DataTypeProvider dataTypeProvider;
- private final ErrorHandler errorHandler;
- private final SqlHelper sqlHelper;
- private final TableRefFactory tableRefFactory;
-
- public AbstractReadInterface(MetaDataReadInterface metaDataReadInterface,
- DataTypeProvider dataTypeProvider,
- ErrorHandler errorHandler, SqlHelper sqlHelper, TableRefFactory tableRefFactory) {
- this.metaDataReadInterface = metaDataReadInterface;
- this.dataTypeProvider = dataTypeProvider;
- this.errorHandler = errorHandler;
- this.sqlHelper = sqlHelper;
- this.tableRefFactory = tableRefFactory;
- }
-
- @Override
- @SuppressFBWarnings(value = {"OBL_UNSATISFIED_OBLIGATION", "ODR_OPEN_DATABASE_RESOURCE"},
- justification =
- "ResultSet is wrapped in a Cursor. It's iterated and closed in caller code")
- public Cursor<Integer> getCollectionDidsWithFieldEqualsTo(DSLContext dsl,
- MetaDatabase metaDatabase,
- MetaCollection metaCol, MetaDocPart metaDocPart, MetaField metaField, KvValue<?> value)
- throws SQLException {
- assert metaDatabase.getMetaCollectionByIdentifier(metaCol.getIdentifier()) != null;
- assert metaCol.getMetaDocPartByIdentifier(metaDocPart.getIdentifier()) != null;
- assert metaDocPart.getMetaFieldByIdentifier(metaField.getIdentifier()) != null;
-
- String statement = getReadCollectionDidsWithFieldEqualsToStatement(metaDatabase.getIdentifier(),
- metaDocPart.getIdentifier(), metaField.getIdentifier());
- Connection connection = dsl.configuration().connectionProvider().acquire();
- try {
- PreparedStatement preparedStatement = connection.prepareStatement(statement);
- sqlHelper.setPreparedStatementValue(preparedStatement, 1, metaField.getType(), value);
- return new DefaultDidCursor(errorHandler, preparedStatement.executeQuery());
- } finally {
- dsl.configuration().connectionProvider().release(connection);
- }
- }
-
- protected abstract String getReadCollectionDidsWithFieldEqualsToStatement(String schemaName,
- String rootTableName,
- String columnName);
-
- @Override
- @SuppressFBWarnings(value = {"OBL_UNSATISFIED_OBLIGATION", "ODR_OPEN_DATABASE_RESOURCE"},
- justification =
- "ResultSet is wrapped in a Cursor. It's iterated and closed in caller code")
- public Cursor<Integer> getCollectionDidsWithFieldsIn(DSLContext dsl, MetaDatabase metaDatabase,
- MetaCollection metaCol, MetaDocPart metaDocPart,
- Multimap<MetaField, KvValue<?>> valuesMultimap)
- throws SQLException {
- assert metaDatabase.getMetaCollectionByIdentifier(metaCol.getIdentifier()) != null;
- assert metaCol.getMetaDocPartByIdentifier(metaDocPart.getIdentifier()) != null;
- assert valuesMultimap.keySet().stream().allMatch(metafield -> metaDocPart
- .getMetaFieldByIdentifier(metafield.getIdentifier()) != null);
-
- if (valuesMultimap.size() > 500) {
- @SuppressWarnings("checkstyle:LineLength")
- Stream<Entry<Long, List<Tuple2<Entry<MetaField, KvValue<?>>, Long>>>> valuesEntriesBatchStream =
- Seq.seq(valuesMultimap.entries().stream())
- .zipWithIndex()
- .groupBy(t -> t.v2 / 500)
- .entrySet()
- .stream();
- Stream<Stream<Entry<MetaField, KvValue<?>>>> valuesEntryBatchStreamOfStream =
- valuesEntriesBatchStream
- .map(e -> e.getValue()
- .stream()
- .map(se -> se.v1));
- Stream<Multimap<MetaField, KvValue<?>>> valuesMultimapBatchStream =
- valuesEntryBatchStreamOfStream
- .map(e -> toValuesMultimap(e));
- Stream<Cursor<Integer>> didCursorStream =
- valuesMultimapBatchStream
- .map(Unchecked.function(valuesMultimapBatch ->
- getCollectionDidsWithFieldsInBatch(
- dsl,
- metaDatabase,
- metaCol,
- metaDocPart,
- valuesMultimapBatch)));
- Stream<Integer> didStream = didCursorStream
- .flatMap(cursor -> cursor.getRemaining().stream());
-
- return new IteratorCursor<>(didStream.iterator());
- }
-
- return getCollectionDidsWithFieldsInBatch(dsl, metaDatabase, metaCol, metaDocPart,
- valuesMultimap);
- }
-
- private Multimap<MetaField, KvValue<?>> toValuesMultimap(
- Stream<Entry<MetaField, KvValue<?>>> valueEntryStream) {
- Multimap<MetaField, KvValue<?>> valuesMultimap = ArrayListMultimap.create();
-
- valueEntryStream.forEach(e -> valuesMultimap.put(e.getKey(), e.getValue()));
-
- return valuesMultimap;
- }
-
- @SuppressFBWarnings(value = {"OBL_UNSATISFIED_OBLIGATION", "ODR_OPEN_DATABASE_RESOURCE"},
- justification =
- "ResultSet is wrapped in a Cursor. It's iterated and closed in caller code")
- private Cursor<Integer> getCollectionDidsWithFieldsInBatch(DSLContext dsl,
- MetaDatabase metaDatabase,
- MetaCollection metaCol, MetaDocPart metaDocPart,
- Multimap<MetaField, KvValue<?>> valuesMultimap)
- throws SQLException {
- @SuppressWarnings("checkstyle:LineLength")
- Provider<Stream<Entry<MetaField, Collection<KvValue<?>>>>> valuesMultimapSortedStreamProvider =
- () -> valuesMultimap.asMap().entrySet().stream()
- .sorted((e1, e2) -> e1.getKey().getIdentifier().compareTo(e2.getKey().getIdentifier()));
- String statement = getReadCollectionDidsWithFieldInStatement(metaDatabase.getIdentifier(),
- metaDocPart.getIdentifier(), valuesMultimapSortedStreamProvider.get()
- .map(e -> new Tuple2<String, Integer>(e.getKey().getIdentifier(), e.getValue().size())));
- Connection connection = dsl.configuration().connectionProvider().acquire();
- try {
- PreparedStatement preparedStatement = connection.prepareStatement(statement);
- int parameterIndex = 1;
- Iterator<Entry<MetaField, Collection<KvValue<?>>>> valuesMultimapSortedIterator =
- valuesMultimapSortedStreamProvider.get().iterator();
- while (valuesMultimapSortedIterator.hasNext()) {
- Map.Entry<MetaField, Collection<KvValue<?>>> valuesMultimapEntry =
- valuesMultimapSortedIterator.next();
- for (KvValue<?> value : valuesMultimapEntry.getValue()) {
- sqlHelper.setPreparedStatementValue(preparedStatement, parameterIndex, valuesMultimapEntry
- .getKey().getType(), value);
- parameterIndex++;
- }
- }
- return new DefaultDidCursor(errorHandler, preparedStatement.executeQuery());
- } finally {
- dsl.configuration().connectionProvider().release(connection);
- }
- }
-
- protected abstract String getReadCollectionDidsWithFieldInStatement(String schemaName,
- String rootTableName,
- Stream<Tuple2<String, Integer>> valuesCountList);
-
- @Override
- @SuppressFBWarnings(value = {"OBL_UNSATISFIED_OBLIGATION", "ODR_OPEN_DATABASE_RESOURCE"},
- justification = "ResultSet is wrapped in a Cursor>>. It's "
- + "iterated and closed in caller code")
- public Cursor<Tuple2<Integer, KvValue<?>>> getCollectionDidsAndProjectionWithFieldsIn(
- DSLContext dsl, MetaDatabase metaDatabase,
- MetaCollection metaCol, MetaDocPart metaDocPart,
- Multimap<MetaField, KvValue<?>> valuesMultimap)
- throws SQLException {
- assert metaDatabase.getMetaCollectionByIdentifier(metaCol.getIdentifier()) != null;
- assert metaCol.getMetaDocPartByIdentifier(metaDocPart.getIdentifier()) != null;
- assert valuesMultimap.keySet().stream().allMatch(metafield -> metaDocPart
- .getMetaFieldByIdentifier(metafield.getIdentifier()) != null);
-
- Stream<Tuple2<MetaField, Collection<KvValue<?>>>> valuesBatchStream =
- valuesMultimap.asMap().entrySet().stream()
- .map(e -> new Tuple2<MetaField, Collection<KvValue<?>>>(e.getKey(), e.getValue()));
- if (valuesMultimap.asMap().entrySet().stream().anyMatch(e -> e.getValue().size() > 500)) {
- valuesBatchStream = valuesBatchStream
- .flatMap(e -> Seq.seq(e.v2.stream())
- .zipWithIndex()
- .groupBy(t -> t.v2 / 500)
- .entrySet()
- .stream()
- .map(se -> toValuesMap(e.v1, se)));
- }
- Stream<Cursor<Tuple2<Integer, KvValue<?>>>> didProjectionCursorStream =
- valuesBatchStream
- .map(Unchecked.function(mapBatch ->
- getCollectionDidsAndProjectionWithFieldsInBatch(
- dsl,
- metaDatabase,
- metaCol,
- metaDocPart,
- mapBatch.v1,
- mapBatch.v2)));
- Stream<Tuple2<Integer, KvValue<?>>> didProjectionStream =
- didProjectionCursorStream
- .flatMap(cursor -> cursor.getRemaining().stream());
-
- return new IteratorCursor<>(didProjectionStream.iterator());
- }
-
- @SuppressWarnings("rawtypes")
- private Tuple2<MetaField, Collection<KvValue<?>>> toValuesMap(MetaField metaField,
- Entry<Long, List<Tuple2<KvValue<?>, Long>>> groupedValuesMap) {
- List<KvValue> collect = groupedValuesMap.getValue().stream()
- .map(e -> (KvValue) e.v1)
- .collect(Collectors.toList());
-
- return new Tuple2<MetaField, Collection<KvValue<?>>>(metaField, collect.stream()
- .map(e -> (KvValue<?>) e)
- .collect(Collectors.toList()));
- }
-
- @SuppressFBWarnings(value = {"OBL_UNSATISFIED_OBLIGATION", "ODR_OPEN_DATABASE_RESOURCE"},
- justification = "ResultSet is wrapped in a Cursor>>. "
- + "It's iterated and closed in caller code")
- private Cursor>> getCollectionDidsAndProjectionWithFieldsInBatch(
- DSLContext dsl, MetaDatabase metaDatabase,
- MetaCollection metaCol, MetaDocPart metaDocPart, MetaField metaField,
- Collection> values)
- throws SQLException {
- String statement = getReadCollectionDidsAndProjectionWithFieldInStatement(metaDatabase
- .getIdentifier(),
- metaDocPart.getIdentifier(), metaField.getIdentifier(), values.size());
- Connection connection = dsl.configuration().connectionProvider().acquire();
- try {
- PreparedStatement preparedStatement = connection.prepareStatement(statement);
- int parameterIndex = 1;
- for (KvValue<?> value : values) {
- sqlHelper.setPreparedStatementValue(preparedStatement, parameterIndex, metaField.getType(),
- value);
- parameterIndex++;
- }
- return new AbstractCursor<Tuple2<Integer, KvValue<?>>>(errorHandler, preparedStatement
- .executeQuery()) {
- @Override
- protected Tuple2<Integer, KvValue<?>> read(ResultSet resultSet) throws SQLException {
- return new Tuple2<>(
- resultSet.getInt(1),
- sqlHelper.getResultSetKvValue(
- metaField.getType(),
- dataTypeProvider.getDataType(metaField.getType()), resultSet, 2
- )
- );
- }
- };
- } finally {
- dsl.configuration().connectionProvider().release(connection);
- }
- }
-
- protected abstract String getReadCollectionDidsAndProjectionWithFieldInStatement(
- String schemaName,
- String rootTableName,
- String columnName, int valuesCount);
-
- @Override
- @SuppressFBWarnings(value = {"OBL_UNSATISFIED_OBLIGATION", "ODR_OPEN_DATABASE_RESOURCE"},
- justification =
- "ResultSet is wrapped in a Cursor. It's iterated and closed in caller code")
- public Cursor<Integer> getAllCollectionDids(DSLContext dsl, MetaDatabase metaDatabase,
- MetaCollection metaCollection)
- throws SQLException {
-
- MetaDocPart rootDocPart = metaCollection.getMetaDocPartByTableRef(tableRefFactory.createRoot());
- if (rootDocPart == null) {
- return new EmptyCursor<>();
- }
-
- String statement = getReadAllCollectionDidsStatement(metaDatabase.getIdentifier(), rootDocPart
- .getIdentifier());
- Connection connection = dsl.configuration().connectionProvider().acquire();
- try {
- PreparedStatement preparedStatement = connection.prepareStatement(statement);
- return new DefaultDidCursor(errorHandler, preparedStatement.executeQuery());
- } finally {
- dsl.configuration().connectionProvider().release(connection);
- }
- }
-
- protected abstract String getReadAllCollectionDidsStatement(String schemaName,
- String rootTableName);
-
- @Override
- public long countAll(
- @Nonnull DSLContext dsl,
- @Nonnull MetaDatabase database,
- @Nonnull MetaCollection collection
- ) {
- MetaDocPart rootDocPart = collection.getMetaDocPartByTableRef(tableRefFactory.createRoot());
- if (rootDocPart == null) {
- return 0;
- }
- String statement = getReadCountAllStatement(database.getIdentifier(), rootDocPart
- .getIdentifier());
- return sqlHelper.executeStatementWithResult(dsl, statement, Context.FETCH)
- .get(0).into(Long.class);
- }
-
- protected abstract String getReadCountAllStatement(String schema, String rootTableName);
-
- @Nonnull
- @Override
- public List<DocPartResult> getCollectionResultSets(@Nonnull DSLContext dsl,
- @Nonnull MetaDatabase metaDatabase, @Nonnull MetaCollection metaCollection,
- @Nonnull Cursor<Integer> didCursor, int maxSize) throws SQLException {
- Collection<Integer> dids = didCursor.getNextBatch(maxSize);
- return getCollectionResultSets(dsl, metaDatabase, metaCollection, dids);
- }
-
- @Override
- @SuppressFBWarnings(value = {"OBL_UNSATISFIED_OBLIGATION", "ODR_OPEN_DATABASE_RESOURCE"},
- justification =
- "ResultSet is wrapped in a DocPartResult. It's iterated and closed in caller code")
- public List<DocPartResult> getCollectionResultSets(DSLContext dsl, MetaDatabase metaDatabase,
- MetaCollection metaCollection, Collection<Integer> dids) throws SQLException {
- ArrayList<DocPartResult> result = new ArrayList<>();
- Connection connection = dsl.configuration().connectionProvider().acquire();
- try {
- Iterator<? extends MetaDocPart> metaDocPartIterator = metaCollection
- .streamContainedMetaDocParts()
- .sorted(TableRefComparator.MetaDocPart.DESC)
- .iterator();
- while (metaDocPartIterator.hasNext()) {
- MetaDocPart metaDocPart = metaDocPartIterator.next();
- String statament = getDocPartStatament(metaDatabase, metaDocPart, dids);
-
- PreparedStatement preparedStatement = connection.prepareStatement(statament);
- result.add(new ResultSetDocPartResult(metaDataReadInterface, dataTypeProvider, errorHandler,
- metaDocPart, preparedStatement.executeQuery(), sqlHelper));
- }
- } finally {
- dsl.configuration().connectionProvider().release(connection);
- }
- return result;
- }
-
- protected abstract String getDocPartStatament(MetaDatabase metaDatabase, MetaDocPart metaDocPart,
- Collection<Integer> dids);
-
- @Override
- public int getLastRowIdUsed(DSLContext dsl, MetaDatabase metaDatabase,
- MetaCollection metaCollection, MetaDocPart metaDocPart) {
-
- String statement = getLastRowIdUsedStatement(metaDatabase, metaDocPart);
-
- Connection connection = dsl.configuration().connectionProvider().acquire();
- try (PreparedStatement preparedStatement = connection.prepareStatement(statement)) {
- try (ResultSet rs = preparedStatement.executeQuery()) {
- rs.next();
- int maxId = rs.getInt(1);
- if (rs.wasNull()) {
- return -1;
- }
- return maxId;
- }
- } catch (SQLException ex) {
- throw errorHandler.handleException(Context.FETCH, ex);
- } finally {
- dsl.configuration().connectionProvider().release(connection);
- }
- }
-
- protected abstract String getLastRowIdUsedStatement(MetaDatabase metaDatabase,
- MetaDocPart metaDocPart);
-
- protected String getPrimaryKeyColumnIdentifier(TableRef tableRef) {
- if (tableRef.isRoot()) {
- return DocPartTableFields.DID.fieldName;
- }
- return DocPartTableFields.RID.fieldName;
- }
-}
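
Both *WithFieldsIn methods in the file deleted above cap each generated IN clause at 500 bound values and concatenate the per-batch cursors into a single iterator, which keeps prepared statements below backend parameter limits. The chunking contract, shown with plain collections instead of the jOOλ Seq pipeline used above:

import java.util.ArrayList;
import java.util.List;

final class BatchSketch {
  static <T> List<List<T>> chunksOf(List<T> values, int batchSize) {
    List<List<T>> batches = new ArrayList<>();
    for (int from = 0; from < values.size(); from += batchSize) {
      int to = Math.min(from + batchSize, values.size());
      batches.add(values.subList(from, to)); // each batch becomes one IN (...) query
    }
    return batches;
  }

  public static void main(String[] args) {
    List<Integer> dids = new ArrayList<>();
    for (int i = 0; i < 1_201; i++) {
      dids.add(i);
    }
    // 1201 values with a 500-value cap -> 3 queries (500, 500, 201).
    System.out.println(chunksOf(dids, 500).size()); // 3
  }
}
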
diff --git a/engine/backend/common/src/main/java/com/torodb/backend/AbstractStructureInterface.java b/engine/backend/common/src/main/java/com/torodb/backend/AbstractStructureInterface.java
deleted file mode 100644
index b32c1077..00000000
--- a/engine/backend/common/src/main/java/com/torodb/backend/AbstractStructureInterface.java
+++ /dev/null
@@ -1,378 +0,0 @@
-/*
- * ToroDB
- * Copyright © 2014 8Kdata Technology (www.8kdata.com)
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-package com.torodb.backend;
-
-import com.google.common.base.Preconditions;
-import com.torodb.backend.ErrorHandler.Context;
-import com.torodb.backend.converters.jooq.DataTypeForKv;
-import com.torodb.backend.meta.TorodbSchema;
-import com.torodb.backend.tables.SemanticTable;
-import com.torodb.core.TableRef;
-import com.torodb.core.backend.IdentifierConstraints;
-import com.torodb.core.exceptions.InvalidDatabaseException;
-import com.torodb.core.exceptions.user.UserException;
-import com.torodb.core.transaction.metainf.MetaCollection;
-import com.torodb.core.transaction.metainf.MetaDatabase;
-import com.torodb.core.transaction.metainf.MetaDocPart;
-import com.torodb.core.transaction.metainf.MetaIdentifiedDocPartIndex;
-import com.torodb.core.transaction.metainf.MetaSnapshot;
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
-import org.jooq.DSLContext;
-import org.jooq.Meta;
-import org.jooq.Schema;
-import org.jooq.Table;
-import org.jooq.lambda.tuple.Tuple2;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Optional;
-import java.util.function.Function;
-import java.util.stream.Collectors;
-import java.util.stream.Stream;
-
-import javax.inject.Inject;
-import javax.inject.Singleton;
-
-@Singleton
-public abstract class AbstractStructureInterface implements StructureInterface {
-
- private static final Logger LOGGER =
- LogManager.getLogger(AbstractStructureInterface.class);
-
- private final DbBackendService dbBackend;
- private final MetaDataReadInterface metaDataReadInterface;
- private final SqlHelper sqlHelper;
- private final IdentifierConstraints identifierConstraints;
-
- @Inject
- public AbstractStructureInterface(DbBackendService dbBackend,
- MetaDataReadInterface metaDataReadInterface, SqlHelper sqlHelper,
- IdentifierConstraints identifierConstraints) {
- this.dbBackend = dbBackend;
- this.metaDataReadInterface = metaDataReadInterface;
- this.sqlHelper = sqlHelper;
- this.identifierConstraints = identifierConstraints;
- }
-
- protected abstract void dropDatabase(DSLContext dsl, String dbIdentifier);
-
- @Override
- public void dropDatabase(DSLContext dsl, MetaDatabase metaDatabase) {
- Iterator<? extends MetaCollection> metaCollectionIterator = metaDatabase.streamMetaCollections()
- .iterator();
- while (metaCollectionIterator.hasNext()) {
- MetaCollection metaCollection = metaCollectionIterator.next();
- Iterator<? extends MetaDocPart> metaDocPartIterator = metaCollection
- .streamContainedMetaDocParts()
- .sorted(TableRefComparator.MetaDocPart.DESC).iterator();
- while (metaDocPartIterator.hasNext()) {
- MetaDocPart metaDocPart = metaDocPartIterator.next();
- String statement = getDropTableStatement(metaDatabase.getIdentifier(), metaDocPart
- .getIdentifier());
- sqlHelper.executeUpdate(dsl, statement, Context.DROP_TABLE);
- }
- }
- String statement = getDropSchemaStatement(metaDatabase.getIdentifier());
- sqlHelper.executeUpdate(dsl, statement, Context.DROP_SCHEMA);
- }
-
- @Override
- public void dropCollection(DSLContext dsl, String schemaName, MetaCollection metaCollection) {
- Iterator<? extends MetaDocPart> metaDocPartIterator = metaCollection
- .streamContainedMetaDocParts()
- .sorted(TableRefComparator.MetaDocPart.DESC).iterator();
- while (metaDocPartIterator.hasNext()) {
- MetaDocPart metaDocPart = metaDocPartIterator.next();
- String statement = getDropTableStatement(schemaName, metaDocPart.getIdentifier());
- sqlHelper.executeUpdate(dsl, statement, Context.DROP_TABLE);
- }
- }
-
- protected abstract String getDropTableStatement(String schemaName, String tableName);
-
- protected abstract String getDropSchemaStatement(String schemaName);
-
- @Override
- public void renameCollection(DSLContext dsl, String fromSchemaName, MetaCollection fromCollection,
- String toSchemaName, MetaCollection toCollection) {
- Iterator<? extends MetaDocPart> metaDocPartIterator = fromCollection
- .streamContainedMetaDocParts().iterator();
- while (metaDocPartIterator.hasNext()) {
- MetaDocPart fromMetaDocPart = metaDocPartIterator.next();
- MetaDocPart toMetaDocPart = toCollection.getMetaDocPartByTableRef(fromMetaDocPart
- .getTableRef());
- String renameStatement = getRenameTableStatement(fromSchemaName, fromMetaDocPart
- .getIdentifier(), toMetaDocPart.getIdentifier());
- sqlHelper.executeUpdate(dsl, renameStatement, Context.RENAME_TABLE);
-
- Iterator<? extends MetaIdentifiedDocPartIndex> metaDocPartIndexIterator = fromMetaDocPart
- .streamIndexes().iterator();
- while (metaDocPartIndexIterator.hasNext()) {
- MetaIdentifiedDocPartIndex fromMetaIndex = metaDocPartIndexIterator.next();
- MetaIdentifiedDocPartIndex toMetaIndex = toMetaDocPart.streamIndexes()
- .filter(index -> index.hasSameColumns(fromMetaIndex))
- .findAny()
- .get();
-
- String renameIndexStatement = getRenameIndexStatement(fromSchemaName, fromMetaIndex
- .getIdentifier(), toMetaIndex.getIdentifier());
- sqlHelper.executeUpdate(dsl, renameIndexStatement, Context.RENAME_INDEX);
- }
-
- if (!fromSchemaName.equals(toSchemaName)) {
- String setSchemaStatement = getSetTableSchemaStatement(fromSchemaName, fromMetaDocPart
- .getIdentifier(), toSchemaName);
- sqlHelper.executeUpdate(dsl, setSchemaStatement, Context.SET_TABLE_SCHEMA);
- }
- }
- }
-
- protected abstract String getRenameTableStatement(String fromSchemaName, String fromTableName,
- String toTableName);
-
- protected abstract String getRenameIndexStatement(String fromSchemaName, String fromIndexName,
- String toIndexName);
-
- protected abstract String getSetTableSchemaStatement(String fromSchemaName, String fromTableName,
- String toSchemaName);
-
- @Override
- public void createIndex(DSLContext dsl, String indexName,
- String schemaName, String tableName,
- List<Tuple2<String, Boolean>> columnList, boolean unique
- ) throws UserException {
- if (!dbBackend.isOnDataInsertMode()) {
- Preconditions.checkArgument(!columnList.isEmpty(), "Can not create index on 0 columns");
-
- String statement = getCreateIndexStatement(indexName, schemaName, tableName, columnList,
- unique);
-
- sqlHelper.executeUpdateOrThrow(dsl, statement, unique ? Context.ADD_UNIQUE_INDEX :
- Context.CREATE_INDEX);
- }
- }
-
- protected abstract String getCreateIndexStatement(String indexName, String schemaName,
- String tableName, List<Tuple2<String, Boolean>> columnList, boolean unique);
-
- @Override
- public void dropIndex(DSLContext dsl, String schemaName, String indexName) {
- String statement = getDropIndexStatement(schemaName, indexName);
-
- sqlHelper.executeUpdate(dsl, statement, Context.DROP_INDEX);
- }
-
- @Override
- public void dropAll(DSLContext dsl) {
- dropUserDatabases(dsl, metaDataReadInterface);
- metaDataReadInterface.getMetaTables().forEach(t -> dsl.dropTable(t).execute());
- }
-
- @Override
- public void dropUserData(DSLContext dsl) {
- dropUserDatabases(dsl, metaDataReadInterface);
- metaDataReadInterface.getMetaTables().forEach(t ->
- dsl.deleteFrom(t).execute()
- );
- }
-
- /**
- * This method drops all user databases (usually, database schemas).
- *
- * To implement this method, the metainformation found on the metatables can be accessed
- * through the given {@link MetaDataReadInterface}.
- *
- * @param dsl
- * @param metaDataReadInterface
- */
- protected void dropUserDatabases(DSLContext dsl, MetaDataReadInterface metaDataReadInterface) {
- metaDataReadInterface.readMetaDatabaseTable(dsl)
- .forEach(dbRecord -> dropDatabase(dsl, dbRecord.getIdentifier()));
- }
-
- @Override
- public Optional<Schema> findTorodbSchema(DSLContext dsl, Meta jooqMeta) {
- Schema torodbSchema = null;
- for (Schema schema : jooqMeta.getSchemas()) {
- if (identifierConstraints.isSameIdentifier(TorodbSchema.IDENTIFIER, schema.getName())) {
- torodbSchema = schema;
- break;
- }
- }
- return Optional.ofNullable(torodbSchema);
- }
-
- @Override
- @SuppressWarnings({"rawtypes", "unchecked"})
- public void checkMetaDataTables(Schema torodbSchema) {
-
- List<SemanticTable<?>> metaTables = metaDataReadInterface.getMetaTables();
- for (SemanticTable<?> metaTable : metaTables) {
- String metaTableName = metaTable.getName();
- boolean metaTableFound = false;
- for (Table<?> table : torodbSchema.getTables()) {
- if (identifierConstraints.isSameIdentifier(table.getName(), metaTableName)) {
- metaTable.checkSemanticallyEquals(table);
- metaTableFound = true;
- LOGGER.debug(table + " found and checked");
- }
- }
- if (!metaTableFound) {
- throw new InvalidDatabaseException("The schema '" + TorodbSchema.IDENTIFIER + "'"
- + " does not contain the expected meta table '"
- + metaTableName + "'");
- }
- }
-
- }
-
- protected String getDropIndexStatement(String schemaName, String indexName) {
- StringBuilder sb = new StringBuilder()
- .append("DROP INDEX ")
- .append("\"").append(schemaName).append("\"")
- .append(".")
- .append("\"").append(indexName).append("\"");
- String statement = sb.toString();
- return statement;
- }
-
- @Override
- public void createSchema(DSLContext dsl, String schemaName) {
- String statement = getCreateSchemaStatement(schemaName);
- sqlHelper.executeUpdate(dsl, statement, Context.CREATE_SCHEMA);
- }
-
- protected abstract String getCreateSchemaStatement(String schemaName);
-
- @Override
- public void createRootDocPartTable(DSLContext dsl, String schemaName, String tableName,
- TableRef tableRef) {
- String statement = getCreateDocPartTableStatement(schemaName, tableName, metaDataReadInterface
- .getInternalFields(tableRef));
- sqlHelper.executeStatement(dsl, statement, Context.CREATE_TABLE);
- }
-
- @Override
- public void createDocPartTable(DSLContext dsl, String schemaName, String tableName,
- TableRef tableRef, String foreignTableName) {
- String statement = getCreateDocPartTableStatement(schemaName, tableName, metaDataReadInterface
- .getInternalFields(tableRef));
- sqlHelper.executeStatement(dsl, statement, Context.CREATE_TABLE);
- }
-
- protected abstract String getCreateDocPartTableStatement(String schemaName, String tableName,
- Collection<InternalField<?>> fields);
-
- @Override
- public Stream<Function<DSLContext, String>> streamRootDocPartTableIndexesCreation(
- String schemaName, String tableName, TableRef tableRef) {
- List<Function<DSLContext, String>> result = new ArrayList<>(1);
- if (!dbBackend.isOnDataInsertMode()) {
- String primaryKeyStatement = getAddDocPartTablePrimaryKeyStatement(schemaName, tableName,
- metaDataReadInterface.getPrimaryKeyInternalFields(tableRef));
-
- result.add(dsl -> {
- sqlHelper.executeStatement(dsl, primaryKeyStatement, Context.ADD_UNIQUE_INDEX);
- return metaDataReadInterface.getPrimaryKeyInternalFields(tableRef).stream().map(f -> f
- .getName()).collect(Collectors.joining("_")) + "_pkey";
- });
- }
- return result.stream();
- }
-
- @Override
- public Stream<Function<DSLContext, String>> streamDocPartTableIndexesCreation(String schemaName,
- String tableName, TableRef tableRef, String foreignTableName) {
- List<Function<DSLContext, String>> result = new ArrayList<>(4);
- if (!dbBackend.isOnDataInsertMode()) {
- String primaryKeyStatement = getAddDocPartTablePrimaryKeyStatement(schemaName, tableName,
- metaDataReadInterface.getPrimaryKeyInternalFields(tableRef));
- result.add((dsl) -> {
- sqlHelper.executeStatement(dsl, primaryKeyStatement, Context.ADD_UNIQUE_INDEX);
- return "rid_pkey";
- });
- }
-
- if (!dbBackend.isOnDataInsertMode()) {
- if (dbBackend.includeForeignKeys()) {
- String foreignKeyStatement = getAddDocPartTableForeignKeyStatement(schemaName, tableName,
- metaDataReadInterface.getReferenceInternalFields(tableRef),
- foreignTableName, metaDataReadInterface.getForeignInternalFields(tableRef));
- result.add((dsl) -> {
- sqlHelper.executeStatement(dsl, foreignKeyStatement, Context.ADD_FOREIGN_KEY);
- return metaDataReadInterface.getReferenceInternalFields(tableRef).stream().map(f -> f
- .getName()).collect(Collectors.joining("_")) + "_fkey";
- });
- } else {
- String foreignKeyIndexStatement = getCreateDocPartTableIndexStatement(schemaName, tableName,
- metaDataReadInterface.getReferenceInternalFields(tableRef));
- result.add((dsl) -> {
- sqlHelper.executeStatement(dsl, foreignKeyIndexStatement, Context.CREATE_INDEX);
- return metaDataReadInterface.getReferenceInternalFields(tableRef).stream().map(f -> f
- .getName()).collect(Collectors.joining("_")) + "_idx";
- });
- }
- }
-
- if (!dbBackend.isOnDataInsertMode()) {
- String readIndexStatement = getCreateDocPartTableIndexStatement(schemaName, tableName,
- metaDataReadInterface.getReadInternalFields(tableRef));
- result.add((dsl) -> {
- sqlHelper.executeStatement(dsl, readIndexStatement, Context.CREATE_INDEX);
- return metaDataReadInterface.getReadInternalFields(tableRef).stream()
- .map(f -> f.getName()).collect(Collectors.joining("_")) + "_idx";
- });
- }
-
- return result.stream();
- }
-
- @Override
- public Stream<Function<DSLContext, String>> streamDataInsertFinishTasks(MetaSnapshot snapshot) {
- return Collections.<Function<DSLContext, String>>emptySet().stream();
- }
-
- protected abstract String getAddDocPartTablePrimaryKeyStatement(String schemaName,
- String tableName,
- Collection<InternalField<?>> primaryKeyFields);
-
- protected abstract String getAddDocPartTableForeignKeyStatement(String schemaName,
- String tableName,
- Collection<InternalField<?>> referenceFields, String foreignTableName,
- Collection<InternalField<?>> foreignFields);
-
- protected abstract String getCreateDocPartTableIndexStatement(String schemaName, String tableName,
- Collection<InternalField<?>> indexedFields);
-
- @Override
- public void addColumnToDocPartTable(DSLContext dsl, String schemaName, String tableName,
- String columnName, DataTypeForKv<?> dataType) {
- String statement = getAddColumnToDocPartTableStatement(schemaName, tableName, columnName,
- dataType);
-
- sqlHelper.executeStatement(dsl, statement, Context.ADD_COLUMN);
- }
-
- protected abstract String getAddColumnToDocPartTableStatement(String schemaName, String tableName,
- String columnName, DataTypeForKv<?> dataType);
-}
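Note: the abstract get*Statement methods above were the per-backend extension points of this
class; subclasses returned backend-specific DDL text and the shared code executed it through
SqlHelper. A minimal sketch of what a concrete subclass supplied (the quoting style is
illustrative; the real derby/postgres implementations are not part of this diff):

    @Override
    protected String getCreateSchemaStatement(String schemaName) {
      // identifiers are double-quoted, matching getDropIndexStatement() above
      return "CREATE SCHEMA \"" + schemaName + "\"";
    }

    @Override
    protected String getRenameTableStatement(String fromSchemaName, String fromTableName,
        String toTableName) {
      return "ALTER TABLE \"" + fromSchemaName + "\".\"" + fromTableName + "\""
          + " RENAME TO \"" + toTableName + "\"";
    }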
diff --git a/engine/backend/common/src/main/java/com/torodb/backend/AbstractWriteInterface.java b/engine/backend/common/src/main/java/com/torodb/backend/AbstractWriteInterface.java
deleted file mode 100644
index a7e8897d..00000000
--- a/engine/backend/common/src/main/java/com/torodb/backend/AbstractWriteInterface.java
+++ /dev/null
@@ -1,235 +0,0 @@
-/*
- * ToroDB
- * Copyright © 2014 8Kdata Technology (www.8kdata.com)
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-package com.torodb.backend;
-
-import com.torodb.backend.ErrorHandler.Context;
-import com.torodb.core.cursors.Cursor;
-import com.torodb.core.d2r.DocPartData;
-import com.torodb.core.d2r.DocPartRow;
-import com.torodb.core.exceptions.user.UserException;
-import com.torodb.core.transaction.metainf.FieldType;
-import com.torodb.core.transaction.metainf.MetaCollection;
-import com.torodb.core.transaction.metainf.MetaDocPart;
-import com.torodb.core.transaction.metainf.MetaField;
-import com.torodb.core.transaction.metainf.MetaScalar;
-import com.torodb.kvdocument.values.KvValue;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
-import org.jooq.DSLContext;
-import org.jooq.exception.DataAccessException;
-
-import java.sql.Connection;
-import java.sql.PreparedStatement;
-import java.sql.SQLException;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Iterator;
-import java.util.List;
-
-import javax.annotation.Nonnull;
-import javax.inject.Singleton;
-
-/**
- *
- */
-@Singleton
-@SuppressFBWarnings("SQL_PREPARED_STATEMENT_GENERATED_FROM_NONCONSTANT_STRING")
-public abstract class AbstractWriteInterface implements WriteInterface {
-
- private static final Logger LOGGER = LogManager.getLogger(AbstractWriteInterface.class);
-
- private final MetaDataReadInterface metaDataReadInterface;
- private final ErrorHandler errorHandler;
- private final SqlHelper sqlHelper;
-
- public AbstractWriteInterface(MetaDataReadInterface metaDataReadInterface,
- ErrorHandler errorHandler,
- SqlHelper sqlHelper) {
- super();
- this.metaDataReadInterface = metaDataReadInterface;
- this.errorHandler = errorHandler;
- this.sqlHelper = sqlHelper;
- }
-
- @Override
- public long deleteCollectionDocParts(@Nonnull DSLContext dsl,
- @Nonnull String schemaName, @Nonnull MetaCollection metaCollection,
- @Nonnull Cursor<Integer> didCursor
- ) {
- Connection c = dsl.configuration().connectionProvider().acquire();
- try {
- int maxBatchSize = 100;
- long deleted = 0;
-
- while (didCursor.hasNext()) {
- Collection<Integer> dids = didCursor.getNextBatch(maxBatchSize);
- deleteCollectionDocParts(c, schemaName, metaCollection, dids);
- deleted += dids.size();
- }
-
- return deleted;
- } finally {
- dsl.configuration().connectionProvider().release(c);
- }
- }
-
- @Override
- public void deleteCollectionDocParts(@Nonnull DSLContext dsl,
- @Nonnull String schemaName, @Nonnull MetaCollection metaCollection,
- @Nonnull Collection<Integer> dids
- ) {
- Connection c = dsl.configuration().connectionProvider().acquire();
- try {
- deleteCollectionDocParts(c, schemaName, metaCollection, dids);
- } finally {
- dsl.configuration().connectionProvider().release(c);
- }
- }
-
- private void deleteCollectionDocParts(Connection c, String schemaName,
- MetaCollection metaCollection,
- Collection<Integer> dids) {
- Iterator<? extends MetaDocPart> iterator = metaCollection.streamContainedMetaDocParts()
- .sorted(TableRefComparator.MetaDocPart.DESC).iterator();
- while (iterator.hasNext()) {
- MetaDocPart metaDocPart = iterator.next();
- String statement = getDeleteDocPartsStatement(schemaName, metaDocPart.getIdentifier(), dids);
-
- sqlHelper.executeUpdate(c, statement, Context.DELETE);
-
- LOGGER.trace("Executed {}", statement);
- }
- }
-
- protected abstract String getDeleteDocPartsStatement(String schemaName, String tableName,
- Collection<Integer> dids);
-
- @Override
- public void insertDocPartData(DSLContext dsl, String schemaName, DocPartData docPartData) throws
- UserException {
- Iterator<DocPartRow> docPartRowIterator = docPartData.iterator();
- if (!docPartRowIterator.hasNext()) {
- return;
- }
-
- try {
- MetaDocPart metaDocPart = docPartData.getMetaDocPart();
- Iterator<MetaScalar> metaScalarIterator = docPartData.orderedMetaScalarIterator();
- Iterator<MetaField> metaFieldIterator = docPartData.orderedMetaFieldIterator();
- standardInsertDocPartData(dsl, schemaName, docPartData, metaDocPart, metaScalarIterator,
- metaFieldIterator, docPartRowIterator);
- } catch (DataAccessException ex) {
- throw errorHandler.handleUserException(Context.INSERT, ex);
- }
- }
-
- protected int getMaxBatchSize() {
- return 30;
- }
-
- protected void standardInsertDocPartData(DSLContext dsl, String schemaName,
- DocPartData docPartData, MetaDocPart metaDocPart,
- Iterator<MetaScalar> metaScalarIterator, Iterator<MetaField> metaFieldIterator,
- Iterator<DocPartRow> docPartRowIterator) throws UserException {
- final int maxBatchSize = getMaxBatchSize();
- Collection<InternalField<?>> internalFields = metaDataReadInterface.getInternalFields(
- metaDocPart);
- List<FieldType> fieldTypeList = new ArrayList<>();
- String statement = getInsertDocPartDataStatement(schemaName, metaDocPart, metaFieldIterator,
- metaScalarIterator,
- internalFields, fieldTypeList);
- assert assertFieldTypeListIsConsistent(docPartData, fieldTypeList) :
- "fieldTypeList should be an ordered list of FieldType"
- + " from MetaScalar and MetaField following the ordering of "
- + "DocPartData.orderedMetaScalarIterator and DocPartData.orderedMetaFieldIterator";
-
- Connection connection = dsl.configuration().connectionProvider().acquire();
- try {
- try (PreparedStatement preparedStatement = connection.prepareStatement(statement)) {
- int docCounter = 0;
- while (docPartRowIterator.hasNext()) {
- DocPartRow docPartRow = docPartRowIterator.next();
- docCounter++;
- int parameterIndex = 1;
- for (InternalField<?> internalField : internalFields) {
- internalField.set(preparedStatement, parameterIndex, docPartRow);
- parameterIndex++;
- }
- Iterator<FieldType> fieldTypeIterator = fieldTypeList.iterator();
- for (KvValue<?> value : docPartRow.getScalarValues()) {
- sqlHelper.setPreparedStatementNullableValue(
- preparedStatement, parameterIndex++,
- fieldTypeIterator.next(),
- value);
- }
- for (KvValue<?> value : docPartRow.getFieldValues()) {
- sqlHelper.setPreparedStatementNullableValue(
- preparedStatement, parameterIndex++,
- fieldTypeIterator.next(),
- value);
- }
- preparedStatement.addBatch();
-
- if (LOGGER.isTraceEnabled()) {
- LOGGER.trace("Added to insert {}", preparedStatement.toString());
- }
-
- if (docCounter % maxBatchSize == 0 || !docPartRowIterator.hasNext()) {
- preparedStatement.executeBatch();
-
- LOGGER.trace("Insertion batch executed");
- }
- }
- }
- } catch (SQLException ex) {
- throw errorHandler.handleUserException(Context.INSERT, ex);
- } finally {
- dsl.configuration().connectionProvider().release(connection);
- }
- }
-
- protected abstract String getInsertDocPartDataStatement(
- String schemaName,
- MetaDocPart metaDocPart,
- Iterator<MetaField> metaFieldIterator,
- Iterator<MetaScalar> metaScalarIterator,
- Collection<InternalField<?>> internalFields,
- List<FieldType> fieldTypeList);
-
- private boolean assertFieldTypeListIsConsistent(DocPartData docPartData,
- List<FieldType> fieldTypeList) {
- Iterator<MetaScalar> metaScalarIterator = docPartData.orderedMetaScalarIterator();
- Iterator<MetaField> metaFieldIterator = docPartData.orderedMetaFieldIterator();
- Iterator<FieldType> fieldTypeIterator = fieldTypeList.iterator();
- while (metaScalarIterator.hasNext()) {
- if (!fieldTypeIterator.hasNext() || !metaScalarIterator.next().getType().equals(
- fieldTypeIterator.next())) {
- return false;
- }
- }
- while (metaFieldIterator.hasNext()) {
- if (!fieldTypeIterator.hasNext() || !metaFieldIterator.next().getType().equals(
- fieldTypeIterator.next())) {
- return false;
- }
- }
- return true;
- }
-}
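Note: standardInsertDocPartData() above batches all rows of a DocPartData through one
PreparedStatement, flushing every getMaxBatchSize() rows (30 by default) plus once more for the
trailing partial batch. Reduced to plain JDBC, with insertSql, rows and bind() standing in for
what the class derives from the doc part metadata, the control flow is:

    try (PreparedStatement ps = connection.prepareStatement(insertSql)) {
      int count = 0;
      while (rows.hasNext()) {
        bind(ps, rows.next()); // one parameter per internal field, scalar and field value
        ps.addBatch();
        count++;
        if (count % maxBatchSize == 0 || !rows.hasNext()) {
          ps.executeBatch(); // full batches, then the final partial batch
        }
      }
    }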
diff --git a/engine/backend/common/src/main/java/com/torodb/backend/BackendBundleImpl.java b/engine/backend/common/src/main/java/com/torodb/backend/BackendBundleImpl.java
deleted file mode 100644
index 205af85e..00000000
--- a/engine/backend/common/src/main/java/com/torodb/backend/BackendBundleImpl.java
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * ToroDB
- * Copyright © 2014 8Kdata Technology (www.8kdata.com)
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-package com.torodb.backend;
-
-import com.google.inject.assistedinject.Assisted;
-import com.torodb.core.backend.BackendBundle;
-import com.torodb.core.backend.BackendConnection;
-import com.torodb.core.backend.BackendService;
-import com.torodb.core.backend.ExclusiveWriteBackendTransaction;
-import com.torodb.core.modules.AbstractBundle;
-import com.torodb.core.supervision.Supervisor;
-
-import java.util.concurrent.ThreadFactory;
-
-import javax.inject.Inject;
-
-/**
- *
- */
-public class BackendBundleImpl extends AbstractBundle implements BackendBundle {
-
- private final DbBackendService lowLevelService;
- private final BackendService backendService;
-
- @Inject
- public BackendBundleImpl(DbBackendService lowLevelService,
- BackendServiceImpl backendService, ThreadFactory threadFactory,
- @Assisted Supervisor supervisor) {
- super(threadFactory, supervisor);
- this.lowLevelService = lowLevelService;
- this.backendService = backendService;
- }
-
- @Override
- protected void postDependenciesStartUp() throws Exception {
- lowLevelService.startAsync();
- lowLevelService.awaitRunning();
-
- backendService.startAsync();
- backendService.awaitRunning();
-
- try (BackendConnection conn = backendService.openConnection();
- ExclusiveWriteBackendTransaction trans = conn.openExclusiveWriteTransaction()) {
-
- trans.checkOrCreateMetaDataTables();
- trans.commit();
- }
- }
-
- @Override
- protected void preDependenciesShutDown() throws Exception {
- backendService.stopAsync();
- backendService.awaitTerminated();
-
- lowLevelService.stopAsync();
- lowLevelService.awaitTerminated();
- }
-
- @Override
- public BackendService getBackendService() {
- return backendService;
- }
-
-}
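Note: the bundle encodes a strict lifecycle order: the low-level DbBackendService starts first,
then the BackendService, and only then are the metadata tables checked or created inside a
single exclusive write transaction; shutdown reverses the order. Assuming AbstractBundle exposes
the usual Guava Service lifecycle (which the startAsync()/awaitRunning() calls above suggest), a
caller drives it like this:

    bundle.startAsync();
    bundle.awaitRunning(); // db service, backend service, then checkOrCreateMetaDataTables()
    BackendService backend = bundle.getBackendService();
    // ... use the backend ...
    bundle.stopAsync();
    bundle.awaitTerminated();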
diff --git a/engine/backend/common/src/main/java/com/torodb/backend/BackendConfiguration.java b/engine/backend/common/src/main/java/com/torodb/backend/BackendConfiguration.java
deleted file mode 100644
index 8ffe0d68..00000000
--- a/engine/backend/common/src/main/java/com/torodb/backend/BackendConfiguration.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * ToroDB
- * Copyright © 2014 8Kdata Technology (www.8kdata.com)
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-package com.torodb.backend;
-
-import javax.annotation.Nonnegative;
-
-/**
- * Configuration data for the backend
- */
-public interface BackendConfiguration {
-
- long getCursorTimeout();
-
- long getConnectionPoolTimeout();
-
- int getConnectionPoolSize();
-
- int getReservedReadPoolSize();
-
- String getUsername();
-
- String getPassword();
-
- String getDbHost();
-
- String getDbName();
-
- @Nonnegative
- int getDbPort();
-
- boolean includeForeignKeys();
-}
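Note: this interface was the whole configuration contract of the backend layer. A throwaway
implementation is enough to wire a backend up by hand; all values below are illustrative, not
ToroDB's shipped defaults:

    BackendConfiguration conf = new BackendConfiguration() {
      @Override public long getCursorTimeout() { return 10 * 60 * 1000L; }
      @Override public long getConnectionPoolTimeout() { return 10_000L; }
      @Override public int getConnectionPoolSize() { return 30; }
      @Override public int getReservedReadPoolSize() { return 10; }
      @Override public String getUsername() { return "torodb"; }
      @Override public String getPassword() { return "torodb"; }
      @Override public String getDbHost() { return "localhost"; }
      @Override public String getDbName() { return "torod"; }
      @Override public int getDbPort() { return 5432; }
      @Override public boolean includeForeignKeys() { return false; }
    };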
diff --git a/engine/backend/common/src/main/java/com/torodb/backend/BackendConnectionImpl.java b/engine/backend/common/src/main/java/com/torodb/backend/BackendConnectionImpl.java
deleted file mode 100644
index 4f90e60c..00000000
--- a/engine/backend/common/src/main/java/com/torodb/backend/BackendConnectionImpl.java
+++ /dev/null
@@ -1,128 +0,0 @@
-/*
- * ToroDB
- * Copyright © 2014 8Kdata Technology (www.8kdata.com)
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-package com.torodb.backend;
-
-import com.google.common.base.Preconditions;
-import com.torodb.backend.meta.SchemaUpdater;
-import com.torodb.core.backend.BackendConnection;
-import com.torodb.core.backend.BackendTransaction;
-import com.torodb.core.backend.ExclusiveWriteBackendTransaction;
-import com.torodb.core.backend.ReadOnlyBackendTransaction;
-import com.torodb.core.backend.SharedWriteBackendTransaction;
-import com.torodb.core.d2r.IdentifierFactory;
-import com.torodb.core.d2r.ReservedIdGenerator;
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
-
-/**
- *
- */
-public class BackendConnectionImpl implements BackendConnection {
-
- private static final Logger LOGGER = LogManager.getLogger(BackendConnectionImpl.class);
- private final BackendServiceImpl backend;
- private final SqlInterface sqlInterface;
- private boolean closed = false;
- private final IdentifierFactory identifierFactory;
- private final ReservedIdGenerator ridGenerator;
- private BackendTransaction currentTransaction;
-
- public BackendConnectionImpl(BackendServiceImpl backend,
- SqlInterface sqlInterface, ReservedIdGenerator ridGenerator,
- IdentifierFactory identifierFactory) {
- this.backend = backend;
- this.sqlInterface = sqlInterface;
- this.identifierFactory = identifierFactory;
- this.ridGenerator = ridGenerator;
- }
-
- @Override
- public ReadOnlyBackendTransaction openReadOnlyTransaction() {
- Preconditions.checkState(!closed, "This connection is closed");
- Preconditions.checkState(currentTransaction == null,
- "Another transaction is currently under execution. Transaction is " + currentTransaction);
-
- ReadOnlyBackendTransactionImpl transaction = new ReadOnlyBackendTransactionImpl(sqlInterface,
- this);
- currentTransaction = transaction;
-
- return transaction;
- }
-
- @Override
- public SharedWriteBackendTransaction openSharedWriteTransaction() {
- Preconditions.checkState(!closed, "This connection is closed");
- Preconditions.checkState(currentTransaction == null,
- "Another transaction is currently under execution. Transaction is " + currentTransaction);
-
- SharedWriteBackendTransactionImpl transaction = new SharedWriteBackendTransactionImpl(
- sqlInterface, this, identifierFactory);
- currentTransaction = transaction;
-
- return transaction;
- }
-
- @Override
- public ExclusiveWriteBackendTransaction openExclusiveWriteTransaction() {
- Preconditions.checkState(!closed, "This connection is closed");
- Preconditions.checkState(currentTransaction == null,
- "Another transaction is currently under execution. Transaction is " + currentTransaction);
-
- ExclusiveWriteBackendTransactionImpl transaction = new ExclusiveWriteBackendTransactionImpl(
- sqlInterface, this, identifierFactory, ridGenerator);
- currentTransaction = transaction;
-
- return transaction;
- }
-
- KvMetainfoHandler getMetaInfoHandler() {
- return backend.getMetaInfoHandler();
- }
-
- SchemaUpdater getSchemaUpdater() {
- return backend.getSchemaUpdater();
- }
-
- @Override
- public void close() {
- if (!closed) {
- closed = true;
- if (currentTransaction != null) {
- currentTransaction.close();
- }
- assert currentTransaction == null;
- backend.onConnectionClosed(this);
- }
- }
-
- void onTransactionClosed(BackendTransaction transaction) {
- if (currentTransaction == null) {
- LOGGER.debug(
- "Received an on transaction close notification, but there is no current transaction");
- return;
- }
- if (currentTransaction != transaction) {
- LOGGER.debug("Received an on transaction close notification, but the received transaction is "
- + "not the same as the current one");
- return;
- }
- currentTransaction = null;
- }
-
-}
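Note: the Preconditions.checkState calls above enforce at most one live transaction per
connection, and close() tears the current transaction down before notifying the service. The
intended usage pattern therefore nests the two resources, exactly as BackendBundleImpl does:

    try (BackendConnection conn = backendService.openConnection();
        ReadOnlyBackendTransaction trans = conn.openReadOnlyTransaction()) {
      // opening a second transaction here would fail checkState();
      // trans.close() clears currentTransaction via onTransactionClosed()
    }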
diff --git a/engine/backend/common/src/main/java/com/torodb/backend/BackendServiceImpl.java b/engine/backend/common/src/main/java/com/torodb/backend/BackendServiceImpl.java
deleted file mode 100644
index c2fdb2ff..00000000
--- a/engine/backend/common/src/main/java/com/torodb/backend/BackendServiceImpl.java
+++ /dev/null
@@ -1,290 +0,0 @@
-/*
- * ToroDB
- * Copyright © 2014 8Kdata Technology (www.8kdata.com)
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-package com.torodb.backend;
-
-import com.torodb.backend.ErrorHandler.Context;
-import com.torodb.backend.meta.SchemaUpdater;
-import com.torodb.core.annotations.TorodbIdleService;
-import com.torodb.core.backend.BackendConnection;
-import com.torodb.core.backend.BackendService;
-import com.torodb.core.concurrent.ConcurrentToolsFactory;
-import com.torodb.core.concurrent.StreamExecutor;
-import com.torodb.core.d2r.IdentifierFactory;
-import com.torodb.core.d2r.ReservedIdGenerator;
-import com.torodb.core.exceptions.SystemException;
-import com.torodb.core.exceptions.ToroRuntimeException;
-import com.torodb.core.exceptions.user.UserException;
-import com.torodb.core.retrier.Retrier;
-import com.torodb.core.retrier.Retrier.Hint;
-import com.torodb.core.retrier.RetrierGiveUpException;
-import com.torodb.core.services.IdleTorodbService;
-import com.torodb.core.transaction.RollbackException;
-import com.torodb.core.transaction.metainf.MetaCollection;
-import com.torodb.core.transaction.metainf.MetaDatabase;
-import com.torodb.core.transaction.metainf.MetaDocPart;
-import com.torodb.core.transaction.metainf.MetaDocPartIndexColumn;
-import com.torodb.core.transaction.metainf.MetaIdentifiedDocPartIndex;
-import com.torodb.core.transaction.metainf.MetaSnapshot;
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
-import org.jooq.DSLContext;
-import org.jooq.lambda.tuple.Tuple2;
-
-import java.sql.Connection;
-import java.sql.SQLException;
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.List;
-import java.util.concurrent.ThreadFactory;
-import java.util.function.Consumer;
-import java.util.function.Function;
-import java.util.stream.Stream;
-
-import javax.inject.Inject;
-
-/**
- *
- */
-public class BackendServiceImpl extends IdleTorodbService implements BackendService {
-
- private static final Logger LOGGER = LogManager.getLogger(BackendServiceImpl.class);
-
- private final DbBackendService dbBackendService;
- private final SqlInterface sqlInterface;
- private final ReservedIdGenerator ridGenerator;
- private final Retrier retrier;
- private final StreamExecutor streamExecutor;
- private final KvMetainfoHandler metainfoHandler;
- private final IdentifierFactory identifierFactory;
- private final SchemaUpdater schemaUpdater;
-
- /**
- * @param threadFactory the thread factory that will be used to create the startup and
- * shutdown threads
- * @param dbBackendService
- * @param sqlInterface
- * @param schemaUpdater
- * @param metainfoHandler
- * @param identifierFactory
- * @param ridGenerator
- * @param retrier
- * @param concurrentToolsFactory
- */
- @Inject
- public BackendServiceImpl(@TorodbIdleService ThreadFactory threadFactory,
- ReservedIdGenerator ridGenerator, DbBackendService dbBackendService,
- SqlInterface sqlInterface, IdentifierFactory identifierFactory,
- Retrier retrier,
- ConcurrentToolsFactory concurrentToolsFactory,
- KvMetainfoHandler metainfoHandler, SchemaUpdater schemaUpdater) {
- super(threadFactory);
-
- this.dbBackendService = dbBackendService;
- this.sqlInterface = sqlInterface;
- this.ridGenerator = ridGenerator;
- this.retrier = retrier;
- this.streamExecutor = concurrentToolsFactory.createStreamExecutor("backend-inner-jobs", true);
- this.metainfoHandler = metainfoHandler;
- this.identifierFactory = identifierFactory;
- this.schemaUpdater = schemaUpdater;
- }
-
- @Override
- public BackendConnection openConnection() {
- return new BackendConnectionImpl(this, sqlInterface, ridGenerator, identifierFactory);
- }
-
- @Override
- public void enableDataImportMode(MetaSnapshot snapshot) throws RollbackException {
- if (!sqlInterface.getDbBackend().isOnDataInsertMode()) {
- if (snapshot.streamMetaDatabases().findAny().isPresent()) {
- throw new IllegalStateException("Can not disable indexes if any database exists");
- }
-
- sqlInterface.getDbBackend().enableDataInsertMode();
- }
- }
-
- @Override
- public void disableDataImportMode(MetaSnapshot snapshot) throws RollbackException {
- if (sqlInterface.getDbBackend().isOnDataInsertMode()) {
- sqlInterface.getDbBackend().disableDataInsertMode();
-
- //create internal indexes
- Stream<Consumer<DSLContext>> createInternalIndexesJobs = snapshot.streamMetaDatabases()
- .flatMap(
- db -> db.streamMetaCollections().flatMap(
- col -> col.streamContainedMetaDocParts().flatMap(
- docPart -> enableInternalIndexJobs(db, col, docPart)
- )
- )
- );
-
- //create indexes
- Stream<Consumer<DSLContext>> createIndexesJobs = snapshot.streamMetaDatabases().flatMap(
- db -> db.streamMetaCollections().flatMap(
- col -> enableIndexJobs(db, col)
- )
- );
-
- //backend specific jobs
- Stream<Consumer<DSLContext>> backendSpecificJobs = sqlInterface.getStructureInterface()
- .streamDataInsertFinishTasks(snapshot).map(job -> {
- return (Consumer<DSLContext>) dsl -> {
- String index = job.apply(dsl);
- LOGGER.info("Task {} completed", index);
- };
- });
- Stream<Consumer<DSLContext>> jobs = Stream
- .concat(createInternalIndexesJobs, createIndexesJobs);
- jobs = Stream.concat(jobs, backendSpecificJobs);
- Stream<Runnable> runnables = jobs.map(this::dslConsumerToRunnable);
-
- streamExecutor.executeRunnables(runnables)
- .join();
- }
- }
-
- private Stream<Consumer<DSLContext>> enableInternalIndexJobs(MetaDatabase db, MetaCollection col,
- MetaDocPart docPart) {
- StructureInterface structureInterface = sqlInterface.getStructureInterface();
-
- Stream<Function<DSLContext, String>> consumerStream;
-
- if (docPart.getTableRef().isRoot()) {
- consumerStream = structureInterface.streamRootDocPartTableIndexesCreation(
- db.getIdentifier(),
- docPart.getIdentifier(),
- docPart.getTableRef()
- );
- } else {
- MetaDocPart parentDocPart = col.getMetaDocPartByTableRef(
- docPart.getTableRef().getParent().get()
- );
- assert parentDocPart != null;
- consumerStream = structureInterface.streamDocPartTableIndexesCreation(
- db.getIdentifier(),
- docPart.getIdentifier(),
- docPart.getTableRef(),
- parentDocPart.getIdentifier()
- );
- }
-
- return consumerStream.map(job -> {
- return (Consumer<DSLContext>) dsl -> {
- String index = job.apply(dsl);
- LOGGER.info("Created internal index {} for table {}", index, docPart.getIdentifier());
- };
- });
- }
-
- private Stream<Consumer<DSLContext>> enableIndexJobs(MetaDatabase db, MetaCollection col) {
- List<Consumer<DSLContext>> consumerList = new ArrayList<>();
-
- Iterator<? extends MetaDocPart> docPartIterator = col.streamContainedMetaDocParts().iterator();
- while (docPartIterator.hasNext()) {
- MetaDocPart docPart = docPartIterator.next();
-
- Iterator<? extends MetaIdentifiedDocPartIndex> docPartIndexIterator = docPart.streamIndexes()
- .iterator();
- while (docPartIndexIterator.hasNext()) {
- MetaIdentifiedDocPartIndex docPartIndex = docPartIndexIterator.next();
-
- consumerList.add(createIndexJob(db, docPart, docPartIndex));
- }
- }
-
- return consumerList.stream();
- }
-
- private Consumer<DSLContext> createIndexJob(MetaDatabase db, MetaDocPart docPart,
- MetaIdentifiedDocPartIndex docPartIndex) {
- return dsl -> {
- List<Tuple2<String, Boolean>> columnList = new ArrayList<>(docPartIndex.size());
- for (Iterator<? extends MetaDocPartIndexColumn> indexColumnIterator = docPartIndex
- .iteratorColumns(); indexColumnIterator.hasNext();) {
- MetaDocPartIndexColumn indexColumn = indexColumnIterator.next();
- columnList.add(new Tuple2<>(indexColumn.getIdentifier(), indexColumn.getOrdering()
- .isAscending()));
- }
-
- try {
- sqlInterface.getStructureInterface().createIndex(
- dsl, docPartIndex.getIdentifier(), db.getIdentifier(), docPart.getIdentifier(),
- columnList,
- docPartIndex.isUnique());
- } catch (UserException userException) {
- throw new SystemException(userException);
- }
- LOGGER.info("Created index {} for table {}", docPartIndex.getIdentifier(), docPart
- .getIdentifier());
- };
- }
-
- private Runnable dslConsumerToRunnable(Consumer<DSLContext> consumer) {
- return () -> {
- try {
- retrier.retry(() -> {
- try (Connection connection = sqlInterface.getDbBackend().createWriteConnection()) {
- DSLContext dsl = sqlInterface.getDslContextFactory()
- .createDslContext(connection);
-
- consumer.accept(dsl);
- connection.commit();
- return null;
- } catch (SQLException ex) {
- throw sqlInterface.getErrorHandler().handleException(Context.CREATE_INDEX, ex);
- }
- }, Hint.CRITICAL);
- } catch (RetrierGiveUpException ex) {
- throw new ToroRuntimeException(ex);
- }
- };
- }
-
- @Override
- protected void startUp() throws Exception {
- LOGGER.debug("Starting backend...");
-
- streamExecutor.startAsync();
- streamExecutor.awaitRunning();
-
- LOGGER.trace("Waiting for {} to be running...", dbBackendService);
- dbBackendService.awaitRunning();
-
- LOGGER.debug("Backend started");
- }
-
- @Override
- protected void shutDown() throws Exception {
- streamExecutor.stopAsync();
- streamExecutor.awaitTerminated();
- }
-
- void onConnectionClosed(BackendConnectionImpl connection) {
- }
-
- KvMetainfoHandler getMetaInfoHandler() {
- return metainfoHandler;
- }
-
- SchemaUpdater getSchemaUpdater() {
- return schemaUpdater;
- }
-}
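Note: enableDataImportMode()/disableDataImportMode() bracket bulk loads. While the mode is on,
the structure interface skips index creation (see the isOnDataInsertMode() guards earlier in
this diff); turning it off replays internal indexes, user indexes and backend-specific finish
tasks in parallel on the streamExecutor, each job retried with Hint.CRITICAL. From a caller's
point of view:

    backendService.enableDataImportMode(snapshot);  // legal only while no database exists
    // ... bulk-insert documents with index maintenance disabled ...
    backendService.disableDataImportMode(snapshot); // builds all pending indexes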
diff --git a/engine/backend/common/src/main/java/com/torodb/backend/BackendTransactionImpl.java b/engine/backend/common/src/main/java/com/torodb/backend/BackendTransactionImpl.java
deleted file mode 100644
index 6bb02815..00000000
--- a/engine/backend/common/src/main/java/com/torodb/backend/BackendTransactionImpl.java
+++ /dev/null
@@ -1,194 +0,0 @@
-/*
- * ToroDB
- * Copyright © 2014 8Kdata Technology (www.8kdata.com)
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-package com.torodb.backend;
-
-import com.google.common.collect.Multimap;
-import com.torodb.backend.ErrorHandler.Context;
-import com.torodb.core.backend.BackendCursor;
-import com.torodb.core.backend.BackendTransaction;
-import com.torodb.core.backend.EmptyBackendCursor;
-import com.torodb.core.backend.MetaInfoKey;
-import com.torodb.core.cursors.Cursor;
-import com.torodb.core.cursors.EmptyCursor;
-import com.torodb.core.exceptions.InvalidDatabaseException;
-import com.torodb.core.transaction.metainf.MetaCollection;
-import com.torodb.core.transaction.metainf.MetaDatabase;
-import com.torodb.core.transaction.metainf.MetaDocPart;
-import com.torodb.core.transaction.metainf.MetaField;
-import com.torodb.kvdocument.values.KvValue;
-import org.jooq.DSLContext;
-import org.jooq.lambda.tuple.Tuple2;
-
-import java.sql.Connection;
-import java.sql.SQLException;
-import java.util.Optional;
-
-/**
- *
- */
-public abstract class BackendTransactionImpl implements BackendTransaction {
-
- private boolean closed = false;
- private final Connection connection;
- private final DSLContext dsl;
- private final SqlInterface sqlInterface;
- private final BackendConnectionImpl backendConnection;
-
- public BackendTransactionImpl(Connection connection, SqlInterface sqlInterface,
- BackendConnectionImpl backendConnection) {
- this.connection = connection;
- this.dsl = sqlInterface.getDslContextFactory().createDslContext(connection);
- this.sqlInterface = sqlInterface;
- this.backendConnection = backendConnection;
- }
-
- boolean isClosed() {
- return closed;
- }
-
- Connection getConnection() {
- return connection;
- }
-
- DSLContext getDsl() {
- return dsl;
- }
-
- SqlInterface getSqlInterface() {
- return sqlInterface;
- }
-
- BackendConnectionImpl getBackendConnection() {
- return backendConnection;
- }
-
- @Override
- public long getDatabaseSize(MetaDatabase db) {
- return sqlInterface.getMetaDataReadInterface().getDatabaseSize(getDsl(), db);
- }
-
- @Override
- public long countAll(MetaDatabase db, MetaCollection col) {
- return sqlInterface.getReadInterface().countAll(getDsl(), db, col);
- }
-
- @Override
- public long getCollectionSize(MetaDatabase db, MetaCollection col) {
- return sqlInterface.getMetaDataReadInterface().getCollectionSize(getDsl(), db, col);
- }
-
- @Override
- public long getDocumentsSize(MetaDatabase db, MetaCollection col) {
- return sqlInterface.getMetaDataReadInterface().getDocumentsSize(getDsl(), db, col);
- }
-
- @Override
- public BackendCursor findAll(MetaDatabase db, MetaCollection col) {
- try {
- Cursor<Integer> allDids = sqlInterface.getReadInterface().getAllCollectionDids(dsl, db, col);
- return new LazyBackendCursor(sqlInterface, allDids, dsl, db, col);
- } catch (SQLException ex) {
- throw sqlInterface.getErrorHandler().handleException(Context.FETCH, ex);
- }
- }
-
- @Override
- public BackendCursor findByField(MetaDatabase db, MetaCollection col, MetaDocPart docPart,
- MetaField field, KvValue<?> value) {
- try {
- Cursor<Integer> allDids = sqlInterface.getReadInterface().getCollectionDidsWithFieldEqualsTo(
- dsl, db, col, docPart, field, value);
- return new LazyBackendCursor(sqlInterface, allDids, dsl, db, col);
- } catch (SQLException ex) {
- throw sqlInterface.getErrorHandler().handleException(Context.FETCH, ex);
- }
- }
-
- @Override
- public BackendCursor findByFieldIn(MetaDatabase db, MetaCollection col, MetaDocPart docPart,
- Multimap<MetaField, KvValue<?>> valuesMultimap) {
- try {
- if (valuesMultimap.isEmpty()) {
- return new EmptyBackendCursor();
- }
- Cursor<Integer> allDids = sqlInterface.getReadInterface().getCollectionDidsWithFieldsIn(dsl,
- db, col, docPart, valuesMultimap);
- return new LazyBackendCursor(sqlInterface, allDids, dsl, db, col);
- } catch (SQLException ex) {
- throw sqlInterface.getErrorHandler().handleException(Context.FETCH, ex);
- }
- }
-
- @Override
- public Cursor<Tuple2<Integer, KvValue<?>>> findByFieldInProjection(MetaDatabase db,
- MetaCollection col, MetaDocPart docPart,
- Multimap<MetaField, KvValue<?>> valuesMultimap) {
- try {
- if (valuesMultimap.isEmpty()) {
- return new EmptyCursor<>();
- }
- return sqlInterface.getReadInterface()
- .getCollectionDidsAndProjectionWithFieldsIn(dsl, db, col, docPart, valuesMultimap);
- } catch (SQLException ex) {
- throw sqlInterface.getErrorHandler().handleException(Context.FETCH, ex);
- }
- }
-
- @Override
- public BackendCursor fetch(MetaDatabase db, MetaCollection col, Cursor<Integer> didCursor) {
- return new LazyBackendCursor(sqlInterface, didCursor, dsl, db, col);
- }
-
- @Override
- public Optional<KvValue<?>> readMetaInfo(MetaInfoKey key) throws
- IllegalArgumentException {
- return getBackendConnection().getMetaInfoHandler().readMetaInfo(getDsl(), key);
- }
-
- @Override
- public void checkMetaDataTables() throws InvalidDatabaseException {
- getSqlInterface().getStructureInterface().checkMetaDataTables(getDsl());
- }
-
- @Override
- public void rollback() {
- try {
- connection.rollback();
- } catch (SQLException ex) {
- sqlInterface.getErrorHandler().handleException(Context.ROLLBACK, ex);
- }
- }
-
- @Override
- public void close() {
- if (!closed) {
- closed = true;
- backendConnection.onTransactionClosed(this);
- try {
- connection.rollback();
- connection.close();
- } catch (SQLException ex) {
- sqlInterface.getErrorHandler().handleException(Context.CLOSE, ex);
- } finally {
- dsl.close();
- }
- }
- }
-
-}
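Note: close() rolls the JDBC connection back before closing it, so a transaction that ends
without an explicit commit leaves no trace. commit() lives on the write transaction subclasses
(see trans.commit() in BackendBundleImpl earlier in this diff), giving the usual pattern:

    try (BackendConnection conn = backendService.openConnection();
        ExclusiveWriteBackendTransaction trans = conn.openExclusiveWriteTransaction()) {
      // ... write through trans ...
      trans.commit(); // without this line, close() rolls everything back
    }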
diff --git a/engine/backend/common/src/main/java/com/torodb/backend/DataTypeProvider.java b/engine/backend/common/src/main/java/com/torodb/backend/DataTypeProvider.java
deleted file mode 100644
index e53dae35..00000000
--- a/engine/backend/common/src/main/java/com/torodb/backend/DataTypeProvider.java
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * ToroDB
- * Copyright © 2014 8Kdata Technology (www.8kdata.com)
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-package com.torodb.backend;
-
-import com.torodb.backend.converters.jooq.DataTypeForKv;
-import com.torodb.core.transaction.metainf.FieldType;
-import org.jooq.SQLDialect;
-
-import javax.annotation.Nonnull;
-
-public interface DataTypeProvider {
-
- @Nonnull
- DataTypeForKv<?> getDataType(FieldType type);
-
- @Nonnull
- SQLDialect getDialect();
-}
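Note: a DataTypeProvider is a total mapping from ToroDB's FieldType enum to jOOQ column types,
plus the SQL dialect to generate statements in. A table-driven sketch (the map contents are
backend-specific and omitted; MapBackedDataTypeProvider is a name invented for illustration):

    public final class MapBackedDataTypeProvider implements DataTypeProvider {

      private final Map<FieldType, DataTypeForKv<?>> dataTypes;

      public MapBackedDataTypeProvider(Map<FieldType, DataTypeForKv<?>> dataTypes) {
        this.dataTypes = dataTypes; // expected to hold one entry per FieldType constant
      }

      @Override
      public DataTypeForKv<?> getDataType(FieldType type) {
        DataTypeForKv<?> dataType = dataTypes.get(type);
        if (dataType == null) {
          throw new IllegalArgumentException("Unsupported field type " + type);
        }
        return dataType;
      }

      @Override
      public SQLDialect getDialect() {
        return SQLDialect.POSTGRES; // illustrative; each backend returns its own dialect
      }
    }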
diff --git a/engine/backend/common/src/main/java/com/torodb/backend/DbBackendService.java b/engine/backend/common/src/main/java/com/torodb/backend/DbBackendService.java
deleted file mode 100644
index 69142cd8..00000000
--- a/engine/backend/common/src/main/java/com/torodb/backend/DbBackendService.java
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * ToroDB
- * Copyright © 2014 8Kdata Technology (www.8kdata.com)
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-package com.torodb.backend;
-
-import com.torodb.core.services.TorodbService;
-
-import java.sql.Connection;
-
-import javax.sql.DataSource;
-
-public interface DbBackendService extends TorodbService {
-
- public DataSource getSessionDataSource();
-
- public DataSource getSystemDataSource();
-
- public DataSource getGlobalCursorDatasource();
-
- public void disableDataInsertMode();
-
- public void enableDataInsertMode();
-
- public long getDefaultCursorTimeout();
-
- public boolean isOnDataInsertMode();
-
- public boolean includeForeignKeys();
-
- public Connection createSystemConnection();
-
- public Connection createReadOnlyConnection();
-
- public Connection createWriteConnection();
-}
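Note: the interface deliberately separates three pools (session, system and global cursor data
sources) and three connection flavours, so long-running cursors cannot starve writes. Call
sites in this diff use the raw factories and commit by hand, as dslConsumerToRunnable() in
BackendServiceImpl shows:

    try (Connection connection = dbBackend.createWriteConnection()) {
      // ... execute statements ...
      connection.commit(); // commit is explicit at these call sites
    }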
diff --git a/engine/backend/common/src/main/java/com/torodb/backend/DefaultDidCursor.java b/engine/backend/common/src/main/java/com/torodb/backend/DefaultDidCursor.java
deleted file mode 100644
index d61f2d2e..00000000
--- a/engine/backend/common/src/main/java/com/torodb/backend/DefaultDidCursor.java
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * ToroDB
- * Copyright © 2014 8Kdata Technology (www.8kdata.com)
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-package com.torodb.backend;
-
-import java.sql.ResultSet;
-import java.sql.SQLException;
-
-import javax.annotation.Nonnull;
-
- public class DefaultDidCursor extends AbstractCursor<Integer> {
-
- public DefaultDidCursor(@Nonnull ErrorHandler errorHandler, @Nonnull ResultSet resultSet) {
- super(errorHandler, resultSet);
- }
-
- @Override
- protected Integer read(ResultSet resultSet) throws SQLException {
- return resultSet.getInt(1);
- }
-}
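Note: DefaultDidCursor shows the entire extension surface of AbstractCursor: a subclass only
maps the current ResultSet row to one element. Assuming AbstractCursor is generic on its
element type (as the Integer-returning read() above implies), a text-keyed variant is the same
three lines; StringIdCursor is a hypothetical name:

    public class StringIdCursor extends AbstractCursor<String> {

      public StringIdCursor(@Nonnull ErrorHandler errorHandler, @Nonnull ResultSet resultSet) {
        super(errorHandler, resultSet);
      }

      @Override
      protected String read(ResultSet resultSet) throws SQLException {
        return resultSet.getString(1); // the value always travels in column 1
      }
    }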
diff --git a/engine/backend/common/src/main/java/com/torodb/backend/DefaultDocPartResultCursor.java b/engine/backend/common/src/main/java/com/torodb/backend/DefaultDocPartResultCursor.java
deleted file mode 100644
index 5de847ef..00000000
--- a/engine/backend/common/src/main/java/com/torodb/backend/DefaultDocPartResultCursor.java
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- * ToroDB
- * Copyright © 2014 8Kdata Technology (www.8kdata.com)
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-package com.torodb.backend;
-
-import com.google.common.base.Preconditions;
-import com.torodb.backend.ErrorHandler.Context;
-import com.torodb.core.cursors.Cursor;
-import com.torodb.core.d2r.DocPartResult;
-import com.torodb.core.transaction.metainf.MetaCollection;
-import com.torodb.core.transaction.metainf.MetaDatabase;
-import org.jooq.DSLContext;
-
-import java.sql.SQLException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import java.util.NoSuchElementException;
-import java.util.function.Consumer;
-
-import javax.annotation.Nonnull;
-
-/**
- *
- */
- public class DefaultDocPartResultCursor implements Cursor<DocPartResult> {
-
- private static final int BATCH_SIZE = 1000;
-
- private final SqlInterface sqlInterface;
- private final Cursor<Integer> didCursor;
- private final DSLContext dsl;
- private final MetaDatabase metaDatabase;
- private final MetaCollection metaCollection;
-
- public DefaultDocPartResultCursor(
- @Nonnull SqlInterface sqlInterface,
- @Nonnull Cursor<Integer> didCursor,
- @Nonnull DSLContext dsl,
- @Nonnull MetaDatabase metaDatabase,
- @Nonnull MetaCollection metaCollection) {
- this.sqlInterface = sqlInterface;
- this.didCursor = didCursor;
- this.dsl = dsl;
- this.metaDatabase = metaDatabase;
- this.metaCollection = metaCollection;
- }
-
- @Override
- public boolean hasNext() {
- return didCursor.hasNext();
- }
-
- @Override
- public DocPartResult next() {
- if (!hasNext()) {
- throw new NoSuchElementException();
- }
- return getNextBatch(1).get(0);
- }
-
- @Override
- public List<DocPartResult> getRemaining() {
- List<DocPartResult> allDocuments = new ArrayList<>();
-
- List<DocPartResult> readedDocuments;
- while (didCursor.hasNext()) {
- readedDocuments = getNextBatch(BATCH_SIZE);
- allDocuments.addAll(readedDocuments);
- }
-
- return allDocuments;
- }
-
- @Override
- public List<DocPartResult> getNextBatch(int maxResults) {
- Preconditions.checkArgument(maxResults > 0, "max results must be at least 1, but " + maxResults
- + " was received");
-
- if (!didCursor.hasNext()) {
- return Collections.emptyList();
- }
-
- try {
- return sqlInterface.getReadInterface().getCollectionResultSets(
- dsl, metaDatabase, metaCollection, didCursor, maxResults
- );
- } catch (SQLException ex) {
- throw sqlInterface.getErrorHandler().handleException(Context.FETCH, ex);
- }
- }
-
- @Override
- public void forEachRemaining(Consumer<? super DocPartResult> action) {
- getRemaining().forEach(action);
- }
-
- @Override
- public void close() {
- didCursor.close();
- }
-}
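Note: this cursor materializes DocPartResults lazily, at most BATCH_SIZE dids per round trip,
and next() is just getNextBatch(1), so batch reads are strongly preferable. Assuming Cursor
extends AutoCloseable (its close() override above suggests so), intended usage looks like:

    try (Cursor<DocPartResult> cursor = new DefaultDocPartResultCursor(
        sqlInterface, didCursor, dsl, metaDatabase, metaCollection)) {
      while (cursor.hasNext()) {
        for (DocPartResult docPart : cursor.getNextBatch(1000)) {
          // ... translate the fetched result sets back into documents ...
        }
      }
    }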
diff --git a/engine/backend/common/src/main/java/com/torodb/backend/DelegatorField.java b/engine/backend/common/src/main/java/com/torodb/backend/DelegatorField.java
deleted file mode 100644
index b151f2de..00000000
--- a/engine/backend/common/src/main/java/com/torodb/backend/DelegatorField.java
+++ /dev/null
@@ -1,1501 +0,0 @@
-/*
- * ToroDB
- * Copyright © 2014 8Kdata Technology (www.8kdata.com)
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-package com.torodb.backend;
-
-
-import org.jooq.BetweenAndStep;
-import org.jooq.Binding;
-import org.jooq.Comparator;
-import org.jooq.Condition;
-import org.jooq.Configuration;
-import org.jooq.Converter;
-import org.jooq.DataType;
-import org.jooq.DatePart;
-import org.jooq.Field;
-import org.jooq.QuantifiedSelect;
-import org.jooq.Record1;
-import org.jooq.Result;
-import org.jooq.Select;
-import org.jooq.SortField;
-import org.jooq.SortOrder;
-import org.jooq.WindowIgnoreNullsStep;
-import org.jooq.WindowPartitionByStep;
-
-import java.math.BigDecimal;
-import java.util.Collection;
-import java.util.Map;
-
-/**
- *
- */
-@SuppressWarnings("checkstyle:OverloadMethodsDeclarationOrder")
- public class DelegatorField<T> implements Field<T> {
-
- private static final long serialVersionUID = 4060506762956191613L;
-
- private final Field<T> delegate;
-
- public DelegatorField(Field<T> delegate) {
- this.delegate = delegate;
- }
-
- @Override
- public String getName() {
- return delegate.getName();
- }
-
- @Override
- public String getComment() {
- return delegate.getComment();
- }
-
- @Override
- public Converter<?, T> getConverter() {
- return delegate.getConverter();
- }
-
- @Override
- public Binding<?, T> getBinding() {
- return delegate.getBinding();
- }
-
- @Override
- public Class<T> getType() {
- return delegate.getType();
- }
-
- @Override
- public DataType<T> getDataType() {
- return delegate.getDataType();
- }
-
- @Override
- public DataType<T> getDataType(Configuration configuration) {
- return delegate.getDataType(configuration);
- }
-
- @Override
- public Field<T> as(String alias) {
- return delegate.as(alias);
- }
-
- @Override
- public Field<T> as(Field<?> otherField) {
- return delegate.as(otherField);
- }
-
- @Override
- public boolean equals(Object other) {
- return delegate.equals(other);
- }
-
- @Override
- public int hashCode() {
- return delegate.hashCode();
- }
-
- @Override
- public <Z> Field<Z> cast(Field<Z> field) {
- return delegate.cast(field);
- }
-
- @Override
- public <Z> Field<Z> cast(DataType<Z> type) {
- return delegate.cast(type);
- }
-
- @Override
- public <Z> Field<Z> cast(Class<Z> type) {
- return delegate.cast(type);
- }
-
- @Override
- public <Z> Field<Z> coerce(Field<Z> field) {
- return delegate.coerce(field);
- }
-
- @Override
- public <Z> Field<Z> coerce(DataType<Z> type) {
- return delegate.coerce(type);
- }
-
- @Override
- public <Z> Field<Z> coerce(Class<Z> type) {
- return delegate.coerce(type);
- }
-
- @Override
- public SortField<T> asc() {
- return delegate.asc();
- }
-
- @Override
- public SortField<T> desc() {
- return delegate.desc();
- }
-
- @Override
- public SortField<T> sort(SortOrder order) {
- return delegate.sort(order);
- }
-
- @Override
- public SortField<Integer> sortAsc(Collection<T> sortList) {
- return delegate.sortAsc(sortList);
- }
-
- @SuppressWarnings("unchecked")
- @Override
- public SortField<Integer> sortAsc(T... sortList) {
- return delegate.sortAsc(sortList);
- }
-
- @Override
- public SortField<Integer> sortDesc(Collection<T> sortList) {
- return delegate.sortDesc(sortList);
- }
-
- @SuppressWarnings("unchecked")
- @Override
- public SortField<Integer> sortDesc(T... sortList) {
- return delegate.sortDesc(sortList);
- }
-
- @Override
- public <Z> SortField<Z> sort(Map<T, Z> sortMap) {
- return delegate.sort(sortMap);
- }
-
- @Override
- public Field<T> neg() {
- return delegate.neg();
- }
-
- @Override
- public Field<T> add(Number value) {
- return delegate.add(value);
- }
-
- @Override
- public Field<T> add(Field<?> value) {
- return delegate.add(value);
- }
-
- @Override
- public Field<T> plus(Number value) {
- return delegate.plus(value);
- }
-
- @Override
- public Field<T> plus(Field<?> value) {
- return delegate.plus(value);
- }
-
- @Override
- public Field<T> sub(Number value) {
- return delegate.sub(value);
- }
-
- @Override
- public Field<T> sub(Field<?> value) {
- return delegate.sub(value);
- }
-
- @Override
- public Field<T> subtract(Number value) {
- return delegate.subtract(value);
- }
-
- @Override
- public Field<T> subtract(Field<?> value) {
- return delegate.subtract(value);
- }
-
- @Override
- public Field<T> minus(Number value) {
- return delegate.minus(value);
- }
-
- @Override
- public Field<T> minus(Field<?> value) {
- return delegate.minus(value);
- }
-
- @Override
- public Field<T> mul(Number value) {
- return delegate.mul(value);
- }
-
- @Override
- public Field<T> mul(Field<? extends Number> value) {
- return delegate.mul(value);
- }
-
- @Override
- public Field<T> multiply(Number value) {
- return delegate.multiply(value);
- }
-
- @Override
- public Field<T> multiply(Field<? extends Number> value) {
- return delegate.multiply(value);
- }
-
- @Override
- public Field<T> div(Number value) {
- return delegate.div(value);
- }
-
- @Override
- public Field