diff --git a/.gitignore b/.gitignore index c007f3ca..c62fc224 100644 --- a/.gitignore +++ b/.gitignore @@ -1,5 +1,13 @@ -**/.idea/ target/ +pom.xml.tag +pom.xml.releaseBackup +pom.xml.versionsBackup +pom.xml.next +release.properties +dependency-reduced-pom.xml +buildNumber.properties +.mvn/timing.properties +**/.idea/ *.iml **/nbproject/** **/nbactions.xml @@ -8,7 +16,8 @@ target/ .project .classpath .settings/ +.checkstyle hs_err*.log /documentation/static /documentation/site -.DS_Store +.DS_Store \ No newline at end of file diff --git a/.travis.yml b/.travis.yml index 3680405e..9637f2a8 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,30 +1,49 @@ language: java -sudo: false +sudo: required dist: trusty -script: mvn -Psafer -Pintegration -Passembler -B -e -T 1C verify jdk: oraclejdk8 -env: - # the SONAR_TOKEN variable - - secure: "eQaksffQhrlaAKEFWIGR7Wbo01QKscfPl7MOQHQAytP74uU6in1VcPITdHr2UgTH/PhjMJ5Uevw7mrzDN+pDE8csNxVF7JozEAIh6DYhmBmmBOYu3+AypDqdtaBOmgAPkTLxSthiy5hsmOgCjfXPGrWqpQJ918n0RtdIL1p7W4L5N3I2yDcQOpFSXXizWADpGH/fErhihKdepNCco0JCfxLmpFa+i+goqsF1EEJPb2Ylz4LYhlez2NqULGQWio2+ucrYLDZHFTrdRyQ9Tq/RA62zdynmWQAYuE5nYmO1vmvk2mR+xQpIyevvAQY4RmffYKkK/Hq334H8rIH7etCn/e0fzaLnOveyIttoG0yaqpY/mPIZkOQBV0pSjCFONvt0gsgFfPYv5sHvRBM2dj1knwOLqwhqseR6en8+TaqeP2aYj/ittU3+7QJ9yHorfAYG14ofxm+Ue4o9wJ5FsyAzw5IpYnu28WRGy7+7kFfzo1prNh7dlqjDKrBMeK292seVPH+RDMaIs2ZAzfUZElhOtqvXGsuBfDtO2FG7L9ke+tbVB/z/srM0cXPJW0xcPdoeVgoH5+CyLTaR4+8tT0recQwfKrl7zluwKk+zWmxCs5BuV+JHcWrgC+sD0iKvOHbf/w+s5bLOs0kB4YjoiUZ0pK+9NphR/Mew7shnTFZi63o=" - # TODO: Add a SONAR_GITHUB_TOKEN to support Sonar on pull requests (or wait until they improve their compatibility) +before_install: + - wget https://archive.apache.org/dist/maven/maven-3/3.2.2/binaries/apache-maven-3.2.2-bin.tar.gz + - tar xf apache-maven-3.2.2-bin.tar.gz + - export M2_HOME=$PWD/apache-maven-3.2.2 + - export PATH=$M2_HOME/bin:$PATH +#Required to use Codacy + - sudo apt-get install jq + - wget -O ~/codacy-coverage-reporter-assembly-latest.jar $(curl https://api.github.com/repos/codacy/codacy-coverage-reporter/releases/latest | jq -r .assets[0].browser_download_url) addons: - sonarqube: - branches: - - devel - - master apt: packages: - oracle-java8-installer + +cache: + directories: + - $HOME/.m2/repository + +before_cache: + - rm -f $HOME/.m2/repository/com/torodb before_script: -- export GIT_BRANCH=$TRAVIS_BRANCH + - export GIT_BRANCH=$TRAVIS_BRANCH -cache: - directories: - - '$HOME/.sonar/cache' +script: | + if [ "$TRAVIS_EVENT_TYPE" == cron ] && [ "$TRAVIS_BRANCH" == devel ] + then + bash .travis/build-packages + else + mvn -Psafer -Pintegration -B -e -T 1C -Dcheckstyle.consoleOutput=false verify + fi after_success: - - if [[ $TRAVIS_REPO_SLUG = torodb/torodb ]]; then bash <(curl -s https://codecov.io/bash) || echo 'Codecov did not collect coverage reports'; else echo 'Codecov not notified'; fi - - if [[ $TRAVIS_REPO_SLUG = torodb/torodb ]]; then mvn sonar:sonar || echo 'Error while notifying SonarQube'; else echo 'SonarQube not notified'; fi + - | + if [ "$TRAVIS_EVENT_TYPE" != cron ] && [ "$TRAVIS_REPO_SLUG" == torodb/stampede ] + then + # Upload reports to Codecov + bash <(curl -s https://codecov.io/bash) || echo 'Codecov did not collect coverage reports'; + # Upload reports to Codacy + java -cp ~/codacy-coverage-reporter-assembly-latest.jar com.codacy.CodacyCoverageReporter -l Java -r reporting/target/site/jacoco-aggregate/jacoco.xml || echo 'Codacy report fail' + else + echo 'Skipping a metrics reports because this repo/build is not permitted' + fi + \ No newline 
at end of file diff --git a/.travis/build-packages b/.travis/build-packages new file mode 100644 index 00000000..c3a053ca --- /dev/null +++ b/.travis/build-packages @@ -0,0 +1,169 @@ +#!/bin/bash + +set -e +x +v +o history + +function finish { + rm -f ~/.aws/config + + if [ -f ~/.m2/settings.xml ] + then + rm ~/.m2/settings.xml + fi + if [ -f ~/.m2/settings.xml.bak ] + then + mv ~/.m2/settings.xml.bak ~/.m2/settings.xml + fi + + if [ -f ~/.ssh/id_rsa ] + then + rm -f ~/.ssh/id_rsa + fi + if [ -f ~/.ssh/id_rsa.bak ] + then + mv ~/.ssh/id_rsa.bak ~/.ssh/id_rsa + fi + if [ -f ~/.ssh/id_rsa.pub ] + then + rm -f ~/.ssh/id_rsa.pub + fi + if [ -f ~/.ssh/id_rsa.pub.bak ] + then + mv ~/.ssh/id_rsa.pub.bak ~/.ssh/id_rsa.pub + fi + + if [ -f ~/.ssh/sign ] + then + rm -f ~/.ssh/sign + fi + if [ -f ~/.ssh/sign.bak ] + then + mv ~/.ssh/sign.bak ~/.ssh/sign + fi + if [ -f ~/.ssh/sign.pub ] + then + rm -f ~/.ssh/sign.pub + fi + if [ -f ~/.ssh/sign.pub.bak ] + then + mv ~/.ssh/sign.pub.bak ~/.ssh/sign.pub + fi + + gpg --delete-secret-key --batch --yes "$launchpad_sign_public_key_fingerprint" || true + gpg --delete-key --batch --yes "$launchpad_sign_public_key_fingerprint" || true + + rm -f ~/.config/copr +} + +trap finish SIGINT SIGTERM EXIT + + +echo "Building binary package" + +mkdir -p ~/.aws +echo " +[default] +aws_access_key_id=$aws_access_key_id +aws_secret_access_key=$aws_secret_access_key +region=eu-west-1 +output=json +" > ~/.aws/config + +set -x +mvn package -f main/pom.xml -P assembler -Ds3.push=true -DskipTests +set +x + + +echo "Building docker package" + +mkdir -p ~/.m2 +if [ -f ~/.m2/settings.xml ] +then + mv ~/.m2/settings.xml ~/.m2/settings.xml.bak +fi +echo " + + + + docker.io + $docker_username + $docker_password + + + +" > ~/.m2/settings.xml + +set -x +mvn package -f main/pom.xml -P docker -Ddocker.skip.push=false -DskipTests +set +x + + +echo "Building deb package" + +if [ -f ~/.ssh/id_rsa ] +then + mv ~/.ssh/id_rsa ~/.ssh/id_rsa.bak +fi +if [ -f ~/.ssh/id_rsa.pub ] +then + mv ~/.ssh/id_rsa.pub ~/.ssh/id_rsa.pub.bak +fi + +echo "$launchpad_private_key" > ~/.ssh/id_rsa +echo "$launchpad_public_key" > ~/.ssh/id_rsa.pub +chmod 600 ~/.ssh/id_rsa +chmod 644 ~/.ssh/id_rsa.pub + +echo "$launchpad_sign_private_key" > ~/.ssh/sign +echo "$launchpad_sign_public_key" > ~/.ssh/sign.pub +chmod 600 ~/.ssh/sign +chmod 644 ~/.ssh/sign.pub + +gpg --import ~/.ssh/sign.pub +gpg --import ~/.ssh/sign +echo "$(echo "$launchpad_sign_public_key_fingerprint"|tr -d '[:space:]'):6:"|gpg --import-ownertrust + +echo " +[8kdata-release] +fqdn = ppa.launchpad.net +method = ftp +incoming = ~8kdata/ubuntu/ppa/ +login = anonymous +allow_unsigned_uploads = 0 + +[8kdata-devel] +fqdn = ppa.launchpad.net +method = ftp +incoming = ~8kdata/ubuntu/ppa-dev/ +login = anonymous +allow_unsigned_uploads = 0 +" > ~/.dput.cf + +set -x +mvn package -f main/pom.xml -P deb -Dlaunchpad.push=true -DskipTests -Dpackage.name=torodb-stampede +mvn package -f main/pom.xml -P deb -Dlaunchpad.push=true -DskipTests -Dpackage.name=torodb-stampede-postgres +set +x + + +echo "Building snap package" + +set -x +mvn package -f main/pom.xml -P snap -Dlaunchpad.push=true -DskipTests -Dpackage.name=torodb-stampede +mvn package -f main/pom.xml -P snap -Dlaunchpad.push=true -DskipTests -Dpackage.name=torodb-stampede-postgres +set +x + + +echo "Building rpm package" + +mkdir -p ~/.config +echo " +[copr-cli] +login = $copr_login +username = $copr_user +token = $copr_token +copr_url = https://copr.fedorainfracloud.org +" > ~/.config/copr + +set -x +mvn package 
-f main/pom.xml -P rpm -Dcopr.push=true -DskipTests -Dpackage.name=torodb-stampede +mvn package -f main/pom.xml -P rpm -Dcopr.push=true -DskipTests -Dpackage.name=torodb-stampede-postgres +set +x diff --git a/README.md b/README.md index f2be53c8..f79dc081 100644 --- a/README.md +++ b/README.md @@ -1,43 +1,59 @@ -# ToroDB +# ToroDB Stampede -[![Master branch build status](https://travis-ci.org/torodb/torodb.svg?branch=master)](https://travis-ci.org/torodb/torodb) [![Quality Gate](https://sonarqube.com/api/badges/gate?key=com.torodb:torodb-pom)](https://sonarqube.com/dashboard/index/com.torodb:torodb-pom) +> Transform your NoSQL data from a MongoDB replica set into a relational database in PostgreSQL. -ToroDB is a technology designed to fulfill the gap between document oriented -and SQL databases. There are two products that use this technology: ToroDB -Server and ToroDB Stampede. Both platforms are open source and any feedback, -contributions, help and/or patches are very welcome. Please join the -[torodb-dev][2] mailing list for further discussion. +There are other solutions that are able to store the JSON document in a +relational table using PostgreSQL JSON support, but it doesn't solve the real +problem of 'how to really use that data'. ToroDB Stampede replicates the +document structure in different relational tables and stores the document data +in different tuples using those tables. -For more information, please see [ToroDB's website][1] +![](documentation/docs/images/tables_distribution.jpeg) -## ToroDB Server -It is a MongoDB-compatible server that supports speaks the MongoDB Wire -Protocol (and therefore can be used with the same drivers used to connect to -any standard MongoDB server) but stores your data into a reliable and trusted -ACID database. +## Installation -More information about ToroDB Server can be found on [its own folder](/server) -in this repository. +Due to the use of different external systems like MongoDB and PostgreSQL, the +installation requires some previous steps. Take a look at out +[quickstart][1] in the +documentation. -## ToroDB Stampede -ToroDB Stampede is a business analytic solution that replicates your data in -real time from a MongoDB replica set into a SQL database, allowing you to use -any business intelligence products (like [Tableau][3] or [Pentaho][4]) to -analyze NoSQL data. +## Usage example -More information about ToroDB Stampede can be found on -[its own folder](/stampede) in this repository. +MongoDB is a great idea, but sooner or later some kind of business +intelligence, or complex aggregated queries are required. At this point MongoDB +is not so powerful and ToroDB Stampede borns to solve that problem (see +[our post about that][2]). -## Code QA - * Master branch build status: [![Master branch build status](https://travis-ci.org/torodb/torodb.svg?branch=master)](https://travis-ci.org/torodb/torodb) [![Quality Gate](https://sonarqube.com/api/badges/gate?key=com.torodb:torodb-pom)](https://sonarqube.com/dashboard/index/com.torodb:torodb-pom) - * Devel branch build status : [![Devel branch build status](https://travis-ci.org/torodb/torodb.svg?branch=devel)](https://travis-ci.org/torodb/torodb) [![Quality Gate](https://sonarqube.com/api/badges/gate?key=com.torodb:torodb-pom:devel)](https://sonarqube.com/dashboard/index/com.torodb:torodb-pom:devel) +The kind of replication done by ToroDB Stampede allows the execution of +aggregated queries in a relational backend (PostgreSQL) with a noticeable time +improvement. -## Are you a developer? 
Want to contribute? Questions about the source code? +A deeper explanation is available in our +[how to use][3] section in the +documentation. -Please see [CONTRIBUTING][5]. +## Development setup -[1]: http://www.torodb.com -[2]: https://groups.google.com/forum/#!forum/torodb-dev -[3]: http://www.tableau.com -[4]: http://www.pentaho.com/ -[5]: https://github.com/torodb/torodb/blob/master/CONTRIBUTING.md +As it was said in the installation section, the requirements of external +systems can make more difficult to explain briefly how to setup the development +environment here. So if you want to take a look on how to prepare your +development environment, take a look to our +[documentation][4]. + +## Release History + +* 1.0.0-beta2 + * Released on April 06th 2017 +* 1.0.0-beta1 + * Released on December 30th 2016 + +## Meta + +ToroDB – [@nosqlonsql](https://twitter.com/nosqlonsql) – info@8kdata.com + +Distributed under the GNU AGPL v3 license. See ``LICENSE`` for more information. + +[1]: https://www.torodb.com/stampede/docs/quickstart +[2]: https://www.8kdata.com/blog/the-conundrum-of-bi-aggregate-queries-on-mongodb/ +[3]: https://www.torodb.com/stampede/docs/how-to-use +[4]: https://www.torodb.com/stampede/docs/installation/previous-requirements/ diff --git a/RELEASE.md b/RELEASE.md new file mode 100644 index 00000000..465ffbcd --- /dev/null +++ b/RELEASE.md @@ -0,0 +1,29 @@ +## Release Notes for Stampede 1.0.0-beta2 + +### Changes + +* Add support for MongoDB 3.4 + * Deal with MongoDB views + * Support BSON Type Decimal128 +* Support Sharding Replication. ToroDB Stampede can replicate from N shards into the same ToroDB database. + * Adjust Guice to provide a better way to inject different metrics and loggers + * Adapt metrics and logging so each shard has their own values + * Adapt the Data Import Mode to the sharding model +* Stampede Packaging + * RPM package + * DEB package + * Snap package +* Allow SSL connection to the backend (PostgreSQL) +* Add FlexyPool to ToroDB +* Integration Tests +* Support all BSON types +* Deal with system collections +* Unify logging system and improve error messages +* Calibrate maximum threads using also connection pool size +* Review and test Windows/Mac installation/configuration documentation +* Improve ToroDB Parameter configuration + + +### Bugs Fixed + +* Stampede did not support documents whose '\_id' is a container (object or array) diff --git a/build-tools/pom.xml b/build-tools/pom.xml deleted file mode 100644 index 33032fcd..00000000 --- a/build-tools/pom.xml +++ /dev/null @@ -1,18 +0,0 @@ - - - 4.0.0 - com.torodb - build-tools - 0.50.0 - jar - - ToroDB: Build Tools - A project used to store resources and tools that the build - process can import as a dependency - - - UTF-8 - true - true - - \ No newline at end of file diff --git a/build-tools/src/main/resources/com/torodb/buildtools/checkstyle.xml b/build-tools/src/main/resources/com/torodb/buildtools/checkstyle.xml deleted file mode 100644 index 01a2ac87..00000000 --- a/build-tools/src/main/resources/com/torodb/buildtools/checkstyle.xml +++ /dev/null @@ -1,246 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
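For reference, the verification that the new `.travis.yml` runs for regular (non-cron) builds can be reproduced locally. This is only a convenience sketch that copies the `script` step of that file and assumes Maven 3.2+ and Oracle JDK 8, as provisioned there.

```no-highlight
# Run the same Maven verification as the non-cron Travis job (copied from .travis.yml)
mvn -Psafer -Pintegration -B -e -T 1C -Dcheckstyle.consoleOutput=false verify
```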
diff --git a/build-tools/src/main/resources/com/torodb/buildtools/torodb-license.txt b/build-tools/src/main/resources/com/torodb/buildtools/torodb-license.txt deleted file mode 100644 index 54d73451..00000000 --- a/build-tools/src/main/resources/com/torodb/buildtools/torodb-license.txt +++ /dev/null @@ -1,15 +0,0 @@ -${project.name} -Copyright © ${project.inceptionYear} ${owner} (${email}) - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU Affero General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU Affero General Public License for more details. - -You should have received a copy of the GNU Affero General Public License -along with this program. If not, see . \ No newline at end of file diff --git a/stampede/documentation/README.md b/documentation/README.md similarity index 100% rename from stampede/documentation/README.md rename to documentation/README.md diff --git a/stampede/documentation/css/8kdata.css b/documentation/css/8kdata.css similarity index 100% rename from stampede/documentation/css/8kdata.css rename to documentation/css/8kdata.css diff --git a/stampede/documentation/diagrams_soures/diagrams.key b/documentation/diagrams_soures/diagrams.key similarity index 100% rename from stampede/documentation/diagrams_soures/diagrams.key rename to documentation/diagrams_soures/diagrams.key diff --git a/stampede/documentation/diagrams_soures/toro_stampede_structure.drawing b/documentation/diagrams_soures/toro_stampede_structure.drawing similarity index 100% rename from stampede/documentation/diagrams_soures/toro_stampede_structure.drawing rename to documentation/diagrams_soures/toro_stampede_structure.drawing diff --git a/stampede/documentation/docs/about.md b/documentation/docs/about.md similarity index 72% rename from stampede/documentation/docs/about.md rename to documentation/docs/about.md index 8346782b..bf7f06f4 100644 --- a/stampede/documentation/docs/about.md +++ b/documentation/docs/about.md @@ -4,7 +4,8 @@ Connected to a MongoDB replica set, ToroDB Stampede is able to replicate the NoS ![ToroDB Stampede Structure](images/toro_stampede_structure.jpg) -There are other solutions that are able to store the JSON document in a relational table using PostgreSQL JSON support, but it doesn't solve the real problem of 'how to really use that data'. ToroDB Stampede replicates the document structure in different relational tables and stores the document data in different tuples using those tables. +There are other solutions that are able to store the JSON document in a relational table using PostgreSQL JSON support, but it doesn't solve the real problem of 'how to really use that data'. +ToroDB Stampede replicates the document structure in different relational tables and stores the document data in different tuples using those tables. ![Mapping example](images/toro_stampede_mapping.jpg) @@ -19,7 +20,6 @@ Not everything could be perfect and there are some known limitations from ToroDB * If character `\0` is used in a string it will be escaped because PostgreSQL doesn't support it. * Command `applyOps` reception will stop the replication server. * Command `collMod` reception will be ignored. -* MongoDB sharding environment are not supported currently. 
In addition to the previous limitations, just some kind of indexes are supported: @@ -28,6 +28,16 @@ In addition to the previous limitations, just some kind of indexes are supported * All keys path with the exception to the paths resolving in scalar value (eg: `db.test.createIndex({"a": 1})` will not index value of key `a` for the document `{"a": [1,2,3]}`) * Index properties `sparse` and `background` are ignored +## When ToroDB Stampede might not be the right choice + +As good as Stampede is, there are certain use-cases for which it is a bad choice or simply will not work: + +* Pattern "key as values". When keys contain values, potentially thousands of different values may appear in keys, leading to an equally high number of columns +(which might break with some RDBMS which have limits to the number of columns per row, see next point) and/or tables, which might be terribly inconvenient and slow. +* Too many fields per document, several of them optional and only some appearing per document, which might lead to thousands of columns. +Some RDBMSs do not support such a high number of columns. For PostgreSQL this limit is around 1600 columns. + + [TODO]: <> (not supported types, we need a list) [Versions]: <> (this section doesn't make any sense currently) diff --git a/stampede/documentation/docs/appendix.md b/documentation/docs/appendix.md similarity index 95% rename from stampede/documentation/docs/appendix.md rename to documentation/docs/appendix.md index abb3b330..4607676a 100644 --- a/stampede/documentation/docs/appendix.md +++ b/documentation/docs/appendix.md @@ -17,12 +17,13 @@ Usage: `torodb-stampede [options]` | --backend-database | The database that will be used. | | --backend-host | The host or ip that will be used to connect. | | --backend-port | The port that will be used to connect. | +| --backend-ssl | Enable SSL for backend connection. | | --backend-user | The user that will be used to connect. | | -c, --conf | Configuration file in YAML format. | | --connection-pool-size | Maximum number of connections to establish to the database. It must be higher or equal than 3. | | --connection-pool-timeout | The timeout in milliseconds after which retrieve a connection from the pool will fail. | | --enable-metrics | Enable metrics system. | -| --enable-ssl | Enable SSL/TLS layer. | +| --enable-ssl | Enable SSL/TLS for replication layer. | | -h, --help | Print help and exit. | | -hp, --help-param | Print help for all available parameters and exit. |  | --log-level | Level of log emitted (will overwrite default log4j2 configuration) | @@ -41,9 +42,9 @@ Usage: `torodb-stampede [options]` | --ssl-key-store-password | The password of the Java Key Store file containing and private key used to authenticate client. | | --ssl-trust-store-file | The path to the Java Key Store file containing the Certification Authority. If CAFile is specified it will be used instead. | | --ssl-trust-store-password | The password of the Java Key Store file containing the Certification Authority. | -| --sync-source | The host and port (:) of the node from ToroDB has to replicate. | +| --sync-source | The host and port (:) of the MongoDB node from ToroDB has to replicate. | | --toropass-file | You can specify a file that use .pgpass syntax: `::::` (can have multiple lines) | -| --version | Prints the version. | +| --version | Prints the version and exit. | | -x, --xml-conf | Configuration file in XML format. 
| ## Configuration file @@ -102,6 +103,7 @@ Another way to configure the system is through configuration file or setting con | /backend/postgres/user | The user that will be used to connect. | | /backend/postgres/toropassFile | Path to the file with PostgreSQL access configuration in  `.pgpass` syntax. | | /backend/postgres/applicationName | The application name used by driver to connect. | +| /backend/postgres/ssl | If `true` Enabled the SSL connection with PostgreSQL server, if `false` is disabled. | ### ToroDB Stampede pool configuration diff --git a/stampede/documentation/docs/bi-connectors.md b/documentation/docs/bi-connectors.md similarity index 100% rename from stampede/documentation/docs/bi-connectors.md rename to documentation/docs/bi-connectors.md diff --git a/stampede/documentation/docs/consistency.md b/documentation/docs/consistency.md similarity index 100% rename from stampede/documentation/docs/consistency.md rename to documentation/docs/consistency.md diff --git a/stampede/documentation/docs/css/8kdata.css b/documentation/docs/css/8kdata.css similarity index 100% rename from stampede/documentation/docs/css/8kdata.css rename to documentation/docs/css/8kdata.css diff --git a/stampede/documentation/docs/dev-notes.md b/documentation/docs/dev-notes.md similarity index 86% rename from stampede/documentation/docs/dev-notes.md rename to documentation/docs/dev-notes.md index 3865fa6f..f77af557 100644 --- a/stampede/documentation/docs/dev-notes.md +++ b/documentation/docs/dev-notes.md @@ -10,25 +10,25 @@ En esta sección se decriben los pasos a seguir para instalar MongoDB Community Importar la clave pública utilizada por el gestor de paquetes de Ubuntu. ``` -$ sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv EA312927 +sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv EA312927 ``` Crear la lista de fuentes para MongoDB. ``` -$ echo "deb http://repo.mongodb.org/apt/ubuntu xenial/mongodb-org/3.2 multiverse" | sudo tee /etc/apt/sources.list.d/mongodb-org-3.2.list +echo "deb http://repo.mongodb.org/apt/ubuntu xenial/mongodb-org/3.2 multiverse" | sudo tee /etc/apt/sources.list.d/mongodb-org-3.2.list ``` Actualizar el listado de paquetes del sistema. ``` -$ sudo apt-get update +sudo apt-get update ``` Instalar el paquete de MongoDB Community Edition. ``` -$ sudo apt-get install mongodb-org +sudo apt-get install mongodb-org ``` Crear el fichero `/lib/systemd/system/mongod.service`. __Sólo para Ubuntu 16.04__. @@ -51,7 +51,7 @@ WantedBy=multi-user.target Llegados a este punto, MongoDB debería estar correctamente instalado, para arrancar o parar el servicio usaremos el comando `service`. Por ejemplo para reiniciar el servicio haremos: ``` -$ sudo service mongod restart +sudo service mongod restart ``` Si ejecutamos el comando `mongo` podremos ver como se accede a la consola de MongoDB y se pueden ejecutar los diferentes comandos de MongoDB. @@ -74,7 +74,7 @@ replication: Hecho esto, reiniciamos el servicio. ``` -$ sudo service mongod restart +sudo service mongod restart ``` Ahora podemos acceder a la consola de MongoDB con el comando `mongo` para poder completar la configuración del replica set. Para ello, lo único que debemos hacer es inicializar el nodo como un replica set con el siguiente comando. @@ -101,14 +101,14 @@ Se puede encontrar más información en el siguiente [enlace](https://www.digita Instalación de los paquetes necesarios. 
``` -$ sudo apt-get update -$ sudo apt-get install postgresql postgresql-contrib +sudo apt-get update +sudo apt-get install postgresql postgresql-contrib ``` Podemos comprobar que la instalación ha sido satisfactoria accediendo a la consola de PostgreSQL. ``` -$ sudo -u postgres psql +sudo -u postgres psql ``` ## Java @@ -120,7 +120,7 @@ ToroDB Stampede está escrito en Java y por tanto es necesario tener instalada u La instalación de Oracle Java 8 en Ubuntu 16.04 se hace a partir de paquetes del sistema, por lo que basta ejecutar los siguientes comandos. ``` -$ sudo add-apt-repository ppa:webupd8team/java -$ sudo apt-get update -$ sudo apt-get install oracle-java8-installer +sudo add-apt-repository ppa:webupd8team/java +sudo apt-get update +sudo apt-get install oracle-java8-installer ``` diff --git a/documentation/docs/faq.md b/documentation/docs/faq.md new file mode 100644 index 00000000..6175d35b --- /dev/null +++ b/documentation/docs/faq.md @@ -0,0 +1,111 @@ +
<h1>Frequently Asked Questions</h1>
+ +## Why that name? + +Toro means bull in Spanish. ToroDB was founded in Madrid, Spain, by [8Kdata](https://8kdata.com/). It is the very first general-purpose database software ever built by a Spanish entity. We are very proud of this fact and wanted to name it after a well-known symbol of Spain, the toro. And the toro is a fast, solid, strong, but noble animal. Just like ToroDB. + +## If ToroDB uses PostgreSQL, why not just base it on jsonb? + +jsonb is a really cool data type for PostgreSQL, with a rich function set support that allows JSON data in a regular column, and it supports advanced indexing. jsonb was intended to allow adding some unstructured column(s) to your relational tables, and it fits really well for that purpose. But ToroDB's design and goals go way beyond jsonb's: + +* Transform your unstructured data to a relational design, that leads to significant improvements in storage/IO/cache, having data partitioned by "type" and automatic data normalization. + +* Provide native support for a NoSQL API --like ToroDB does with MongoDB's wire protocol and query API-- so you could directly use your MongoDB drivers, code and tools to interface with the database. + +* Offer replication and sharding the same way NoSQL does (like replicating from a MongoDB replica set). + +* Support non-PostgreSQL backends. While we love PostgreSQL, one size does not fit all, and other people have different requirements or different environments, like MPP (Massively Parallel) databases, in-memory solutions or just different stacks. + +Still, ToroDB uses a little bit of jsonb internally: to represent arrays of scalar values; and to represent the structure table, which stores the "shape" ("type") of the documents in the collection. + +## What about ToroDB's performance? + +Contrary to some popular beliefs, RDBMSs are not slow. Indeed, they can be quite fast. It's not hard, for instance, to achieve dozens or [hundreds of thousands of tps on RDBMSs like PostgreSQL](http://obartunov.livejournal.com/181981.html). The main problem is that benchmarks usually compare apples to oranges. Durability, for instance, is frequently reduced or suppressed in most NoSQL benchmarks, while it significantly impacts performance. The same goes on with replication. Take for instance a typical MongoDB benchmark, add journaling and replication (which you will very likely have turned on in a production environment), and your numbers will drop by an order of magnitude (160K tps vs 32K tps, 50% reads + 50% writes: [http://obartunov.livejournal.com/181981.html](http://obartunov.livejournal.com/181981.html)). + +## What databases does ToroDB support as backends? Are there any plans to support other backends? + +Currently, ToroDB supports PostgreSQL as a backend. However, design and code have always kept in mind the possibility of supporting other backends. So it's technically possible and it will happen. Stay tuned! + +## How do I optimally configure PostgreSQL for ToroDB? + +As per ToroDB, there are no special configuration parameters required. So it really depends on your hardware characteristics, workload, network architecture and so on. Usual PostgreSQL configuration recommendations apply. There are hundreds of places on the Internet that discuss how to do this. You may start from [Tuning Your PostgreSQL Server](https://wiki.postgresql.org/wiki/Tuning_Your_PostgreSQL_Server) if you need some help. 
+ +Here are some recommendations though: + +As with any other Postgres configuration, don't forget to tune the "ususal suspects" such as shared_buffers and checkpoint_segments (or max_wal_size if on 9.5). + +Be aware of the memory allocated for PostgreSQL and the JVM if they are both co-located. If this is the case, you may probably want to allocate shared_buffers as you usually do, but reduce effective_cache_size by at least the maximum amount of heap allocated by the JVM (-Xmx). + +Consider [setting synchronous_commit](http://www.postgresql.org/docs/9.4/static/runtime-config-wal.html) to off if you can tolerate some potential data loss. This will not corrupt your data in any way, and may improve performance. It is similar to MongoDB's behavior, where you may get writes acknowledged that may be lost if the server crashes during a small time window after the write happened. Please review wal_writer_delay if setting synchronous_commit to off to control the risk of potential data loss. + +Make sure that ToroDB's configuration parameters generic.connectionPoolSize and generic.reservedReadPoolSize do not add up to more than max_connections. + +Use data checksums for your PostgreSQL cluster if you want checksum validation at rest. + +## What is ToroDB's license? + +ToroDB is licensed under the GNU Affero General Public License v3 ([AGPLv3](https://www.gnu.org/licenses/agpl-3.0.html)). This means that ToroDB is free software, and you may freely use it, run it, modify and inspect it, as long as you comply with the terms of the license. As a non authoritative summary, this basically means that: + +ToroDB is provided free of charge. Just download and use it. +If you make a derived version of ToroDB, or integrate ToroDB with other software, all of it must also be licensed under the AGPLv3 or a compatible license. This implies that users of your software will also have the same rights as ToroDB users, including access to ToroDB's source code. Copyright must also be preserved. + +If you offer ToroDB or a derived work as a hosted service (like a DbaaS --Database as a Service--), your users are also bound by this license and the rights granted by the license also apply to them. + +If you want to create a derived work or integrate ToroDB or parts of it into proprietary software, or do not want to be bound by the terms of the AGPLv3, please contact us at torodb at torodb dot com. + +## What is MongoWP and how is it related to ToroDB? + +MongoWP (Mongo Wire Protocol) is a component layer of ToroDB. However, it is being developed independently of ToroDB, and it is available at a [separate Github repository](https://github.com/8kdata/mongowp). MongoWP provides an abstraction layer for any Java-based software that would want to behave as a MongoDB server. It implements the MongoDB wire protocol and abstracts mongowp users from it. Just implement mongowp's API and start coding your own MongoDB server! It may also be the basis for other MongoDB-protocol related software such as clients (there's some basic client support in mongowp), proxies, query routers, etc. + +MongoWP is based on Netty, a great asynchronous network I/O framework for the JVM. Netty is based on the event-based architecture, which does allocate a small number of threads for incoming connections, rather than a thread-per-connection, resulting in a really fast request dispatcher. + +## What other open source components does ToroDB use? + +* [PostgreSQL](http://www.postgresql.org/). The most advanced open source database. 
+
+* [Netty](http://netty.io/), used by MongoWP. The great asynchronous network I/O framework for the JVM.
+
+* [jOOQ](http://www.jooq.org/). jOOQ generates Java code from your database and lets you build type-safe SQL queries through its fluent API.
+
+* [HikariCP](http://brettwooldridge.github.io/HikariCP/). The fastest Java connection pooler.
+
+There are also many other Java libraries used by ToroDB like [ThreeTen](http://www.threeten.org/), [Guava](https://github.com/google/guava), [Guice](https://github.com/google/guice), [Findbugs](http://findbugs.sourceforge.net/), [jCommander](http://jcommander.org/), [Jackson](http://wiki.fasterxml.com/JacksonHome) and some others. We also use [Travis](https://travis-ci.org/) for CI tests.
+
+ToroDB has the deepest gratitude to all the above projects, which are great components, and to every other bit of open source that directly or indirectly helps to build or run ToroDB.
+
+## Which indexes are created?
+
+ToroDB Stampede doesn't support all index types. Some indexes are supported or partially supported, and others are skipped.
+
+ * **Single field indexes**: Are fully supported.
+ * **Compound indexes**: Are not supported and are not created.
+ * **Multikey indexes**: The only multikey indexes created in ToroDB Stampede are those whose field(s) are in an embedded document. Multikey indexes over scalar values of an array are not created.
+ * **Text indexes**: Are not supported and are not created.
+ * **2dsphere indexes**: Are not supported and are not created.
+ * **2d indexes**: Are not supported and are not created.
+ * **Hashed indexes**: Are not supported and are not created.
+
+Any created index can be explicitly [excluded in the configuration](installation/configuration.md#exclude-a-mongodb-index).
+
+## The command wget is not found in macOS
+
+By default, macOS does not include the wget tool in the terminal. If you want to use it, [Homebrew](http://brew.sh) can be used.
+
+Once Homebrew is installed, wget can be installed with `brew install wget`.
+
+## No pg_hba.conf entry
+
+Depending on the running Linux distribution and PostgreSQL installation, the error below could appear.
+
+```
+FATAL: no pg_hba.conf entry for host "...", user "...", database "...", SSL off
+```
+
+This happens because some installations of PostgreSQL are configured with strict security policies, so PostgreSQL rejects host connections through TCP. The `pg_hba.conf` file (usually located in PostgreSQL's data directory or configuration directory) must be edited with a rule that allows access to the database for the ToroDB Stampede user.
+
+```
+ host torod torodb 127.0.0.1/32 md5
+ host torod torodb ::1/128 md5
+```
+
+__Make sure that the new rules precede any other rule for the same host that applies to all users (e.g. 127.0.0.1/32). For more information on `pg_hba.conf` refer to the [Official PostgreSQL documentation](https://www.postgresql.org/docs/current/static/auth-pg-hba-conf.html)__.
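The pg_hba.conf answer above ends after editing the file. As a small follow-up sketch, assuming the `torod` database and `torodb` user from the example rules, port 5432, and a systemd-managed PostgreSQL service named `postgresql` (the service name varies by distribution), the change can be applied and checked like this:

```no-highlight
# Reload PostgreSQL so the new pg_hba.conf rules take effect
sudo systemctl reload postgresql

# Verify that the torodb user can reach the torod database over TCP
# (psql will prompt for the password configured for that user)
psql "host=127.0.0.1 port=5432 dbname=torod user=torodb" -c "SELECT 1"
```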
diff --git a/stampede/documentation/docs/glossary.md b/documentation/docs/glossary.md similarity index 100% rename from stampede/documentation/docs/glossary.md rename to documentation/docs/glossary.md diff --git a/stampede/documentation/docs/how-to-use.md b/documentation/docs/how-to-use.md similarity index 92% rename from stampede/documentation/docs/how-to-use.md rename to documentation/docs/how-to-use.md index 2952adbd..4fdbb6ec 100644 --- a/stampede/documentation/docs/how-to-use.md +++ b/documentation/docs/how-to-use.md @@ -4,10 +4,10 @@ To understand better how the JSON document to relational storage mapping algorit Given that ToroDB Stampede and all its requisites are met, the dataset will be imported into MongoDB to be replicated in PostgreSQL. This is done with next commands. -``` -$ wget https://www.dropbox.com/s/570d4tyt4hpsn03/primer-dataset.json?dl=0 +```no-highlight +wget https://www.torodb.com/download/primer-dataset.json -$ mongoimport -d stampede -c primer primer-dataset.json +mongoimport -d stampede -c primer primer-dataset.json ``` The import was done with database `stampede` and collection `primer`, this is important because it determines the schema and table names created in the relational storage. In PostgreSQL the replication is done in the `torod` database, schema `stampede` with one root table #`primer` and some associated tables named like `primer_*`. @@ -54,7 +54,7 @@ As stated above, the root of the document is mapped to a table with the name use Each element of the root level is mapped to a different column of the table, either an scalar or subdocument. Next chapter contains the different datatypes that can be created in the relational schema. All of them are indicated as a postfix of the column name, for example `cuisine` key is created as `cuisine_s` because it contains string values. -``` +```no-highlight did | address_e | restaurant_id_s | name_s | cuisine_s | _id_x | borough_s | grades_e -----+-----------+-----------------+----------------------------------------------------------------------------------------------------+------------------------------------------------------------------+----------------------------+---------------+---------- 0 | f | 40384115 | Phil & Sons Restaurant & Pizzeria | Pizza/Italian | \x580f12efbe6e3fff2237caef | Queens | t @@ -69,7 +69,7 @@ did | address_e | restaurant_id_s | #### primer_address -``` +```no-highlight did | rid | seq | zipcode_s | coord_e | street_s | building_s -----+-------+-----+-----------+---------+----------------------------------------+------------ 0 | 0 | | 11355 | t | Main Street | 57-29 @@ -84,9 +84,9 @@ did | rid | seq | zipcode_s | coord_e | street_s #### primer_address_coord -The table `primer_address_coord` is a special case, like `primer_grades`, because those paths contain an array. That is the reason why a column `seq` is used in those tables, indicating the position of the element in the original arrays. To understand better the metadata columns it is recommended to read the chapter [metada](how-to-use.md#metadata). +The table `primer_address_coord` is a special case, like `primer_grades`, because those paths contain an array. That is the reason why a column `seq` is used in those tables, indicating the position of the element in the original arrays. To understand better the metadata columns it is recommended to read the chapter [metadata](how-to-use.md#metadata). 
-``` +```no-highlight did | rid | pid | seq | v_d -----+-------+-------+-----+-------------- 0 | 0 | 0 | 0 | -73.825679 @@ -109,7 +109,7 @@ did | rid | pid | seq | v_d #### primer_grades -``` +```no-highlight did | rid | seq | date_t | score_i | grade_s | score_n -----+-------+-----+------------------------+---------+----------------+--------- 0 | 0 | 0 | 2014-08-21 02:00:00+02 | 6 | A | @@ -163,19 +163,28 @@ The different data types used by ToroDB Stampede are represented in the table be | Postfix | What does it mean? | |---------|--------------------| +| _a | This represents MongoDB's MAX_KEY type, stored with a true value. | | _b | Boolean value, stored as a boolean in PostgreSQL. | | _c | A date (with time) value in format ISO-8601, stored with PostgreSQL type date. | | _d | A 64-bit IEEE 754 floating point, stored with PostgreSQL type double precision. | | _e | A child element, it can be an object or an array, stored with PostgreSQL type boolean with a value of false to indicate a child object and true to indicate a child array. | +| _g | A PostgreSQL jsonb type, composed of two strings meaning the pattern and the evaluation options for a RegEx in MongoDB's style. | | _i | A 32-bit signed two's complement integer, stored with PostgreSQL type integer. | +| _j | This represents the MONGO_JAVASCRIPT type, stored with PostgreSQL type character varying. | +| _k | This represents MongoDB's MIN_KEY type, stored with a false value. | | _l | A 64-bit signed two's complement integer, stored with PostgreSQL type bigint. | -| _n | A null value, stored with PostgreSQL type boolean (nullable). It cannot take value false, just true or null. When the value is true means the JSON document has value null for that path, when it is null it means the path has another value or does not exist for that document. | | _m | A time value in format ISO-8601, stored with PostgreSQL type time. | +| _n | A null value, stored with PostgreSQL type boolean (nullable). It cannot take value false, just true or null. When the value is true means the JSON document has value null for that path, when it is null it means the path has another value or does not exist for that document. | +| _p | This represents the MONGO_DB_POINTER type, and it is stored as a PostgreSQL jsonb, composed of two strings meaning the namespace and the objectId. | +| _q | This represents MongoDB's Decimal128 type. It's stored as a PostgreSQL type containing a numeric value and three booleans that specify whether the value is or isn't infinite, NaN or negative zero. | | _r | Binary object, it is an array of bytes stored in PostgreSQL as bytea. | | _s | An array of UTF-8 characters representing a text, stored with PostgreSQL type character varying. | | _t | Number of milliseconds from 1970-01-01T00:00:00Z, stored with PostgreSQL type timestamptz. | -| _x | This represent the MONGO_OBJECT_ID and it is stored as a PostgreSQL bytea. | -| _y | This represent the MONGO_TIMESTAMP and it is stored as a PostgreSQL composite type formed by an integer column secs and an integer column counter. | +| _u | This represents the undefined type, stored with a true value. | +| _w | This represents the MONGO_JAVASCRIPT_WITH_SCOPE type, stored with PostgreSQL type jsonb. | +| _x | This represents the MONGO_OBJECT_ID and it is stored as a PostgreSQL bytea. | +| _y | This represents the MONGO_TIMESTAMP and it is stored as a PostgreSQL composite type formed by an integer column secs and an integer column counter. | +| _z | This represents the DEPRECATED type. 
We assign a String to represent it, so it is stored with PostgreSQL type character varying. | __Notes about MONGO_OBJECT_ID__: ObjectIds are small, likely unique, fast to generate, and ordered. ObjectId values consists of 12-bytes, where the first four bytes are a timestamp that reflect the ObjectId’s creation, specifically: @@ -186,11 +195,11 @@ __Notes about MONGO_OBJECT_ID__: ObjectIds are small, likely unique, fast to gen ### Data conflict resolution -Because the JSON documents nature, it can happen that the same path contains different data types or even in some documents the path doesn't exist. That is not a problem for the JSON document but it is for a relational storage where each column should have an associated data type. +Because of JSON documents nature, it can happen that the same path contains different data types or even in some documents the path doesn't exist. That is not a problem for the JSON document but it is for a relational storage where each column should have an associated data type. To solve this problem in ToroDB Stampede, each data type has a different column. For example, in the `primer_grades` table there are two different columns for the `score` key. One is `score_i` that represents the integer values and another one is `score_n` that represents when that value contains null in the original document (because it is mandatory to detect when null value was given and when the path was not given). -``` +```no-highlight did | rid | seq | date_t | score_i | grade_s | score_n ------+-------+-----+------------------------+---------+----------------+--------- 0 | 0 | 0 | 2014-08-21 02:00:00+02 | 6 | A | @@ -259,7 +268,7 @@ The metadata columns in the data tables are not enough to keep the data integrit Table `database` stores the name given by the user to the database in MongoDB, that is stored in a schema in PostgreSQL. Because PostgreSQL has limits on the database names it is dereferenced here, but usually the values are the same unless a very large name is used. -``` +```no-highlight # select * from database; name | identifier @@ -271,7 +280,7 @@ Table `database` stores the name given by the user to the database in MongoDB, t Among the name of the database one, collection name was given in MongoDB layer, so it is stored in the table `collection` dereferencing it in the same way. -``` +```no-highlight # select * from collection; database | name | identifier @@ -285,7 +294,7 @@ As stated above, the name of the table for the root element is the same one used With larger paths, like `address.coord`, the table ref will be the composition of the path, so `{address,coord}`. And the table identifier will be the concatenation of the dereferenced names of collection and path identifiers `primer_address_coord`. -``` +```no-highlight # select * from doc_part; database | collection | table_ref | identifier | last_rid @@ -300,7 +309,7 @@ With larger paths, like `address.coord`, the table ref will be the composition o `field` table stores the data type of each column and its identifier. For a given combination of `database, collection, table_ref`, the used name of the column is stored and the data type associated. 
This data type can be either a scalar value, like `string` or `double`, or a `child` type (this means an associated table exists) -``` +```no-highlight # select * from field; database | collection | table_ref | name | type | identifier @@ -328,7 +337,7 @@ With larger paths, like `address.coord`, the table ref will be the composition o In the given example, the only row in `scalar` table is related to the path `address.coord` with type `double`. This means that column `v_d` in the table `stampede_address_coord` is a `double`. -``` +```no-highlight # select * from scalar; database | collection | table_ref | type | identifier @@ -342,7 +351,7 @@ The data in the relational storage can be queries like any other relational data For example, the name of all bakeries in the ZIP code 10462, could be: -``` +```no-highlight select p.name_s from primer p, primer_address pa where p.cuisine_s = 'Bakery' @@ -350,7 +359,7 @@ where and pa.zipcode_s = '10462' ``` -``` +```no-highlight # select p.name_s from primer p, primer_address pa where p.cuisine_s = 'Bakery' and p.did = pa.did and pa.zipcode_s = '10462'; name_s @@ -366,7 +375,7 @@ where One of the advantages having the data in a relational format is the ability to execute complex queries in a fast and efficient way. For example, to the previous query, the average score of each bakery could be added with just a few lines. -``` +```no-highlight select p.name_s, avg(pg.score_i) from primer p, primer_address pa, primer_grades pg where @@ -377,7 +386,7 @@ where group by p.name_s ``` -``` +```no-highlight # select p.name_s, avg(pg.score_i) from primer p, primer_address pa, primer_grades pg where p.cuisine_s = 'Bakery' and p.did = pa.did and pa.zipcode_s = '10462' and pg.did = p.did group by p.name_s; name_s | avg @@ -393,7 +402,7 @@ group by p.name_s And one filter can be applied with a few lines more, keeping query very simple and the execution time responsive. 
-``` +```no-highlight select p.name_s, avg(pg.score_i) from primer p, primer_address pa, primer_grades pg where @@ -405,7 +414,7 @@ group by p.name_s having avg(pg.score_i) > 10 ``` -``` +```no-highlight # select p.name_s, avg(pg.score_i) from primer p, primer_address pa, primer_grades pg where p.cuisine_s = 'Bakery' and p.did = pa.did and pa.zipcode_s = '10462' and pg.did = p.did group by p.name_s having avg(pg.score_i) > 10; name_s | avg diff --git a/stampede/documentation/docs/images/pid_reference.jpeg b/documentation/docs/images/pid_reference.jpeg similarity index 100% rename from stampede/documentation/docs/images/pid_reference.jpeg rename to documentation/docs/images/pid_reference.jpeg diff --git a/stampede/documentation/docs/images/tables_distribution.jpeg b/documentation/docs/images/tables_distribution.jpeg similarity index 100% rename from stampede/documentation/docs/images/tables_distribution.jpeg rename to documentation/docs/images/tables_distribution.jpeg diff --git a/stampede/documentation/docs/images/toro_stampede_mapping.jpg b/documentation/docs/images/toro_stampede_mapping.jpg similarity index 100% rename from stampede/documentation/docs/images/toro_stampede_mapping.jpg rename to documentation/docs/images/toro_stampede_mapping.jpg diff --git a/stampede/documentation/docs/images/toro_stampede_structure.jpg b/documentation/docs/images/toro_stampede_structure.jpg similarity index 100% rename from stampede/documentation/docs/images/toro_stampede_structure.jpg rename to documentation/docs/images/toro_stampede_structure.jpg diff --git a/stampede/documentation/docs/index.md b/documentation/docs/index.md similarity index 100% rename from stampede/documentation/docs/index.md rename to documentation/docs/index.md diff --git a/documentation/docs/installation/binaries.md b/documentation/docs/installation/binaries.md new file mode 100644 index 00000000..57f8dfee --- /dev/null +++ b/documentation/docs/installation/binaries.md @@ -0,0 +1,85 @@ +
<h1>Installation with binaries</h1>
+
+One of the recommended ways to use ToroDB Stampede is through the binary distribution. It means that a precompiled distribution is downloaded and then executed using command-line tools.
+
+## Linux/macOS
+
+Given that the [previous requirements](previous-requirements.md) are met and the default configuration is used, to launch ToroDB Stampede download the distribution from the following [link](https://www.torodb.com/download/torodb-stampede-1.0.0-beta2.tar.bz2), extract it and execute it.
+
+```no-highlight
+wget https://www.torodb.com/download/torodb-stampede-1.0.0-beta2.tar.bz2
+
+tar xjf torodb-stampede-1.0.0-beta2.tar.bz2
+
+export TOROHOME="$(pwd)/torodb-stampede-1.0.0-beta2"
+
+"$TOROHOME/bin/torodb-stampede"
+```
+
+### Configure as a Linux systemd service
+
+You can install ToroDB Stampede as a systemd service with the following steps:
+
+```no-highlight
+sudo ln -s "$TOROHOME/bin/torodb-stampede" /usr/bin/.
+
+sudo useradd -M -d "$TOROHOME" torodb
+
+sudo cp "$TOROHOME/systemd/torodb-stampede.service.sample" /lib/systemd/system/torodb-stampede.service
+```
+
+#### Manage systemd service
+
+##### Starting the service
+
+Make sure you have enabled the ToroDB Stampede service. To enable the service just run:
+
+```no-highlight
+sudo systemctl enable torodb-stampede
+```
+
+To start the service run:
+
+```no-highlight
+sudo systemctl start torodb-stampede
+```
+
+##### Stopping the service
+
+To stop the ToroDB Stampede service:
+
+```no-highlight
+sudo systemctl stop torodb-stampede
+```
+
+##### Accessing logs
+
+To view the logs of the ToroDB Stampede service:
+
+```no-highlight
+sudo journalctl --no-pager -u torodb-stampede
+```
+
+To follow the logs:
+
+```no-highlight
+sudo journalctl --no-pager -u torodb-stampede -f
+```
+
+To view all logs:
+
+```no-highlight
+sudo journalctl --no-tail --no-pager -u torodb-stampede
+```
+
+
+## Windows
+
+Given that the [previous requirements](previous-requirements.md#create-toropass-file) are met, the only steps needed to launch ToroDB Stampede are:
+
+* Download the distribution from the following [link](https://www.torodb.com/download/torodb-stampede-1.0.0-beta2.zip).
+* Uncompress the downloaded Zip file in the final ToroDB Stampede directory (`%TOROHOME%`).
+* Execute the command `C:\>%TOROHOME%\bin\torodb-stampede` or simply double-click the `torodb-stampede.bat` file located in the `bin` folder.
+
+
+
diff --git a/documentation/docs/installation/configuration.md b/documentation/docs/installation/configuration.md new file mode 100644 index 00000000..b169ead3 --- /dev/null +++ b/documentation/docs/installation/configuration.md @@ -0,0 +1,301 @@ +
<h1>Configuration</h1>
+
+ToroDB Stampede can be launched with custom configuration options. There are two ways to do it: using command-line parameters or using a configuration file. The recommended way is using a configuration file because it is more versatile and self-documented.
+
+To use the configuration file, the `-c` parameter should be specified.
+
+```no-highlight
+torodb-stampede -c myconfiguration.yml
+
+```
+
+You can also check the configuration used by ToroDB Stampede using the `-l` parameter.
+
+```no-highlight
+torodb-stampede -l
+```
+
+The previous sections talk about the basic configuration of the system, but it is highly probable that some specific configuration must be done to work in production environments.
+
+## Custom PostgreSQL connection
+
+By default ToroDB Stampede connects to PostgreSQL using the following configuration:
+
+```json
+backend:
+  postgres:
+    host: localhost
+    port: 5432
+    database: torod
+    user: torodb
+    toropassFile: "~/.toropass"
+    applicationName: "toro"
+    ssl: false
+```
+
+You may change this configuration depending on your requirements.
+You can enable the SSL connection by setting `ssl: true` in the configuration file.
+To provide the PostgreSQL user's password that ToroDB Stampede will use to connect to PostgreSQL
+you can specify the parameter `--ask-for-password` to make ToroDB Stampede prompt for the password while starting up,
+or you can create a PostgreSQL credentials configuration file `~/.toropass`, using the `.pgpass` file format.
+The right format is one or more lines formatted as `<host>:<port>:<database>:<user>:<password>`.
+
+```no-highlight
+echo "localhost:5432:torod:torodb:torodb" > ~/.toropass
+chmod 400 ~/.toropass
+```
+
+You may change the `.toropass` path using the `toropassFile` parameter in the ToroDB Stampede configuration file. For example:
+
+```json
+backend:
+  postgres:
+    host: localhost
+    port: 5432
+    database: torod
+    user: torodb
+    toropassFile: /secret/mytoropass
+    applicationName: "toro"
+    ssl: false
+```
+## Backend connection pooling
+
+By default ToroDB Stampede uses a connection pool with the following configuration:
+
+```json
+backend:
+  pool:
+    connectionPoolTimeout: 10000
+    connectionPoolSize: 30
+```
+
+You may tune those parameters at will. The only constraint is that `connectionPoolSize` has to be at least 20.
+
+## Custom MongoDB connection
+
+ToroDB Stampede will connect to MongoDB using no authentication and no SSL connection by default. You can set up the connection to MongoDB using the `auth` and `ssl` sections in the ToroDB Stampede configuration.
+
+For example, to connect using the cr or scram_sha1 authentication mode with simple SSL support you may use the following configuration:
+
+```json
+replication:
+  replSetName: rs1
+  syncSource: localhost:27017
+  auth:
+    mode: negotiate
+    user: mymongouser
+    source: mymongosource
+  ssl:
+    enabled: true
+    allowInvalidHostnames: false
+    caFile: mycafile.pem
+```
+
+## Filtering replication
+
+By default ToroDB Stampede replicates all databases and collections available in your MongoDB.
+You can specify some filters that allow you to include a single database, include only some collections,
+exclude a whole database or exclude some collections by changing the ToroDB Stampede configuration.
+Exclusions always override inclusions, so if you exclude something it will prevail over an inclusion.
+
+!!! info
+    Let's assume for our examples that you have two databases, *films* and *music*, and each one has two collections, *title* and *performer*.
+ +### Include only a MongoDB database or collection + +In the replication section of the yml config file add an include item with the database to include: + +```json +replication: + replSetName: rs1 + syncSource: localhost:27017 + include: + : "*" +``` + +or if you want to include just some collections: + +```json +replication: + replSetName: rs1 + syncSource: localhost:27017 + include: + : + - + - +``` + +If you want to include only the database called *film* but not the specific collection *performer* from same *film* database, you should write: + +```json +replication: + replSetName: rs1 + syncSource: localhost:27017 + include: + film: "performer" +``` + +!!! danger "Inclusion removal" + If you stop ToroDB Stampede, remove an inclusion, and restart ToroDB Stampede, the replication process will replicate operations on this database/collection + without replicating previously data form not included database/collection, reaching an inconsistent state. + + It is recommended to delete ToroDB Stampede database and restart the whole replication process from scratch. + +### Include only a MongoDB collection and a specific index inside that collection + +Sometimes you may want be sure that only specific indexes created in MongoDB have to be replicated by ToroDB Stampede. +MongoDB indexes can be included in ToroDB Stampede allowing you to save disk space and remove unuseful indexes. You just need to add the index name in the include section. + +```json +replication: + replSetName: rs1 + syncSource: localhost:27017 + include: + : + : + - name: +``` + +If you want to include only collection *performer* from *film* database with the index called *city*, you should write: + +```json +replication: + replSetName: rs1 + syncSource: localhost:27017 + include: + film: + performer: + - name: "city" +``` + +!!! danger "Inclusion removal" + If you stop ToroDB Stampede, remove an inclusion, and restart ToroDB Stampede, the replication process will not create the previously not included indexes. + ToroDB Stampede only creates indexes at the initial recovery process and when a create index command is found in the oplog replication process. + +### Exclude a MongoDB database or collection + +In the replication section of the yml config file add an exclude item with the database to exclude: + +```json +replication: + replSetName: rs1 + syncSource: localhost:27017 + exclude: + : "*" +``` + +or if you want to exclude just some collections: + +```json +replication: + replSetName: rs1 + syncSource: localhost:27017 + exclude: + : + - + - +``` + +The configuration to exclude the whole *music* database, but in *film* database only *performer* collection, you should write: + +```json +replication: + replSetName: rs1 + syncSource: localhost:27017 + exclude: + music: "*" + film: + - performer +``` + +In this case the only collection replicated is *title* from *film* database. + +!!! danger "Exclusion removal" + If you stop ToroDB Stampede, remove an exclusion, and restart ToroDB Stampede, the replication process will replicate operations on this database/collection + without replicating previously data form this database/collection, reaching an inconsistent state. + + It is recommended to delete ToroDB Stampede database and restart the whole replication process from scratch. + +### Exclude a MongoDB index + +Some index created in MongoDB for OLTP operations can be useless for OLAP and analytics operations. +MongoDB indexes can be excluded in ToroDB Stampede allowing you to save disk space. 
+You just need to add the index name in the exclude section.
+
+```json
+replication:
+  replSetName: rs1
+  syncSource: localhost:27017
+  exclude:
+    <database>:
+      <collection>:
+        - name: <index>
+```
+
+If you want to exclude the index called *city* on the collection *performer* from the *film* database, you should write:
+
+```json
+replication:
+  replSetName: rs1
+  syncSource: localhost:27017
+  exclude:
+    film:
+      performer:
+        - name: city
+```
+
+Any index unsupported by ToroDB Stampede (text, 2dsphere, 2d, hashed, ...) is ignored and is not created in the relational database, so you don't need to exclude it.
+
+!!! danger "Exclusion removal"
+    If you stop ToroDB Stampede, remove an exclusion, and restart ToroDB Stampede, the replication process will not create the previously excluded indexes.
+    ToroDB Stampede only creates indexes at the initial recovery process and when a create index command is found in the oplog replication process.
+
+### Include only a MongoDB database but not a specific collection
+
+You can combine the include and exclude sections to indicate that only a particular database has to be included, while excluding a particular collection of that database.
+
+If you want to include only the database called *film* but not the specific collection *performer* from the same *film* database, you should write:
+
+```json
+replication:
+  replSetName: rs1
+  syncSource: localhost:27017
+  include:
+    film: "*"
+  exclude:
+    film: "performer"
+```
+
+### Include only a MongoDB collection in a database but not a specific index inside that collection
+
+You can combine the include and exclude sections to indicate that only a particular collection has to be included with all its indexes except one.
+
+If you want to include only the collection *performer* from the *film* database but not the index called *city*, you should write:
+
+```json
+replication:
+  replSetName: rs1
+  syncSource: localhost:27017
+  include:
+    film:
+      performer: "*"
+  exclude:
+    film:
+      performer:
+        - name: "city"
+```
+
+## Replicate from a MongoDB Sharded Cluster
+
+In the replication section of the YAML config file, add a `shards` item with the list of shard configurations, one for each shard:
+
+```json
+replication:
+  shards:
+    - replSetName: shard1
+      syncSource: localhost:27020
+    - replSetName: shard2
+      syncSource: localhost:27030
+    - replSetName: shard3
+      syncSource: localhost:27040
+```
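+
+Whichever include or exclude rules you configure, you can check their effect directly in the backend once replication has started. This is plain PostgreSQL introspection rather than a ToroDB Stampede feature; it relies on the fact that each replicated MongoDB database is stored as a schema in PostgreSQL (as shown in the quickstart), so excluded databases should simply not appear:
+
+```no-highlight
+psql -U torodb torod -c "SELECT schema_name FROM information_schema.schemata;"
+```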

diff --git a/documentation/docs/installation/deb.md b/documentation/docs/installation/deb.md
new file mode 100644
index 00000000..63d69436
--- /dev/null
+++ b/documentation/docs/installation/deb.md
@@ -0,0 +1,57 @@
+# Installation for Ubuntu/Debian
+
+ToroDB Stampede can be installed from a PPA repository in two flavours:
+
+* torodb-stampede: in this package ToroDB Stampede comes without the backend dependency. This package is used when you have PostgreSQL installed on a different machine.
+* torodb-stampede-postgres: in this package ToroDB Stampede comes with a PostgreSQL dependency. This package is handy if you want to minimize configuration steps and do not require installing ToroDB Stampede and the PostgreSQL server on separate machines.
+
+## Install package torodb-stampede
+
+Just run:
+
+```
+sudo add-apt-repository -y ppa:8kdata
+sudo apt update
+sudo apt install torodb-stampede
+```
+
+Then, to set up ToroDB Stampede, run the interactive script as the root user:
+
+```
+sudo torodb-stampede-setup
+```
+
+You will be prompted to provide superuser credentials (if you didn't create ToroDB's database and user yourself), ToroDB's user credentials and MongoDB credentials.
+
+!!! info "Manage ToroDB Stampede service"
+    To manage the ToroDB Stampede service, please refer to the [manage systemd service section](binaries#manage-systemd-service).
+
+## Install package torodb-stampede-postgres
+
+Just run:
+
+```
+sudo add-apt-repository -y ppa:8kdata
+sudo apt update
+sudo apt install torodb-stampede-postgres
+```
+
+Then, to set up ToroDB Stampede, run the interactive script as the root user:
+
+```
+sudo torodb-stampede-setup
+```
+
+You will be prompted to provide MongoDB credentials.
+
+!!! info "Manage ToroDB Stampede service"
+    To manage the ToroDB Stampede service, please refer to the [manage systemd service section](binaries#manage-systemd-service).
+
+## Nightly build packages
+
+To install the latest unstable nightly build packages, use the ppa-dev repository:
+
+```
+sudo add-apt-repository -y ppa:8kdata/ppa-dev
+sudo apt update
+sudo apt install torodb-stampede
+```
diff --git a/stampede/documentation/docs/installation/docker.md b/documentation/docs/installation/docker.md
similarity index 67%
rename from stampede/documentation/docs/installation/docker.md
rename to documentation/docs/installation/docker.md
index 1baba671..46aa7fa8 100644
--- a/stampede/documentation/docs/installation/docker.md
+++ b/documentation/docs/installation/docker.md
@@ -8,15 +8,15 @@ ToroDB Stampede can be tested in a Docker container in two different ways. First
 
If `.toropass` file is created the docker containers can be launched with the command below.
 
```no-highlight
-$ docker run -ti -v `realpath <toropass file>`:/root/.toropass torodb/stampede
+docker run -ti -v `realpath <toropass file>`:/root/.toropass torodb/stampede
```
 
In other case it will be enough with the creation of the environment variable `TORODB_BACKEND_PASSWORD`.
 
```no-highlight
-$ TORODB_BACKEND_PASSWORD="<password>"
+TORODB_BACKEND_PASSWORD="<password>"
 
-$ docker run -ti torodb/stampede
+docker run -ti torodb/stampede
```
 
### With Docker Compose
 
@@ -24,9 +24,9 @@ $ docker run -ti torodb/stampede
 
The docker compose file must be downloaded and executed.
 
```no-highlight
-$ wget https://raw.githubusercontent.com/torodb/torodb/master/stampede/main/src/main/dist/docker/compose/torodb-stampede-fullstack/docker-compose.yml
+wget https://raw.githubusercontent.com/torodb/stampede/master/main/src/main/dist/docker/compose/torodb-stampede-fullstack/docker-compose.yml
 
-$ docker-compose up
+docker-compose up
```
 
## From source code
 
@@ -36,13 +36,13 @@ $ docker-compose up
 
The source code contains some Maven tasks that can build the right artifacts to execute ToroDB Stampede and its dependencies in Docker containers.
 
```no-highlight
-$ mvn clean package -P prod,docker -Ddocker.skipbase=false
+mvn clean package -P prod,docker -Ddocker.skipbase=false
 
-$ mvn -f stampede/main/pom.xml -P docker-stampede-fullstack docker:run -Ddocker.follow
+mvn -f stampede/main/pom.xml -P docker-stampede-fullstack docker:run -Ddocker.follow
```
 
Sometimes, errors can appear due to the Docker cache. If that happens, cache can be disabled using command options, like is done in the next example. Usually these errors are related to network connection timeouts.
 
```no-highlight
-$ mvn clean package -P prod,docker -Ddocker.skipbase=false -Ddocker.nocache=true
+mvn clean package -P prod,docker -Ddocker.skipbase=false -Ddocker.nocache=true
```
diff --git a/stampede/documentation/docs/installation/jvm-configuration-tips.md b/documentation/docs/installation/jvm-configuration-tips.md
similarity index 100%
rename from stampede/documentation/docs/installation/jvm-configuration-tips.md
rename to documentation/docs/installation/jvm-configuration-tips.md
diff --git a/stampede/documentation/docs/installation/postgresql-configuration-tips.md b/documentation/docs/installation/postgresql-configuration-tips.md
similarity index 99%
rename from stampede/documentation/docs/installation/postgresql-configuration-tips.md
rename to documentation/docs/installation/postgresql-configuration-tips.md
index f77cd729..b0b2f4de 100644
--- a/stampede/documentation/docs/installation/postgresql-configuration-tips.md
+++ b/documentation/docs/installation/postgresql-configuration-tips.md
@@ -34,6 +34,7 @@ It is the maximum time between automatic WAL checkpoints. A value between 15 and
 
Setting the value to 1/2 of total memory would be a normal conservative setting, and 3/4 of memory is a more aggressive but still reasonable amount.
+
\ No newline at end of file
diff --git a/stampede/documentation/docs/installation/previous-requirements.md b/documentation/docs/installation/previous-requirements.md
similarity index 52%
rename from stampede/documentation/docs/installation/previous-requirements.md
rename to documentation/docs/installation/previous-requirements.md
index eef48275..2bd16bb9 100644
--- a/stampede/documentation/docs/installation/previous-requirements.md
+++ b/documentation/docs/installation/previous-requirements.md
@@ -11,15 +11,6 @@ ToroDB Stampede's correct operation depends on a number of known dependencies, i
| PostgreSQL | ToroDB Stampede correct operation relies on the existence of a backend, right now it should be PostgreSQL. | [more info](https://wiki.postgresql.org/wiki/Detailed_installation_guides) |
| Java | ToroDB Stampede has been written in Java so a Java Virtual Machine is required for it's execution. | [more info](https://java.com/en/download/help/index_installing.xml) |
 
-Among the previous dependencies, if we want to compile the source code other requisites are mandatory.
-
-| | Description | External links |
-|-|-------------|----------------|
-| Git | It is the distributed version control system (DVCS) used to keep ToroDB Stampede source code up to date and synchronized between its committers. | [more info](https://git-scm.com/downloads) |
-| Maven | Dependency management and construction tasks has been delegated to Apache Maven, so it is necessary to compile the source code. | [more info](http://maven.apache.org/install.html) |
-| Docker | An open-source project that automates the deployment of Linux applications inside software containers. It allow to run a ToroDB Stampede and to test it in a controlled environment. | [more info](https://docs.docker.com/) |
-| Docker Compose | A tool for defining and running multi-container Docker applications. It allow to run test scenarios like a ToroDB Stampede replicating from a MongoDB and connected to a PostgreSQL. | [more info](https://docs.docker.com/compose/install/) |
-
## Backend setup
 
### PostgreSQL configuration
 
@@ -29,29 +20,43 @@ To work properly, the default installation of ToroDB Stampede requires a new use
 
#### Linux
 
```no-highlight
-$ createuser -S -R -D -P --interactive torodb
+createuser -S -R -D -P --interactive torodb
 
-$ createdatabase -O torodb torod
+createdb -O torodb torod
```
 
#### macOS/Windows
 
-In macOS and Windows the user and database can be created using an administration connection with `psql` command.
+In macOS and Windows the user and database can be created using an administration connection with the `psql` command (do not forget to replace `<password>` with the chosen password).
 
```no-highlight
-> CREATE USER torodb WITH PASSWORD '<password>';
+CREATE USER torodb WITH PASSWORD '<password>';
 
-> CREATE DATABASE torod OWNER torodb;
+CREATE DATABASE torod OWNER torodb;
```
 
### Create .toropass file
 
-The access configuration to the PostgreSQL database will be detailed in the `.toropass` file stored in the home directory. The example assumes local connection with default port is being used, but it can be changed by the user too.
+The access configuration for the PostgreSQL database is stored in the `.toropass` file in the home directory.
+The example assumes a local connection on the default port, but this can be changed by the user.
 
-#### Linux/macOS/Windows
-
-Create `.toropass` file in the home path with the content below.
+Create the `.toropass` file in the home path with the content below (do not forget to replace `<password>` with the chosen password).
 
```no-highlight
localhost:5432:torod:torodb:<password>
```
+
+#### Linux/macOS
+
+```no-highlight
+read -s -p "Enter password:" PASSWORD
+echo
+echo "localhost:5432:torod:torodb:$PASSWORD" > "$HOME/.toropass"
+```
+
+#### Windows
+
+```no-highlight
+set PASSWORD=<password>
+echo localhost:5432:torod:torodb:%PASSWORD%>%HOMEDRIVE%%HOMEPATH%\.toropass
+```
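+
+Although not required by the steps above, you can quickly confirm that the `torodb` role and the `torod` database were created correctly by opening a connection with them (this assumes a local PostgreSQL on the default port, as in the examples; you will be prompted for the password you chose):
+
+```no-highlight
+psql -h localhost -p 5432 -U torodb torod -c "SELECT version();"
+```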

diff --git a/documentation/docs/installation/rpm.md b/documentation/docs/installation/rpm.md
new file mode 100644
index 00000000..2945ba04
--- /dev/null
+++ b/documentation/docs/installation/rpm.md
@@ -0,0 +1,88 @@
+# Installation for Fedora/CentOS
+
+ToroDB Stampede can be installed from a COPR repository in two flavours:
+
+* torodb-stampede: in this package ToroDB Stampede comes without the backend dependency. This package is used when you have PostgreSQL installed on a different machine.
+* torodb-stampede-postgres: in this package ToroDB Stampede comes with a PostgreSQL dependency.
+This package is handy if you want to minimize configuration steps and do not require installing ToroDB Stampede and the PostgreSQL server on separate machines.
+
+## Install package torodb-stampede
+
+Just run as the root user:
+
+### For Fedora 21 / CentOS
+
+```no-highlight
+yum -y install yum-plugin-copr
+yum -y copr enable eightkdata/torodb
+yum -y install torodb-stampede
+```
+
+### For Fedora >= 22
+
+```no-highlight
+dnf -y install dnf-plugins-core
+dnf -y copr enable eightkdata/torodb
+dnf -y install torodb-stampede
+```
+
+Then, to set up ToroDB Stampede, run the interactive script as the root user:
+
+```no-highlight
+torodb-stampede-setup
+```
+
+You will be prompted to provide superuser credentials (if you didn't create ToroDB's database and user yourself), ToroDB's user credentials and MongoDB credentials.
+
+!!! info "Manage ToroDB Stampede service"
+    To manage the ToroDB Stampede service, please refer to the [manage systemd service section](binaries#manage-systemd-service).
+
+## Install package torodb-stampede-postgres
+
+Just run as the root user:
+
+### For Fedora 21 / CentOS
+
+```no-highlight
+yum -y install yum-plugin-copr
+yum -y copr enable eightkdata/torodb
+yum -y install torodb-stampede-postgres
+```
+
+### For Fedora >= 22
+
+```no-highlight
+dnf -y install dnf-plugins-core
+dnf -y copr enable eightkdata/torodb
+dnf -y install torodb-stampede-postgres
+```
+
+Then, to set up ToroDB Stampede, run the interactive script as the root user:
+
+```no-highlight
+torodb-stampede-setup
+```
+
+You will be prompted to provide MongoDB credentials.
+
+!!! info "Manage ToroDB Stampede service"
+    To manage the ToroDB Stampede service, please refer to the [manage systemd service section](binaries#manage-systemd-service).
+
+## Nightly build packages
+
+To install the latest unstable nightly build packages, use the torodb-dev repository (as root):
+
+### For Fedora 21 / CentOS
+
+```no-highlight
+yum -y install yum-plugin-copr
+yum -y copr enable eightkdata/torodb-dev
+yum -y install torodb-stampede
+```
+
+### For Fedora >= 22
+
+```no-highlight
+dnf -y install dnf-plugins-core
+dnf -y copr enable eightkdata/torodb-dev
+dnf -y install torodb-stampede
+```
diff --git a/documentation/docs/installation/snap.md b/documentation/docs/installation/snap.md
new file mode 100644
index 00000000..576b4c09
--- /dev/null
+++ b/documentation/docs/installation/snap.md
@@ -0,0 +1,53 @@

+# Installation with SNAP
+
+ToroDB Stampede can be installed from the SNAP public store in two flavours:
+
+* torodb-stampede: in this package ToroDB Stampede comes without the backend included, so you will have to provide one.
+* torodb-stampede-postgres: in this package ToroDB Stampede comes with PostgreSQL, so you can start using it with minimal configuration.
+
+## Install package torodb-stampede
+
+Just run:
+
+```
+sudo snap install torodb-stampede
+```
+
+Then, to set up ToroDB Stampede, run the interactive script as the root user:
+
+```
+sudo torodb-stampede.setup
+```
+
+You will be prompted to provide superuser credentials (if you didn't create ToroDB's database and user yourself), ToroDB's user credentials and MongoDB credentials.
+
+!!! info "Manage ToroDB Stampede service"
+    To manage the ToroDB Stampede service, please refer to the [manage systemd service section](binaries#manage-systemd-service).
+    Replace the service name *torodb-stampede* with *snap.torodb-stampede.daemon.service*.
+
+## Install package torodb-stampede-postgres
+
+Just run:
+
+```
+sudo snap install torodb-stampede-postgres
+```
+
+Then, to set up ToroDB Stampede, run the interactive script as the root user:
+
+```
+sudo torodb-stampede.setup
+```
+
+You will be prompted to provide MongoDB credentials.
+
+!!! info "Manage ToroDB Stampede service"
+    To manage the ToroDB Stampede service, please refer to the [manage systemd service section](binaries#manage-systemd-service).
+    Replace the service name *torodb-stampede* with *snap.torodb-stampede-postgres.daemon.service*.
+
+## Nightly build packages
+
+To install the latest unstable nightly build packages, add the `--edge` and `--devmode` parameters to the `snap` command:
+
+```
+sudo snap install torodb-stampede --edge --devmode
+```
diff --git a/documentation/docs/installation/source-code.md b/documentation/docs/installation/source-code.md
new file mode 100644
index 00000000..8fa565ba
--- /dev/null
+++ b/documentation/docs/installation/source-code.md
@@ -0,0 +1,76 @@

+# Installation from source code
+
+The installation from the source code is quite similar to the binary installation, but it is necessary to build ToroDB Stampede from the sources first.
+
+In addition to the dependencies listed in the [previous requirements](previous-requirements.md#project-dependencies) section, the following requirements are mandatory to compile the source code.
+
+| | Description | External links |
+|-|-------------|----------------|
+| Git | The distributed version control system (DVCS) used to keep the ToroDB Stampede source code up to date and synchronized between its committers. | [more info](https://git-scm.com/downloads) |
+| Maven | Dependency management and build tasks have been delegated to Apache Maven, so it is necessary to compile the source code. | [more info](http://maven.apache.org/install.html) |
+| Docker | An open-source project that automates the deployment of Linux applications inside software containers. It allows running ToroDB Stampede and testing it in a controlled environment. | [more info](https://docs.docker.com/) |
+| Docker Compose | A tool for defining and running multi-container Docker applications. It allows running test scenarios such as ToroDB Stampede replicating from MongoDB and connected to PostgreSQL. | [more info](https://docs.docker.com/compose/install/) |
+
+## Linux/macOS
+
+Download the source code.
+
+```no-highlight
+cd /tmp
+
+git clone https://github.com/torodb/stampede.git
+```
+
+Compile the source code.
+
+```no-highlight
+cd stampede
+
+mvn clean package -P assembler,prod
+```
+
+As explained in the [previous requirements](previous-requirements.md#create-toropass-file) section, create the `.toropass` file in the current user's home directory with the following content.
+
+```no-highlight
+echo "localhost:5432:torod:torodb:<password>" > ~/.toropass
+```
+
+Extract and launch ToroDB Stampede (replace `$TOROHOME` with the final ToroDB Stampede directory).
+
+```no-highlight
+cd "$TOROHOME"
+
+tar xjf "$TOROHOME/stampede/main/target/dist/torodb-stampede-1.0.0-beta2.tar.bz2"
+
+torodb-stampede-1.0.0-beta2/bin/torodb-stampede
+```
+
+## Windows
+
+Download the source code into a temporary directory.
+
+```no-highlight
+C:\tmp\>git clone https://github.com/torodb/stampede.git
+```
+
+Compile the source code.
+
+```no-highlight
+C:\tmp\>cd stampede
+
+C:\tmp\stampede>mvn clean package -P assembler,prod
+```
+
+As explained in the [previous requirements](previous-requirements.md#create-toropass-file) section, create the `.toropass` file in the current user's home directory `%HOME%\.toropass` with the following content.
+
+```no-highlight
+localhost:5432:torod:torodb:<password>
+```
+
+Uncompress the Zip file located in `C:\tmp\torodb\stampede\main\target\dist\torodb-stampede-1.0.0-beta2.zip` into the final ToroDB Stampede directory (replace `%TOROHOME%` with the final ToroDB Stampede directory), and then execute the command:
+
+```no-highlight
+C:\>%TOROHOME%\bin\torodb-stampede
+```
+
+or simply double-click the `torodb-stampede.bat` file located in the `bin` folder.
diff --git a/documentation/docs/metrics.md b/documentation/docs/metrics.md
new file mode 100644
index 00000000..1c07e7e7
--- /dev/null
+++ b/documentation/docs/metrics.md
@@ -0,0 +1,29 @@

+# Metrics
+
+ToroDB Stampede exposes multiple metrics using JMX; some of them are custom metrics and others are metrics offered by third-party products like Flexy-pool.
+
+## Flexy-pool metrics
+
+ToroDB Stampede uses Hikari as a connection pool, but it is wrapped with Flexy-pool, so the metrics exposed by Flexy-pool are available through JMX. If a JMX console is used, the following metrics are available.
+
+| Name | Description |
+|------|-------------|
+| concurrentConnectionsHistogram | A histogram of the number of concurrent connections. This indicates how many connections are being used at once. |
+| concurrentConnectionRequestsHistogram | A histogram of the number of concurrent connection requests. This indicates how many connections are being requested at once. |
+| connectionAcquireMillis | A time histogram for the target data source connection acquire interval. |
+| connectionLeaseMillis | A time histogram for the connection lease time. The lease time is the duration between the moment a connection is acquired and the time it gets released. |
+| maxPoolSizeHistogram | A histogram of the target pool size. The pool size might change if the IncrementPoolOnTimeoutConnectionAcquiringStrategy is being used. |
+| overallConnectionAcquireMillis | A time histogram for the total connection acquire interval. This is the connectionAcquireMillis plus the time spent by the connection acquire strategies. |
+| overflowPoolSizeHistogram | A histogram of the pool size overflowing. The pool size might overflow if the IncrementPoolOnTimeoutConnectionAcquiringStrategy is being used. |
+| retryAttemptsHistogram | A histogram of the number of retry attempts. This is incremented by the RetryConnectionAcquiringStrategy. |
+
+Because ToroDB Stampede uses more than one connection pool, multiple namespaces will be available through the JMX console.
+
+| Namespace | Description |
+|-----------|-------------|
+| com.vladmihalcea.flexypool.metric.codehale.JmxMetricReporter.cursors | Read-only connections used by the system. |
+| com.vladmihalcea.flexypool.metric.codehale.JmxMetricReporter.session | Connections used by the system to do the replication process from the MongoDB instance. |
+| com.vladmihalcea.flexypool.metric.codehale.JmxMetricReporter.system | Connections used by the system to do internal operations. |
+
+More information can be found in this [link](https://github.com/vladmihalcea/flexy-pool).
\ No newline at end of file
diff --git a/stampede/documentation/docs/quickstart.md b/documentation/docs/quickstart.md
similarity index 72%
rename from stampede/documentation/docs/quickstart.md
rename to documentation/docs/quickstart.md
index 59efc215..46a4273f 100644
--- a/stampede/documentation/docs/quickstart.md
+++ b/documentation/docs/quickstart.md
@@ -20,29 +20,29 @@ ToroDB Stampede expects some basic configuration for the relational backend. The
 
This steps can be done with the next commands in a Linux environment:
 
```no-highlight
-$ sudo -u postgres createuser -S -R -D -P --interactive torodb
+sudo -u postgres createuser -S -R -D -P --interactive torodb
 
-$ sudo -u postgres createdb -O torodb torod
+sudo -u postgres createdb -O torodb torod
```
 
The easiest way to check if the database can be used is connecting to it using the new role. If it is accessible then ToroDB Stampede should be able to do replication using it.
 
```no-highlight
-$ psql -U torodb torod
+psql -U torodb torod
```
 
## How to execute ToroDB Stampede binary distribution?
 
-To execute ToroDB Stampede the binary distribution is necessary and it can be downloaded from [here](https://www.dropbox.com/s/54eyp7jyu8l70aa/torodb-stampede-0.50.0-SNAPSHOT.tar.bz2?dl=0). After download and when file is uncompressed then ToroDB Stampede can be launched using the PostgreSQL connection information.
+To execute ToroDB Stampede, the binary distribution is necessary; it can be downloaded from [here](https://www.torodb.com/download/torodb-stampede-1.0.0-beta2.tar.bz2). After downloading and uncompressing the file, ToroDB Stampede can be launched using the PostgreSQL connection information.
 
Following commands will allow ToroDB Stampede to be launched.
 
```no-highlight
-$ wget "https://www.dropbox.com/s/54eyp7jyu8l70aa/torodb-stampede-0.50.0-SNAPSHOT.tar.bz2?dl=0" -O torodb-stampede-0.50.0-SNAPSHOT.tar.bz2
+wget "https://www.torodb.com/download/torodb-stampede-1.0.0-beta2.tar.bz2"
 
-$ tar xjf torodb-stampede-0.50.0-SNAPSHOT.tar.bz2
+tar xjf torodb-stampede-1.0.0-beta2.tar.bz2
 
-$ torodb-stampede-0.50.0-SNAPSHOT/bin/torodb-stampede --ask-for-password
+torodb-stampede-1.0.0-beta2/bin/torodb-stampede --ask-for-password
```
 
ToroDB Stampede will ask for the PostgreSQL torodb user's password to be provided. If all goes fine, ToroDB Stampede is up and running and it will be replicating the operations done in MongoDB.
@@ -51,18 +51,18 @@ ToroDB Stampede will ask for the PostgreSQL torodb user's password to be provide
 
It is easier to understand what ToroDB Stampede does through an example. One dataset will be imported in MongoDB and all data will be available in PostgreSQL thanks to Stampede replication.
 
-If previous steps are done and ToroDB Stampede is up and running, the dataset can be downloaded from [here](https://www.dropbox.com/s/570d4tyt4hpsn03/primer-dataset.json?dl=0) and the replication done using `mongoimport` command.
+If the previous steps are done and ToroDB Stampede is up and running, the dataset can be downloaded from [here](https://www.torodb.com/download/primer-dataset.json) and replicated using the `mongoimport` command.
 
```no-highlight
-$ wget https://www.dropbox.com/s/570d4tyt4hpsn03/primer-dataset.json?dl=0
+wget https://www.torodb.com/download/primer-dataset.json
 
-$ mongoimport -d stampede -c primer primer-dataset.json
+mongoimport -d stampede -c primer primer-dataset.json
```
 
When `mongoimport` finished and replication complete PostgreSQL should have the replicated structure and data stored in the `stampede` schema, because that was the name selected for the database in the `mongoimport` command. Connecting to PostgreSQL console, the data can be accessed.
 
```no-highlight
-$ sudo -u torodb psql torod
+sudo -u torodb psql torod
 
> set schema 'stampede'
```
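+
+Once connected and with the schema set, the replicated tables can be explored with standard PostgreSQL commands. The following is only a suggestion for inspecting the result, not part of the documented steps; the root table created for the imported collection is assumed here to be named `primer`:
+
+```no-highlight
+> \dt
+
+> SELECT count(*) FROM primer;
+```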
diff --git a/stampede/documentation/docs/real-app.md b/documentation/docs/real-app.md
similarity index 100%
rename from stampede/documentation/docs/real-app.md
rename to documentation/docs/real-app.md
diff --git a/stampede/documentation/docs/supported-backends.md b/documentation/docs/supported-backends.md
similarity index 100%
rename from stampede/documentation/docs/supported-backends.md
rename to documentation/docs/supported-backends.md
diff --git a/stampede/documentation/mkdocs.yml b/documentation/mkdocs.yml
similarity index 68%
rename from stampede/documentation/mkdocs.yml
rename to documentation/mkdocs.yml
index 7b061872..b96d7263 100644
--- a/stampede/documentation/mkdocs.yml
+++ b/documentation/mkdocs.yml
@@ -8,10 +8,15 @@ pages:
  - 'Previous requirements': installation/previous-requirements.md
  - 'Installation with Docker': installation/docker.md
  - 'Installation with binaries': installation/binaries.md
+  - 'Installation with SNAP': installation/snap.md
+  - 'Installation for Ubuntu/Debian': installation/deb.md
+  - 'Installation for Fedora/CentOS': installation/rpm.md
  - 'Installation from source code': installation/source-code.md
  - 'Configuration': installation/configuration.md
  - 'PostgreSQL configuration tips': installation/postgresql-configuration-tips.md
+  - 'Java Virtual Machine configuration tips': installation/jvm-configuration-tips.md
 - 'How to use?': how-to-use.md
+- 'Metrics': metrics.md
 - 'FAQ': faq.md
 - 'Glossary': glossary.md
 - 'Appendix': appendix.md
@@ -19,3 +24,4 @@ extra_css:
  - css/8kdata.css
markdown_extensions:
  - admonition:
+google_analytics: ['UA-44578894-3', 'www.torodb.com']
\ No newline at end of file
diff --git a/engine/README.md b/engine/README.md
deleted file mode 100644
index 093bb8cf..00000000
--- a/engine/README.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# ToroDB Engine
-
-ToroDB engine is the core technology used by ToroDB Server and ToroDB Stampede.
diff --git a/engine/backend/common/pom.xml b/engine/backend/common/pom.xml deleted file mode 100644 index c648716b..00000000 --- a/engine/backend/common/pom.xml +++ /dev/null @@ -1,125 +0,0 @@ - - - 4.0.0 - - - com.torodb.engine.backend - backend-pom - 0.50.0 - - - backend-common - ToroDB: Backend common - jar - - - - org.apache.logging.log4j - log4j-api - - - com.google.guava - guava - - - com.google.code.findbugs - annotations - - - org.jooq - jooq - - - com.zaxxer - HikariCP - - - com.torodb.engine.kvdocument - kvdocument-core - ${project.version} - - - com.torodb.engine - metainfo-cache - ${project.version} - - - com.torodb.engine - core - ${project.version} - - - javax.json - javax.json-api - - - javax.inject - javax.inject - - - - junit - junit - test - - - org.mockito - mockito-core - test - - - com.torodb.engine.kvdocument - json-converter - ${project.version} - test - - - com.torodb.engine - d2r - ${project.version} - test - - - com.torodb.engine - d2r - ${project.version} - test-jar - test - - - com.google.inject - guice - - - org.apache.logging.log4j - log4j-core - - - com.torodb.engine - concurrent - ${project.version} - - - com.google.inject.extensions - guice-assistedinject - - - - - - - org.apache.maven.plugins - maven-jar-plugin - 2.6 - - - - test-jar - - - - - - - diff --git a/engine/backend/common/src/main/java/com/torodb/backend/AbstractCursor.java b/engine/backend/common/src/main/java/com/torodb/backend/AbstractCursor.java deleted file mode 100644 index 8cc2725a..00000000 --- a/engine/backend/common/src/main/java/com/torodb/backend/AbstractCursor.java +++ /dev/null @@ -1,101 +0,0 @@ -/* - * ToroDB - * Copyright © 2014 8Kdata Technology (www.8kdata.com) - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . 
- */ - -package com.torodb.backend; - -import com.torodb.backend.ErrorHandler.Context; -import com.torodb.core.cursors.Cursor; - -import java.sql.ResultSet; -import java.sql.SQLException; -import java.util.ArrayList; -import java.util.List; - -import javax.annotation.Nonnull; - -public abstract class AbstractCursor implements Cursor { - - public final ErrorHandler errorHandler; - public final ResultSet resultSet; - public boolean movedNext = false; - public boolean hasNext = false; - - public AbstractCursor(@Nonnull ErrorHandler errorHandler, @Nonnull ResultSet resultSet) { - this.errorHandler = errorHandler; - this.resultSet = resultSet; - } - - @Override - public boolean hasNext() { - try { - if (!movedNext) { - hasNext = resultSet.next(); - movedNext = true; - } - - return hasNext; - } catch (SQLException ex) { - throw errorHandler.handleException(Context.FETCH, ex); - } - } - - @Override - public T next() { - try { - hasNext(); - movedNext = false; - - return read(resultSet); - } catch (SQLException ex) { - throw errorHandler.handleException(Context.FETCH, ex); - } - } - - protected abstract T read(ResultSet resultSet) throws SQLException; - - @Override - public void close() { - try { - resultSet.close(); - } catch (SQLException ex) { - throw errorHandler.handleException(Context.FETCH, ex); - } - } - - @Override - public List getNextBatch(final int maxSize) { - List batch = new ArrayList<>(); - - for (int index = 0; index < maxSize && hasNext(); index++) { - batch.add(next()); - } - - return batch; - } - - @Override - public List getRemaining() { - List batch = new ArrayList<>(); - - while (hasNext()) { - batch.add(next()); - } - - return batch; - } -} diff --git a/engine/backend/common/src/main/java/com/torodb/backend/AbstractDataTypeProvider.java b/engine/backend/common/src/main/java/com/torodb/backend/AbstractDataTypeProvider.java deleted file mode 100644 index 93f15094..00000000 --- a/engine/backend/common/src/main/java/com/torodb/backend/AbstractDataTypeProvider.java +++ /dev/null @@ -1,55 +0,0 @@ -/* - * ToroDB - * Copyright © 2014 8Kdata Technology (www.8kdata.com) - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . 
- */ - -package com.torodb.backend; - -import com.google.common.collect.ImmutableMap; -import com.torodb.backend.converters.jooq.DataTypeForKv; -import com.torodb.core.transaction.metainf.FieldType; - -import javax.inject.Singleton; - -/** - * - */ -@Singleton -public abstract class AbstractDataTypeProvider implements DataTypeProvider { - - private final ImmutableMap> dataTypes; - - protected AbstractDataTypeProvider(ImmutableMap> dataTypes) { - this.dataTypes = ImmutableMap.>builder() - .putAll(dataTypes) - .build(); - - //Check that all data types are specified or throw IllegalArgumentException - for (FieldType fieldType : FieldType.values()) { - getDataType(fieldType); - } - } - - @Override - public DataTypeForKv getDataType(FieldType type) { - DataTypeForKv dataType = dataTypes.get(type); - if (dataType == null) { - throw new IllegalArgumentException("It is not defined how to map elements of type " + type - + " to SQL"); - } - return dataType; - } -} diff --git a/engine/backend/common/src/main/java/com/torodb/backend/AbstractDbBackendService.java b/engine/backend/common/src/main/java/com/torodb/backend/AbstractDbBackendService.java deleted file mode 100644 index adcc87c6..00000000 --- a/engine/backend/common/src/main/java/com/torodb/backend/AbstractDbBackendService.java +++ /dev/null @@ -1,264 +0,0 @@ -/* - * ToroDB - * Copyright © 2014 8Kdata Technology (www.8kdata.com) - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . - */ - -package com.torodb.backend; - -import com.google.common.base.Preconditions; -import com.torodb.backend.ErrorHandler.Context; -import com.torodb.core.annotations.TorodbIdleService; -import com.torodb.core.services.IdleTorodbService; -import com.zaxxer.hikari.HikariConfig; -import com.zaxxer.hikari.HikariDataSource; -import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; - -import java.sql.Connection; -import java.sql.SQLException; -import java.util.concurrent.ThreadFactory; - -import javax.annotation.Nonnull; -import javax.sql.DataSource; - -/** - * - */ -public abstract class AbstractDbBackendService - extends IdleTorodbService implements DbBackendService { - - private static final Logger LOGGER = LogManager.getLogger(AbstractDbBackendService.class); - - public static final int SYSTEM_DATABASE_CONNECTIONS = 1; - public static final int MIN_READ_CONNECTIONS_DATABASE = 1; - public static final int MIN_SESSION_CONNECTIONS_DATABASE = 2; - public static final int MIN_CONNECTIONS_DATABASE = SYSTEM_DATABASE_CONNECTIONS - + MIN_READ_CONNECTIONS_DATABASE - + MIN_SESSION_CONNECTIONS_DATABASE; - - private final ConfigurationT configuration; - private final ErrorHandler errorHandler; - private HikariDataSource writeDataSource; - private HikariDataSource systemDataSource; - private HikariDataSource readOnlyDataSource; - /** - * Global state variable for data import mode. 
If true data import mode is enabled, data import - * mode is otherwise disabled. Indexes will not be created while data import mode is enabled. When - * this mode is enabled importing data will be faster. - */ - private volatile boolean dataImportMode; - - /** - * Configure the backend. The contract specifies that any subclass must call initialize() method - * after properly constructing the object. - * - * @param threadFactory the thread factory that will be used to create the startup and shutdown - * threads - * @param configuration - * @param errorHandler - */ - public AbstractDbBackendService(@TorodbIdleService ThreadFactory threadFactory, - ConfigurationT configuration, ErrorHandler errorHandler) { - super(threadFactory); - this.configuration = configuration; - this.errorHandler = errorHandler; - this.dataImportMode = false; - - int connectionPoolSize = configuration.getConnectionPoolSize(); - int reservedReadPoolSize = configuration.getReservedReadPoolSize(); - Preconditions.checkState( - connectionPoolSize >= MIN_CONNECTIONS_DATABASE, - "At least " + MIN_CONNECTIONS_DATABASE - + " total connections with the backend SQL database are required" - ); - Preconditions.checkState( - reservedReadPoolSize >= MIN_READ_CONNECTIONS_DATABASE, - "At least " + MIN_READ_CONNECTIONS_DATABASE + " read connection(s) is(are) required" - ); - Preconditions.checkState( - connectionPoolSize - reservedReadPoolSize >= MIN_SESSION_CONNECTIONS_DATABASE, - "Reserved read connections must be lower than total connections minus " - + MIN_SESSION_CONNECTIONS_DATABASE - ); - } - - @Override - protected void startUp() throws Exception { - int reservedReadPoolSize = configuration.getReservedReadPoolSize(); - - writeDataSource = createPooledDataSource( - configuration, "session", - configuration.getConnectionPoolSize() - reservedReadPoolSize - SYSTEM_DATABASE_CONNECTIONS, - getCommonTransactionIsolation(), - false - ); - systemDataSource = createPooledDataSource( - configuration, "system", - SYSTEM_DATABASE_CONNECTIONS, - getSystemTransactionIsolation(), - false); - readOnlyDataSource = createPooledDataSource( - configuration, "cursors", - reservedReadPoolSize, - getGlobalCursorTransactionIsolation(), - true); - } - - @Override - @SuppressFBWarnings(value = "UWF_FIELD_NOT_INITIALIZED_IN_CONSTRUCTOR", - justification = - "Object lifecyle is managed as a Service. 
Datasources are initialized in setup method") - protected void shutDown() throws Exception { - writeDataSource.close(); - systemDataSource.close(); - readOnlyDataSource.close(); - } - - @Nonnull - protected abstract TransactionIsolationLevel getCommonTransactionIsolation(); - - @Nonnull - protected abstract TransactionIsolationLevel getSystemTransactionIsolation(); - - @Nonnull - protected abstract TransactionIsolationLevel getGlobalCursorTransactionIsolation(); - - private HikariDataSource createPooledDataSource( - ConfigurationT configuration, String poolName, int poolSize, - TransactionIsolationLevel transactionIsolationLevel, - boolean readOnly - ) { - HikariConfig hikariConfig = new HikariConfig(); - - // Delegate database-specific setting of connection parameters and any specific configuration - hikariConfig.setDataSource(getConfiguredDataSource(configuration, poolName)); - - // Apply ToroDB-specific datasource configuration - hikariConfig.setConnectionTimeout(configuration.getConnectionPoolTimeout()); - hikariConfig.setPoolName(poolName); - hikariConfig.setMaximumPoolSize(poolSize); - hikariConfig.setTransactionIsolation(transactionIsolationLevel.name()); - hikariConfig.setReadOnly(readOnly); - /* - * TODO: implement to add metric support. See - * https://github.com/brettwooldridge/HikariCP/wiki/Codahale-Metrics - * hikariConfig.setMetricRegistry(...); - */ - - LOGGER.info("Created pool {} with size {} and level {}", poolName, poolSize, - transactionIsolationLevel.name()); - - return new HikariDataSource(hikariConfig); - } - - protected abstract DataSource getConfiguredDataSource(ConfigurationT configuration, - String poolName); - - @Override - public void disableDataInsertMode() { - this.dataImportMode = false; - } - - @Override - public void enableDataInsertMode() { - this.dataImportMode = true; - } - - @Override - public DataSource getSessionDataSource() { - checkState(); - - return writeDataSource; - } - - @Override - public DataSource getSystemDataSource() { - checkState(); - - return systemDataSource; - } - - @Override - public DataSource getGlobalCursorDatasource() { - checkState(); - - return readOnlyDataSource; - } - - protected void checkState() { - if (!isRunning()) { - throw new IllegalStateException("The " + serviceName() + " is not running"); - } - } - - @Override - public long getDefaultCursorTimeout() { - return configuration.getCursorTimeout(); - } - - @Override - public boolean isOnDataInsertMode() { - return dataImportMode; - } - - @Override - public boolean includeForeignKeys() { - return configuration.includeForeignKeys(); - } - - protected void postConsume(Connection connection, boolean readOnly) throws SQLException { - connection.setReadOnly(readOnly); - if (!connection.isValid(500)) { - throw new RuntimeException("DB connection is not valid"); - } - connection.setAutoCommit(false); - } - - private Connection consumeConnection(DataSource ds, boolean readOnly) { - checkState(); - - try { - Connection c = ds.getConnection(); - postConsume(c, readOnly); - - return c; - } catch (SQLException ex) { - throw errorHandler.handleException(Context.GET_CONNECTION, ex); - } - } - - @Override - public Connection createSystemConnection() { - checkState(); - - return consumeConnection(systemDataSource, false); - } - - @Override - public Connection createReadOnlyConnection() { - checkState(); - - return consumeConnection(readOnlyDataSource, true); - } - - @Override - public Connection createWriteConnection() { - checkState(); - - return consumeConnection(writeDataSource, 
false); - } -} diff --git a/engine/backend/common/src/main/java/com/torodb/backend/AbstractErrorHandler.java b/engine/backend/common/src/main/java/com/torodb/backend/AbstractErrorHandler.java deleted file mode 100644 index 5dce399a..00000000 --- a/engine/backend/common/src/main/java/com/torodb/backend/AbstractErrorHandler.java +++ /dev/null @@ -1,192 +0,0 @@ -/* - * ToroDB - * Copyright © 2014 8Kdata Technology (www.8kdata.com) - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . - */ - -package com.torodb.backend; - -import com.google.common.collect.ImmutableList; -import com.google.common.collect.ImmutableSet; -import com.torodb.backend.exceptions.BackendException; -import com.torodb.core.exceptions.ToroRuntimeException; -import com.torodb.core.exceptions.user.UserException; -import com.torodb.core.transaction.RollbackException; -import org.jooq.exception.DataAccessException; - -import java.sql.SQLException; -import java.util.Optional; -import java.util.function.Function; - -import javax.inject.Singleton; - -/** - * - */ -@Singleton -public abstract class AbstractErrorHandler implements ErrorHandler { - - private final ImmutableList rollbackRules; - private final ImmutableList userRules; - - protected AbstractErrorHandler(Rule... 
rules) { - ImmutableList.Builder rollbackRulesBuilder = - ImmutableList.builder(); - ImmutableList.Builder userRulesBuilder = - ImmutableList.builder(); - - for (Rule rule : rules) { - if (rule instanceof RollbackRule) { - rollbackRulesBuilder.add((RollbackRule) rule); - } else if (rule instanceof UserRule) { - userRulesBuilder.add((UserRule) rule); - } - } - - this.rollbackRules = rollbackRulesBuilder.build(); - this.userRules = userRulesBuilder.build(); - } - - @Override - public ToroRuntimeException handleException(Context context, SQLException sqlException) throws - RollbackException { - try { - return handleUserException(context, sqlException); - } catch (UserException userException) { - return new BackendException(context, sqlException); - } - } - - @Override - public ToroRuntimeException handleException(Context context, - DataAccessException dataAccessException) throws RollbackException { - try { - return handleUserException(context, dataAccessException); - } catch (UserException userException) { - return new BackendException(context, dataAccessException); - } - } - - @Override - public ToroRuntimeException handleUserException(Context context, SQLException sqlException) throws - UserException, RollbackException { - if (applyToUserRule(context, sqlException.getSQLState())) { - throw createUserException(context, sqlException.getSQLState(), new BackendException(context, - sqlException)); - } - - if (applyToRollbackRule(context, sqlException.getSQLState())) { - throw new RollbackException(sqlException); - } - - return new BackendException(context, sqlException); - } - - @Override - public ToroRuntimeException handleUserException(Context context, - DataAccessException dataAccessException) throws UserException, RollbackException { - if (applyToUserRule(context, dataAccessException.sqlState())) { - throw createUserException(context, dataAccessException.sqlState(), new BackendException( - context, dataAccessException)); - } - - if (applyToRollbackRule(context, dataAccessException.sqlState())) { - throw new RollbackException(dataAccessException); - } - - return new BackendException(context, dataAccessException); - } - - private boolean applyToRollbackRule(Context context, String sqlState) { - return rollbackRules.stream() - .anyMatch(r -> - r.getSqlCode().equals(sqlState) && (r.getContexts().isEmpty() || r.getContexts() - .contains(context))); - } - - private boolean applyToUserRule(Context context, String sqlState) { - return userRules.stream() - .anyMatch(r -> - r.getSqlCode().equals(sqlState) && (r.getContexts().isEmpty() || r.getContexts() - .contains(context))); - } - - private UserException createUserException(Context context, String sqlState, - BackendException backendException) { - Optional userRule = userRules.stream() - .filter(r -> - r.getSqlCode().equals(sqlState) && (r.getContexts().isEmpty() || r.getContexts() - .contains(context))) - .findFirst(); - if (userRule.isPresent()) { - return userRule.get().translate(backendException); - } - - throw new IllegalArgumentException("User exception not found for context " + context - + " and sqlState " + sqlState); - } - - protected static Rule rollbackRule(String sqlCode, Context... contexts) { - return new RollbackRule(sqlCode, contexts); - } - - protected static Rule userRule( - String sqlCode, Function translateFunction, - Context... 
contexts) { - return new UserRule(sqlCode, contexts, translateFunction); - } - - protected abstract static class Rule { - - private final String sqlCode; - private final ImmutableSet contexts; - - private Rule(String code, Context[] contexts) { - this.sqlCode = code; - this.contexts = ImmutableSet.copyOf(contexts); - } - - public String getSqlCode() { - return sqlCode; - } - - public ImmutableSet getContexts() { - return contexts; - } - } - - protected static class RollbackRule extends Rule { - - private RollbackRule(String code, Context[] contexts) { - super(code, contexts); - } - } - - protected static class UserRule extends Rule { - - private final Function translateFunction; - - private UserRule(String code, Context[] contexts, - Function translateFunction) { - super(code, contexts); - - this.translateFunction = translateFunction; - } - - public UserException translate(BackendException backendException) { - return translateFunction.apply(backendException); - } - } -} diff --git a/engine/backend/common/src/main/java/com/torodb/backend/AbstractIdentifierConstraints.java b/engine/backend/common/src/main/java/com/torodb/backend/AbstractIdentifierConstraints.java deleted file mode 100644 index e878f783..00000000 --- a/engine/backend/common/src/main/java/com/torodb/backend/AbstractIdentifierConstraints.java +++ /dev/null @@ -1,159 +0,0 @@ -/* - * ToroDB - * Copyright © 2014 8Kdata Technology (www.8kdata.com) - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . 
- */ - -package com.torodb.backend; - -import com.google.common.collect.ImmutableMap; -import com.google.common.collect.ImmutableSet; -import com.google.common.collect.Maps; -import com.torodb.backend.meta.TorodbSchema; -import com.torodb.backend.tables.MetaDocPartTable.DocPartTableFields; -import com.torodb.core.backend.IdentifierConstraints; -import com.torodb.core.exceptions.SystemException; -import com.torodb.core.transaction.metainf.FieldType; - -import java.util.HashSet; -import java.util.Set; - -import javax.annotation.Nonnull; -import javax.inject.Singleton; - -@Singleton -public abstract class AbstractIdentifierConstraints implements IdentifierConstraints { - - private static final char SEPARATOR = '_'; - private static final char ARRAY_DIMENSION_SEPARATOR = '$'; - - private final ImmutableMap fieldTypeIdentifiers; - private final ImmutableMap scalarFieldTypeIdentifiers; - private final ImmutableSet restrictedSchemaNames; - private final ImmutableSet restrictedColumnNames; - - protected AbstractIdentifierConstraints(ImmutableSet restrictedSchemaNames, - ImmutableSet restrictedColumnNames) { - this.fieldTypeIdentifiers = Maps.immutableEnumMap(ImmutableMap.builder() - .put(FieldType.BINARY, 'r') // [r]aw bytes - .put(FieldType.BOOLEAN, 'b') // [b]oolean - .put(FieldType.DOUBLE, 'd') // [d]ouble - .put(FieldType.INSTANT, 't') // [t]imestamp - .put(FieldType.INTEGER, 'i') // [i]nteger - .put(FieldType.LONG, 'l') // [l]ong - .put(FieldType.NULL, 'n') // [n]ull - .put(FieldType.STRING, 's') // [s]tring - .put(FieldType.CHILD, 'e') // child [e]lement - - // Mongo types - .put(FieldType.MONGO_OBJECT_ID, 'x') - .put(FieldType.MONGO_TIME_STAMP, 'y') - // No-Mongo types - .put(FieldType.DATE, 'c') // [c]alendar - .put(FieldType.TIME, 'm') // ti[m]e - - .build()); - - ImmutableMap.Builder scalarFieldTypeIdentifiersBuilder = - ImmutableMap.builder(); - Set fieldTypeIdentifierSet = new HashSet<>(); - for (FieldType fieldType : FieldType.values()) { - if (!this.fieldTypeIdentifiers.containsKey(fieldType)) { - throw new SystemException("FieldType " + fieldType - + " has not been mapped to an identifier."); - } - - char identifier = this.fieldTypeIdentifiers.get(fieldType); - - if ((identifier < 'a' || identifier > 'z') && (identifier < '0' || identifier > '9')) { - throw new SystemException("FieldType " + fieldType + " has an unallowed identifier " - + identifier); - } - - if (fieldTypeIdentifierSet.contains(identifier)) { - throw new SystemException("FieldType " + fieldType + " identifier " - + identifier + " was used by another FieldType."); - } - - fieldTypeIdentifierSet.add(identifier); - - scalarFieldTypeIdentifiersBuilder.put(fieldType, DocPartTableFields.SCALAR.fieldName - + SEPARATOR + identifier); - } - - this.scalarFieldTypeIdentifiers = Maps.immutableEnumMap(scalarFieldTypeIdentifiersBuilder - .build()); - - this.restrictedSchemaNames = ImmutableSet.builder() - .add(TorodbSchema.IDENTIFIER) - .addAll(restrictedSchemaNames) - .build(); - - this.restrictedColumnNames = ImmutableSet.builder() - .add(DocPartTableFields.DID.fieldName) - .add(DocPartTableFields.RID.fieldName) - .add(DocPartTableFields.PID.fieldName) - .add(DocPartTableFields.SEQ.fieldName) - .addAll(scalarFieldTypeIdentifiers.values()) - .addAll(restrictedColumnNames) - .build(); - } - - @Override - public char getSeparator() { - return SEPARATOR; - } - - @Override - public char getArrayDimensionSeparator() { - return ARRAY_DIMENSION_SEPARATOR; - } - - @Override - public boolean isAllowedSchemaIdentifier(@Nonnull String 
schemaName) { - return !restrictedSchemaNames.contains(schemaName); - } - - @Override - public boolean isAllowedTableIdentifier(@Nonnull String columnName) { - return true; - } - - @Override - public boolean isAllowedColumnIdentifier(@Nonnull String columnName) { - return !restrictedColumnNames.contains(columnName); - } - - @Override - public boolean isAllowedIndexIdentifier(@Nonnull String indexName) { - return true; - } - - @Override - public boolean isSameIdentifier(@Nonnull String leftIdentifier, @Nonnull String rightIdentifier) { - return leftIdentifier.equals(rightIdentifier); - //leftIdentifier.toLowerCase(Locale.US).equals(rightIdentifier.toLowerCase(Locale.US)); - } - - @Override - public char getFieldTypeIdentifier(FieldType fieldType) { - return fieldTypeIdentifiers.get(fieldType); - } - - @Override - public String getScalarIdentifier(FieldType fieldType) { - return scalarFieldTypeIdentifiers.get(fieldType); - } -} diff --git a/engine/backend/common/src/main/java/com/torodb/backend/AbstractMetaDataReadInterface.java b/engine/backend/common/src/main/java/com/torodb/backend/AbstractMetaDataReadInterface.java deleted file mode 100644 index 9bce79c0..00000000 --- a/engine/backend/common/src/main/java/com/torodb/backend/AbstractMetaDataReadInterface.java +++ /dev/null @@ -1,242 +0,0 @@ -/* - * ToroDB - * Copyright © 2014 8Kdata Technology (www.8kdata.com) - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . 
- */ - -package com.torodb.backend; - -import com.google.common.base.Preconditions; -import com.torodb.backend.ErrorHandler.Context; -import com.torodb.backend.tables.KvTable; -import com.torodb.backend.tables.MetaDocPartTable; -import com.torodb.backend.tables.records.KvRecord; -import com.torodb.backend.tables.records.MetaDatabaseRecord; -import com.torodb.core.TableRef; -import com.torodb.core.backend.MetaInfoKey; -import com.torodb.core.transaction.metainf.MetaCollection; -import com.torodb.core.transaction.metainf.MetaDatabase; -import com.torodb.core.transaction.metainf.MetaDocPart; -import com.torodb.core.transaction.metainf.MetaIdentifiedDocPartIndex; -import com.torodb.core.transaction.metainf.MetaIndex; -import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; -import org.jooq.Condition; -import org.jooq.DSLContext; -import org.jooq.Record; -import org.jooq.Record1; -import org.jooq.Result; - -import java.util.Collection; -import java.util.Iterator; -import java.util.Optional; -import java.util.stream.Stream; - -import javax.annotation.Nonnull; -import javax.inject.Inject; -import javax.inject.Singleton; - -/** - * - */ -@Singleton -@SuppressFBWarnings("SQL_PREPARED_STATEMENT_GENERATED_FROM_NONCONSTANT_STRING") -public abstract class AbstractMetaDataReadInterface implements MetaDataReadInterface { - - private final MetaDocPartTable metaDocPartTable; - private final SqlHelper sqlHelper; - - @Inject - public AbstractMetaDataReadInterface(MetaDocPartTable metaDocPartTable, - SqlHelper sqlHelper) { - this.metaDocPartTable = metaDocPartTable; - this.sqlHelper = sqlHelper; - } - - @Override - public long getDatabaseSize( - @Nonnull DSLContext dsl, - @Nonnull MetaDatabase database - ) { - String statement = getReadSchemaSizeStatement(database.getIdentifier()); - Result result = sqlHelper.executeStatementWithResult(dsl, statement, Context.FETCH, - ps -> { - ps.setString(1, database.getName()); - } - ); - - if (result.isEmpty()) { - return 0; - } - - Long resultSize = result.get(0).into(Long.class); - - if (resultSize == null) { - return 0; - } - - return resultSize; - } - - protected abstract String getReadSchemaSizeStatement(String databaseName); - - @Override - public long getCollectionSize( - @Nonnull DSLContext dsl, - @Nonnull MetaDatabase database, - @Nonnull MetaCollection collection - ) { - String statement = getReadCollectionSizeStatement(); - return sqlHelper.executeStatementWithResult(dsl, statement, Context.FETCH, - ps -> { - ps.setString(1, database.getName()); - ps.setString(2, database.getIdentifier()); - ps.setString(3, collection.getName()); - }) - .get(0) - .into(Long.class); - } - - protected abstract String getReadCollectionSizeStatement(); - - @Override - public long getDocumentsSize( - @Nonnull DSLContext dsl, - @Nonnull MetaDatabase database, - @Nonnull MetaCollection collection - ) { - String statement = getReadDocumentsSizeStatement(); - return sqlHelper.executeStatementWithResult(dsl, statement, Context.FETCH, - ps -> { - ps.setString(1, database.getName()); - ps.setString(2, database.getIdentifier()); - ps.setString(3, collection.getName()); - }) - .get(0) - .into(Long.class); - } - - protected abstract String getReadDocumentsSizeStatement(); - - @Override - public Long getIndexSize( - @Nonnull DSLContext dsl, @Nonnull MetaDatabase database, - @Nonnull MetaCollection collection, @Nonnull String indexName) { - long result = 0; - MetaIndex index = collection.getMetaIndexByName(indexName); - Iterator tableRefIterator = index.streamTableRefs().iterator(); - while 
(tableRefIterator.hasNext()) { - TableRef tableRef = tableRefIterator.next(); - MetaDocPart docPart = collection.getMetaDocPartByTableRef(tableRef); - Iterator docPartIndexIterator = docPart.streamIndexes() - .iterator(); - while (docPartIndexIterator.hasNext()) { - MetaIdentifiedDocPartIndex docPartIndex = docPartIndexIterator.next(); - if (index.isCompatible(docPart, docPartIndex)) { - long relatedIndexCount = collection.streamContainedMetaIndexes() - .filter(i -> i.isCompatible(docPart, docPartIndex)).count(); - String statement = getReadIndexSizeStatement(database.getIdentifier(), - docPart.getIdentifier(), docPartIndex.getIdentifier()); - result += sqlHelper.executeStatementWithResult(dsl, statement, Context.FETCH) - .get(0).into(Long.class) / relatedIndexCount; - } - } - } - return result; - } - - protected abstract String getReadIndexSizeStatement( - String schemaName, String tableName, String indexName); - - @Override - public Collection> getInternalFields(MetaDocPart metaDocPart) { - TableRef tableRef = metaDocPart.getTableRef(); - return getInternalFields(tableRef); - } - - @Override - public Collection> getInternalFields(TableRef tableRef) { - if (tableRef.isRoot()) { - return metaDocPartTable.ROOT_FIELDS; - } else if (tableRef.getParent().get().isRoot()) { - return metaDocPartTable.FIRST_FIELDS; - } - return metaDocPartTable.FIELDS; - } - - @Override - public Collection> getPrimaryKeyInternalFields(TableRef tableRef) { - if (tableRef.isRoot()) { - return metaDocPartTable.PRIMARY_KEY_ROOT_FIELDS; - } else if (tableRef.getParent().get().isRoot()) { - return metaDocPartTable.PRIMARY_KEY_FIRST_FIELDS; - } - return metaDocPartTable.PRIMARY_KEY_FIELDS; - } - - @Override - public Collection> getReferenceInternalFields(TableRef tableRef) { - Preconditions.checkArgument(!tableRef.isRoot()); - if (tableRef.getParent().get().isRoot()) { - return metaDocPartTable.REFERENCE_FIRST_FIELDS; - } - return metaDocPartTable.REFERENCE_FIELDS; - } - - @Override - public Collection> getForeignInternalFields(TableRef tableRef) { - Preconditions.checkArgument(!tableRef.isRoot()); - TableRef parentTableRef = tableRef.getParent().get(); - if (parentTableRef.isRoot()) { - return metaDocPartTable.FOREIGN_ROOT_FIELDS; - } else if (parentTableRef.getParent().get().isRoot()) { - return metaDocPartTable.FOREIGN_FIRST_FIELDS; - } - return metaDocPartTable.FOREIGN_FIELDS; - } - - @Override - public Collection> getReadInternalFields(MetaDocPart metaDocPart) { - TableRef tableRef = metaDocPart.getTableRef(); - return getReadInternalFields(tableRef); - } - - @Override - public Collection> getReadInternalFields(TableRef tableRef) { - if (tableRef.isRoot()) { - return metaDocPartTable.READ_ROOT_FIELDS; - } else if (tableRef.getParent().get().isRoot()) { - return metaDocPartTable.READ_FIRST_FIELDS; - } - return metaDocPartTable.READ_FIELDS; - } - - @Override - public Optional readKv(DSLContext dsl, MetaInfoKey key) { - KvTable kvTable = getKvTable(); - Condition c = kvTable.KEY.eq(key.getKeyName()); - - return dsl.select(kvTable.VALUE) - .from(kvTable) - .where(c) - .fetchOptional() - .map(Record1::value1); - } - - @Override - public Stream readMetaDatabaseTable(DSLContext dsl) { - return dsl.selectFrom(getMetaDatabaseTable()) - .fetchStream(); - } -} diff --git a/engine/backend/common/src/main/java/com/torodb/backend/AbstractMetaDataWriteInterface.java b/engine/backend/common/src/main/java/com/torodb/backend/AbstractMetaDataWriteInterface.java deleted file mode 100644 index 3d153992..00000000 --- 
a/engine/backend/common/src/main/java/com/torodb/backend/AbstractMetaDataWriteInterface.java +++ /dev/null @@ -1,554 +0,0 @@ -/* - * ToroDB - * Copyright © 2014 8Kdata Technology (www.8kdata.com) - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . - */ - -package com.torodb.backend; - -import com.torodb.backend.ErrorHandler.Context; -import com.torodb.backend.tables.KvTable; -import com.torodb.backend.tables.MetaCollectionTable; -import com.torodb.backend.tables.MetaDatabaseTable; -import com.torodb.backend.tables.MetaDocPartIndexColumnTable; -import com.torodb.backend.tables.MetaDocPartIndexTable; -import com.torodb.backend.tables.MetaDocPartTable; -import com.torodb.backend.tables.MetaFieldTable; -import com.torodb.backend.tables.MetaIndexFieldTable; -import com.torodb.backend.tables.MetaIndexTable; -import com.torodb.backend.tables.MetaScalarTable; -import com.torodb.core.TableRef; -import com.torodb.core.backend.MetaInfoKey; -import com.torodb.core.transaction.metainf.FieldIndexOrdering; -import com.torodb.core.transaction.metainf.FieldType; -import com.torodb.core.transaction.metainf.MetaCollection; -import com.torodb.core.transaction.metainf.MetaDatabase; -import com.torodb.core.transaction.metainf.MetaDocPart; -import com.torodb.core.transaction.metainf.MetaDocPartIndexColumn; -import com.torodb.core.transaction.metainf.MetaField; -import com.torodb.core.transaction.metainf.MetaIdentifiedDocPartIndex; -import com.torodb.core.transaction.metainf.MetaIndex; -import com.torodb.core.transaction.metainf.MetaIndexField; -import com.torodb.core.transaction.metainf.MetaScalar; -import org.jooq.Condition; -import org.jooq.DSLContext; -import org.jooq.Record1; -import org.jooq.TableField; -import org.jooq.conf.ParamType; - -import java.util.Optional; - -import javax.inject.Singleton; - -@Singleton -public abstract class AbstractMetaDataWriteInterface implements MetaDataWriteInterface { - - private final MetaDatabaseTable metaDatabaseTable; - private final MetaCollectionTable metaCollectionTable; - private final MetaDocPartTable metaDocPartTable; - private final MetaFieldTable metaFieldTable; - private final MetaScalarTable metaScalarTable; - private final MetaIndexTable metaIndexTable; - private final MetaIndexFieldTable metaIndexFieldTable; - private final MetaDocPartIndexTable metaDocPartIndexTable; - private final MetaDocPartIndexColumnTable metaDocPartIndexColumnTable; - private final KvTable kvTable; - private final SqlHelper sqlHelper; - - public AbstractMetaDataWriteInterface(MetaDataReadInterface metaDataReadInterface, - SqlHelper sqlHelper) { - this.metaDatabaseTable = metaDataReadInterface.getMetaDatabaseTable(); - this.metaCollectionTable = metaDataReadInterface.getMetaCollectionTable(); - this.metaDocPartTable = metaDataReadInterface.getMetaDocPartTable(); - this.metaFieldTable = metaDataReadInterface.getMetaFieldTable(); - this.metaScalarTable = metaDataReadInterface.getMetaScalarTable(); - 
this.metaIndexTable = metaDataReadInterface.getMetaIndexTable(); - this.metaIndexFieldTable = metaDataReadInterface.getMetaIndexFieldTable(); - this.metaDocPartIndexTable = metaDataReadInterface.getMetaDocPartIndexTable(); - this.metaDocPartIndexColumnTable = metaDataReadInterface.getMetaDocPartIndexColumnTable(); - this.kvTable = metaDataReadInterface.getKvTable(); - this.sqlHelper = sqlHelper; - } - - @Override - public void createMetaDatabaseTable(DSLContext dsl) { - String schemaName = metaDatabaseTable.getSchema().getName(); - String tableName = metaDatabaseTable.getName(); - String statement = getCreateMetaDatabaseTableStatement(schemaName, tableName); - sqlHelper.executeStatement(dsl, statement, Context.CREATE_TABLE); - } - - protected abstract String getCreateMetaDatabaseTableStatement(String schemaName, - String tableName); - - @Override - public void createMetaCollectionTable(DSLContext dsl) { - String schemaName = metaCollectionTable.getSchema().getName(); - String tableName = metaCollectionTable.getName(); - String statement = getCreateMetaCollectionTableStatement(schemaName, tableName); - sqlHelper.executeStatement(dsl, statement, Context.CREATE_TABLE); - } - - protected abstract String getCreateMetaCollectionTableStatement(String schemaName, - String tableName); - - @Override - public void createMetaDocPartTable(DSLContext dsl) { - String schemaName = metaDocPartTable.getSchema().getName(); - String tableName = metaDocPartTable.getName(); - String statement = getCreateMetaDocPartTableStatement(schemaName, tableName); - sqlHelper.executeStatement(dsl, statement, Context.CREATE_TABLE); - } - - protected abstract String getCreateMetaDocPartTableStatement(String schemaName, String tableName); - - @Override - public void createMetaFieldTable(DSLContext dsl) { - String schemaName = metaFieldTable.getSchema().getName(); - String tableName = metaFieldTable.getName(); - String statement = getCreateMetaFieldTableStatement(schemaName, tableName); - sqlHelper.executeStatement(dsl, statement, Context.CREATE_TABLE); - } - - protected abstract String getCreateMetaFieldTableStatement(String schemaName, String tableName); - - @Override - public void createMetaScalarTable(DSLContext dsl) { - String schemaName = metaScalarTable.getSchema().getName(); - String tableName = metaScalarTable.getName(); - String statement = getCreateMetaScalarTableStatement(schemaName, tableName); - sqlHelper.executeStatement(dsl, statement, Context.CREATE_TABLE); - } - - protected abstract String getCreateMetaScalarTableStatement(String schemaName, String tableName); - - @Override - public void createMetaIndexTable(DSLContext dsl) { - String schemaName = metaIndexTable.getSchema().getName(); - String tableName = metaIndexTable.getName(); - String statement = getCreateMetaIndexTableStatement(schemaName, tableName); - sqlHelper.executeStatement(dsl, statement, Context.CREATE_TABLE); - } - - protected abstract String getCreateMetaIndexTableStatement(String schemaName, String tableName); - - @Override - public void createMetaIndexFieldTable(DSLContext dsl) { - String schemaName = metaIndexFieldTable.getSchema().getName(); - String tableName = metaIndexFieldTable.getName(); - String statement = getCreateMetaIndexFieldTableStatement(schemaName, tableName); - sqlHelper.executeStatement(dsl, statement, Context.CREATE_TABLE); - } - - protected abstract String getCreateMetaIndexFieldTableStatement(String schemaName, - String tableName); - - @Override - public void createMetaDocPartIndexTable(DSLContext dsl) { - String 
schemaName = metaDocPartIndexTable.getSchema().getName(); - String tableName = metaDocPartIndexTable.getName(); - String statement = getCreateMetaDocPartIndexTableStatement(schemaName, tableName); - sqlHelper.executeStatement(dsl, statement, Context.CREATE_TABLE); - } - - protected abstract String getCreateMetaDocPartIndexTableStatement(String schemaName, - String tableName); - - @Override - public void createMetaFieldIndexTable(DSLContext dsl) { - String schemaName = metaDocPartIndexColumnTable.getSchema().getName(); - String tableName = metaDocPartIndexColumnTable.getName(); - String statement = getCreateMetaDocPartIndexColumnTableStatement(schemaName, tableName); - sqlHelper.executeStatement(dsl, statement, Context.CREATE_TABLE); - } - - protected abstract String getCreateMetaDocPartIndexColumnTableStatement(String schemaName, - String tableName); - - @Override - public void createKvTable(DSLContext dsl) { - String schemaName = kvTable.getSchema().getName(); - String tableName = kvTable.getName(); - String statement = getCreateMetainfStatement(schemaName, tableName); - sqlHelper.executeStatement(dsl, statement, Context.CREATE_TABLE); - } - - protected abstract String getCreateMetainfStatement(String schemaName, String tableName); - - @Override - public void addMetaDatabase(DSLContext dsl, MetaDatabase database) { - String statement = getAddMetaDatabaseStatement(database.getName(), database.getIdentifier()); - sqlHelper.executeUpdate(dsl, statement, Context.META_INSERT); - } - - @Override - public void addMetaCollection(DSLContext dsl, MetaDatabase database, MetaCollection collection) { - String statement = getAddMetaCollectionStatement(database.getName(), collection.getName(), - collection.getIdentifier()); - sqlHelper.executeUpdate(dsl, statement, Context.META_INSERT); - } - - @Override - public void addMetaDocPart(DSLContext dsl, MetaDatabase database, MetaCollection collection, - MetaDocPart docPart) { - String statement = getAddMetaDocPartStatement(database.getName(), collection.getName(), docPart - .getTableRef(), - docPart.getIdentifier()); - sqlHelper.executeUpdate(dsl, statement, Context.META_INSERT); - } - - @Override - public void addMetaField(DSLContext dsl, MetaDatabase database, MetaCollection collection, - MetaDocPart docPart, MetaField field) { - String statement = getAddMetaFieldStatement(database.getName(), collection.getName(), docPart - .getTableRef(), - field.getName(), field.getIdentifier(), - field.getType()); - sqlHelper.executeUpdate(dsl, statement, Context.META_INSERT); - } - - @Override - public void addMetaScalar(DSLContext dsl, MetaDatabase database, MetaCollection collection, - MetaDocPart docPart, MetaScalar scalar) { - String statement = getAddMetaScalarStatement(database.getName(), collection.getName(), docPart - .getTableRef(), - scalar.getIdentifier(), scalar.getType()); - sqlHelper.executeUpdate(dsl, statement, Context.META_INSERT); - } - - @Override - public void addMetaIndex(DSLContext dsl, MetaDatabase database, MetaCollection collection, - MetaIndex index) { - String statement = getAddMetaIndexStatement(database.getName(), collection.getName(), index - .getName(), - index.isUnique()); - sqlHelper.executeUpdate(dsl, statement, Context.META_INSERT); - } - - @Override - public void addMetaIndexField(DSLContext dsl, MetaDatabase database, MetaCollection collection, - MetaIndex index, MetaIndexField field) { - String statement = getAddMetaIndexFieldStatement(database.getName(), collection.getName(), index - .getName(), - field.getPosition(), 
field.getTableRef(), field.getName(), field.getOrdering()); - sqlHelper.executeUpdate(dsl, statement, Context.META_INSERT); - } - - @Override - public void addMetaDocPartIndex(DSLContext dsl, MetaDatabase database, MetaCollection collection, - MetaDocPart docPart, MetaIdentifiedDocPartIndex index) { - String statement = getAddMetaDocPartIndexStatement(database.getName(), index.getIdentifier(), - collection.getName(), - docPart.getTableRef(), index.isUnique()); - sqlHelper.executeUpdate(dsl, statement, Context.META_INSERT); - } - - @Override - public void addMetaDocPartIndexColumn(DSLContext dsl, MetaDatabase database, - MetaCollection collection, - MetaDocPart docPart, MetaIdentifiedDocPartIndex index, MetaDocPartIndexColumn column) { - String statement = getAddMetaDocPartIndexColumnStatement(database.getName(), index - .getIdentifier(), column.getPosition(), - collection.getName(), docPart.getTableRef(), column.getIdentifier(), column.getOrdering()); - sqlHelper.executeUpdate(dsl, statement, Context.META_INSERT); - } - - protected String getAddMetaDatabaseStatement(String databaseName, String databaseIdentifier) { - String statement = sqlHelper.dsl().insertInto(metaDatabaseTable) - .set(metaDatabaseTable.newRecord().values(databaseName, databaseIdentifier)).getSQL( - ParamType.INLINED); - return statement; - } - - protected String getAddMetaCollectionStatement(String databaseName, String collectionName, - String collectionIdentifier) { - String statement = sqlHelper.dsl().insertInto(metaCollectionTable) - .set(metaCollectionTable.newRecord() - .values(databaseName, collectionName, collectionIdentifier)).getSQL(ParamType.INLINED); - return statement; - } - - protected String getAddMetaDocPartStatement(String databaseName, String collectionName, - TableRef tableRef, - String docPartIdentifier) { - String statement = sqlHelper.dsl().insertInto(metaDocPartTable) - .set(metaDocPartTable.newRecord() - .values(databaseName, collectionName, tableRef, docPartIdentifier)).getSQL( - ParamType.INLINED); - return statement; - } - - protected String getAddMetaFieldStatement(String databaseName, String collectionName, - TableRef tableRef, - String fieldName, String fieldIdentifier, FieldType type) { - String statement = sqlHelper.dsl().insertInto(metaFieldTable) - .set(metaFieldTable.newRecord() - .values(databaseName, collectionName, tableRef, fieldName, type, fieldIdentifier)) - .getSQL(ParamType.INLINED); - return statement; - } - - protected String getAddMetaScalarStatement(String databaseName, String collectionName, - TableRef tableRef, - String fieldIdentifier, FieldType type) { - String statement = sqlHelper.dsl().insertInto(metaScalarTable) - .set(metaScalarTable.newRecord() - .values(databaseName, collectionName, tableRef, type, fieldIdentifier)).getSQL( - ParamType.INLINED); - return statement; - } - - protected String getAddMetaIndexStatement(String databaseName, String collectionName, - String indexName, boolean unique) { - String statement = sqlHelper.dsl().insertInto(metaIndexTable) - .set(metaIndexTable.newRecord() - .values(databaseName, collectionName, indexName, unique)).getSQL(ParamType.INLINED); - return statement; - } - - protected String getAddMetaIndexFieldStatement(String databaseName, String collectionName, - String indexName, - int position, TableRef tableRef, String fieldName, FieldIndexOrdering ordering) { - String statement = sqlHelper.dsl().insertInto(metaIndexFieldTable) - .set(metaIndexFieldTable.newRecord() - .values( - databaseName, - collectionName, - indexName, - 
position, - tableRef, - fieldName, - ordering)) - .getSQL(ParamType.INLINED); - return statement; - } - - protected String getAddMetaDocPartIndexStatement(String databaseName, String indexName, - String collectionName, - TableRef tableRef, boolean unique) { - String statement = sqlHelper.dsl().insertInto(metaDocPartIndexTable) - .set(metaDocPartIndexTable.newRecord() - .values(databaseName, indexName, collectionName, tableRef, unique)).getSQL( - ParamType.INLINED); - return statement; - } - - protected String getAddMetaDocPartIndexColumnStatement(String databaseName, String indexName, - int position, String collectionName, - TableRef tableRef, String columnName, FieldIndexOrdering ordering) { - String statement = sqlHelper.dsl().insertInto(metaDocPartIndexColumnTable) - .set(metaDocPartIndexColumnTable.newRecord() - .values(databaseName, indexName, position, collectionName, tableRef, columnName, - ordering)).getSQL(ParamType.INLINED); - return statement; - } - - @Override - public void deleteMetaDatabase(DSLContext dsl, MetaDatabase database) { - String statement = getDeleteMetaDatabaseStatement(database.getName()); - sqlHelper.executeUpdate(dsl, statement, Context.META_DELETE); - } - - @Override - public void deleteMetaCollection(DSLContext dsl, MetaDatabase database, - MetaCollection collection) { - String statement = getCascadeDeleteMetaDocPartIndexColumnStatement(database.getName(), - collection.getName()); - sqlHelper.executeUpdate(dsl, statement, Context.META_DELETE); - statement = getCascadeDeleteMetaDocPartIndexStatement(database.getName(), collection.getName()); - sqlHelper.executeUpdate(dsl, statement, Context.META_DELETE); - - statement = getCascadeDeleteMetaScalarStatement(database.getName(), collection.getName()); - sqlHelper.executeUpdate(dsl, statement, Context.META_DELETE); - statement = getCascadeDeleteMetaFieldStatement(database.getName(), collection.getName()); - sqlHelper.executeUpdate(dsl, statement, Context.META_DELETE); - statement = getCascadeDeleteMetaDocPartStatement(database.getName(), collection.getName()); - sqlHelper.executeUpdate(dsl, statement, Context.META_DELETE); - - statement = getCascadeDeleteMetaIndexFieldStatement(database.getName(), collection.getName()); - sqlHelper.executeUpdate(dsl, statement, Context.META_DELETE); - statement = getCascadeDeleteMetaIndexStatement(database.getName(), collection.getName()); - sqlHelper.executeUpdate(dsl, statement, Context.META_DELETE); - - statement = getDeleteMetaCollectionStatement(database.getName(), collection.getName()); - sqlHelper.executeUpdate(dsl, statement, Context.META_DELETE); - } - - @Override - public void deleteMetaIndex(DSLContext dsl, MetaDatabase database, MetaCollection collection, - MetaIndex index) { - String statement = getCascadeDeleteMetaIndexFieldStatement(database.getName(), collection - .getName(), index.getName()); - sqlHelper.executeUpdate(dsl, statement, Context.META_DELETE); - statement = getDeleteMetaIndexStatement(database.getName(), collection.getName(), index - .getName()); - sqlHelper.executeUpdate(dsl, statement, Context.META_DELETE); - } - - @Override - public void deleteMetaDocPartIndex(DSLContext dsl, MetaDatabase database, - MetaCollection collection, MetaDocPart docPart, MetaIdentifiedDocPartIndex index) { - String statement = getCascadeDeleteMetaDocPartIndexColumnStatement(database.getName(), - collection.getName(), index.getIdentifier()); - sqlHelper.executeUpdate(dsl, statement, Context.META_DELETE); - statement = getDeleteMetaDocPartIndexStatement(database.getName(), 
collection.getName(), index - .getIdentifier()); - sqlHelper.executeUpdate(dsl, statement, Context.META_DELETE); - } - - protected String getDeleteMetaDatabaseStatement(String databaseName) { - String statement = sqlHelper.dsl().deleteFrom(metaDatabaseTable) - .where(metaDatabaseTable.NAME.eq(databaseName)).getSQL(ParamType.INLINED); - return statement; - } - - protected String getDeleteMetaCollectionStatement(String databaseName, String collectionName) { - String statement = sqlHelper.dsl().deleteFrom(metaCollectionTable) - .where(metaCollectionTable.DATABASE.eq(databaseName) - .and(metaCollectionTable.NAME.eq(collectionName))).getSQL(ParamType.INLINED); - return statement; - } - - protected String getCascadeDeleteMetaDocPartStatement(String databaseName, - String collectionName) { - String statement = sqlHelper.dsl().deleteFrom(metaDocPartTable) - .where(metaDocPartTable.DATABASE.eq(databaseName) - .and(metaDocPartTable.COLLECTION.eq(collectionName))).getSQL(ParamType.INLINED); - return statement; - } - - protected String getCascadeDeleteMetaFieldStatement(String databaseName, String collectionName) { - String statement = sqlHelper.dsl().deleteFrom(metaFieldTable) - .where(metaFieldTable.DATABASE.eq(databaseName) - .and(metaFieldTable.COLLECTION.eq(collectionName))).getSQL(ParamType.INLINED); - return statement; - } - - protected String getCascadeDeleteMetaScalarStatement(String databaseName, String collectionName) { - String statement = sqlHelper.dsl().deleteFrom(metaScalarTable) - .where(metaScalarTable.DATABASE.eq(databaseName) - .and(metaScalarTable.COLLECTION.eq(collectionName))).getSQL(ParamType.INLINED); - return statement; - } - - protected String getCascadeDeleteMetaIndexStatement(String databaseName, String collectionName) { - String statement = sqlHelper.dsl().deleteFrom(metaIndexTable) - .where(metaIndexTable.DATABASE.eq(databaseName) - .and(metaIndexTable.COLLECTION.eq(collectionName))).getSQL(ParamType.INLINED); - return statement; - } - - protected String getCascadeDeleteMetaIndexFieldStatement(String databaseName, - String collectionName) { - String statement = sqlHelper.dsl().deleteFrom(metaIndexFieldTable) - .where(metaIndexFieldTable.DATABASE.eq(databaseName) - .and(metaIndexFieldTable.COLLECTION.eq(collectionName))).getSQL(ParamType.INLINED); - return statement; - } - - protected String getCascadeDeleteMetaIndexFieldStatement(String databaseName, - String collectionName, String indexName) { - String statement = sqlHelper.dsl().deleteFrom(metaIndexFieldTable) - .where(metaIndexFieldTable.DATABASE.eq(databaseName) - .and(metaIndexFieldTable.COLLECTION.eq(collectionName)) - .and(metaIndexFieldTable.INDEX.eq(indexName))).getSQL(ParamType.INLINED); - return statement; - } - - protected String getCascadeDeleteMetaDocPartIndexStatement(String databaseName, - String collectionName) { - String statement = sqlHelper.dsl().deleteFrom(metaDocPartIndexTable) - .where(metaDocPartIndexTable.DATABASE.eq(databaseName)).getSQL(ParamType.INLINED); - return statement; - } - - protected String getCascadeDeleteMetaDocPartIndexColumnStatement(String databaseName, - String collectionName) { - String statement = sqlHelper.dsl().deleteFrom(metaDocPartIndexColumnTable) - .where(metaDocPartIndexColumnTable.DATABASE.eq(databaseName)).getSQL(ParamType.INLINED); - return statement; - } - - protected String getCascadeDeleteMetaDocPartIndexColumnStatement(String databaseName, - String collectionName, String indexIdentifier) { - String statement = 
sqlHelper.dsl().deleteFrom(metaDocPartIndexColumnTable) - .where(metaDocPartIndexColumnTable.DATABASE.eq(databaseName) - .and(metaDocPartIndexColumnTable.INDEX_IDENTIFIER.eq(indexIdentifier))).getSQL( - ParamType.INLINED); - return statement; - } - - protected String getDeleteMetaIndexStatement(String databaseName, String collectionName, - String indexName) { - String statement = sqlHelper.dsl().deleteFrom(metaIndexTable) - .where(metaIndexTable.DATABASE.eq(databaseName) - .and(metaIndexTable.COLLECTION.eq(collectionName)) - .and(metaIndexTable.NAME.eq(indexName))).getSQL(ParamType.INLINED); - return statement; - } - - protected String getDeleteMetaDocPartIndexStatement(String databaseName, String collectionName, - String indexIdentifier) { - String statement = sqlHelper.dsl().deleteFrom(metaDocPartIndexTable) - .where(metaDocPartIndexTable.DATABASE.eq(databaseName) - .and(metaDocPartIndexTable.IDENTIFIER.eq(indexIdentifier))).getSQL(ParamType.INLINED); - return statement; - } - - @Override - public int consumeRids(DSLContext dsl, MetaDatabase database, MetaCollection collection, - MetaDocPart docPart, int count) { - Record1 lastRid = dsl.select(metaDocPartTable.LAST_RID).from(metaDocPartTable).where( - metaDocPartTable.DATABASE.eq(database.getName()) - .and(metaDocPartTable.COLLECTION.eq(collection.getName())) - .and(getTableRefEqCondition(metaDocPartTable.TABLE_REF, docPart.getTableRef()))) - .fetchOne(); - dsl.update(metaDocPartTable).set(metaDocPartTable.LAST_RID, metaDocPartTable.LAST_RID - .plus(count)).where( - metaDocPartTable.DATABASE.eq(database.getName()) - .and(metaDocPartTable.COLLECTION.eq(collection.getName())) - .and(getTableRefEqCondition(metaDocPartTable.TABLE_REF, docPart.getTableRef()))) - .execute(); - return lastRid.value1(); - } - - protected abstract Condition getTableRefEqCondition(TableField field, TableRef tableRef); - - @Override - public String writeMetaInfo(DSLContext dsl, MetaInfoKey key, String newValue) { - Condition c = kvTable.KEY.eq(key.getKeyName()); - - Optional oldValue = dsl.select(kvTable.VALUE) - .from(kvTable) - .where(c) - .fetchOptional() - .map(Record1::value1); - - if (oldValue.isPresent()) { - int updatedRows = dsl.update(kvTable) - .set(kvTable.KEY, key.getKeyName()) - .set(kvTable.VALUE, newValue) - .where(c) - .execute(); - assert updatedRows == 1; - } else { - int newRows = dsl.insertInto(kvTable, kvTable.KEY, kvTable.VALUE) - .values(key.getKeyName(), newValue) - .execute(); - assert newRows == 1; - } - return oldValue.orElse(null); - } -} diff --git a/engine/backend/common/src/main/java/com/torodb/backend/AbstractReadInterface.java b/engine/backend/common/src/main/java/com/torodb/backend/AbstractReadInterface.java deleted file mode 100644 index 0a4c41d5..00000000 --- a/engine/backend/common/src/main/java/com/torodb/backend/AbstractReadInterface.java +++ /dev/null @@ -1,422 +0,0 @@ -/* - * ToroDB - * Copyright © 2014 8Kdata Technology (www.8kdata.com) - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. 
- * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . - */ - -package com.torodb.backend; - -import com.google.common.collect.ArrayListMultimap; -import com.google.common.collect.Multimap; -import com.torodb.backend.ErrorHandler.Context; -import com.torodb.backend.d2r.ResultSetDocPartResult; -import com.torodb.backend.tables.MetaDocPartTable.DocPartTableFields; -import com.torodb.core.TableRef; -import com.torodb.core.TableRefFactory; -import com.torodb.core.cursors.Cursor; -import com.torodb.core.cursors.EmptyCursor; -import com.torodb.core.cursors.IteratorCursor; -import com.torodb.core.d2r.DocPartResult; -import com.torodb.core.transaction.metainf.MetaCollection; -import com.torodb.core.transaction.metainf.MetaDatabase; -import com.torodb.core.transaction.metainf.MetaDocPart; -import com.torodb.core.transaction.metainf.MetaField; -import com.torodb.kvdocument.values.KvValue; -import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; -import org.jooq.DSLContext; -import org.jooq.lambda.Seq; -import org.jooq.lambda.Unchecked; -import org.jooq.lambda.tuple.Tuple2; - -import java.sql.Connection; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; -import java.util.stream.Collectors; -import java.util.stream.Stream; - -import javax.annotation.Nonnull; -import javax.inject.Provider; -import javax.inject.Singleton; - -/** - * - */ -@Singleton -@SuppressFBWarnings("SQL_PREPARED_STATEMENT_GENERATED_FROM_NONCONSTANT_STRING") -public abstract class AbstractReadInterface implements ReadInterface { - - private final MetaDataReadInterface metaDataReadInterface; - private final DataTypeProvider dataTypeProvider; - private final ErrorHandler errorHandler; - private final SqlHelper sqlHelper; - private final TableRefFactory tableRefFactory; - - public AbstractReadInterface(MetaDataReadInterface metaDataReadInterface, - DataTypeProvider dataTypeProvider, - ErrorHandler errorHandler, SqlHelper sqlHelper, TableRefFactory tableRefFactory) { - this.metaDataReadInterface = metaDataReadInterface; - this.dataTypeProvider = dataTypeProvider; - this.errorHandler = errorHandler; - this.sqlHelper = sqlHelper; - this.tableRefFactory = tableRefFactory; - } - - @Override - @SuppressFBWarnings(value = {"OBL_UNSATISFIED_OBLIGATION", "ODR_OPEN_DATABASE_RESOURCE"}, - justification = - "ResultSet is wrapped in a Cursor. 
It's iterated and closed in caller code") - public Cursor getCollectionDidsWithFieldEqualsTo(DSLContext dsl, - MetaDatabase metaDatabase, - MetaCollection metaCol, MetaDocPart metaDocPart, MetaField metaField, KvValue value) - throws SQLException { - assert metaDatabase.getMetaCollectionByIdentifier(metaCol.getIdentifier()) != null; - assert metaCol.getMetaDocPartByIdentifier(metaDocPart.getIdentifier()) != null; - assert metaDocPart.getMetaFieldByIdentifier(metaField.getIdentifier()) != null; - - String statement = getReadCollectionDidsWithFieldEqualsToStatement(metaDatabase.getIdentifier(), - metaDocPart.getIdentifier(), metaField.getIdentifier()); - Connection connection = dsl.configuration().connectionProvider().acquire(); - try { - PreparedStatement preparedStatement = connection.prepareStatement(statement); - sqlHelper.setPreparedStatementValue(preparedStatement, 1, metaField.getType(), value); - return new DefaultDidCursor(errorHandler, preparedStatement.executeQuery()); - } finally { - dsl.configuration().connectionProvider().release(connection); - } - } - - protected abstract String getReadCollectionDidsWithFieldEqualsToStatement(String schemaName, - String rootTableName, - String columnName); - - @Override - @SuppressFBWarnings(value = {"OBL_UNSATISFIED_OBLIGATION", "ODR_OPEN_DATABASE_RESOURCE"}, - justification = - "ResultSet is wrapped in a Cursor. It's iterated and closed in caller code") - public Cursor getCollectionDidsWithFieldsIn(DSLContext dsl, MetaDatabase metaDatabase, - MetaCollection metaCol, MetaDocPart metaDocPart, - Multimap> valuesMultimap) - throws SQLException { - assert metaDatabase.getMetaCollectionByIdentifier(metaCol.getIdentifier()) != null; - assert metaCol.getMetaDocPartByIdentifier(metaDocPart.getIdentifier()) != null; - assert valuesMultimap.keySet().stream().allMatch(metafield -> metaDocPart - .getMetaFieldByIdentifier(metafield.getIdentifier()) != null); - - if (valuesMultimap.size() > 500) { - @SuppressWarnings("checkstyle:LineLength") - Stream>, Long>>>> valuesEntriesBatchStream = - Seq.seq(valuesMultimap.entries().stream()) - .zipWithIndex() - .groupBy(t -> t.v2 / 500) - .entrySet() - .stream(); - Stream>>> valuesEntryBatchStreamOfStream = - valuesEntriesBatchStream - .map(e -> e.getValue() - .stream() - .map(se -> se.v1)); - Stream>> valuesMultimapBatchStream = - valuesEntryBatchStreamOfStream - .map(e -> toValuesMultimap(e)); - Stream> didCursorStream = - valuesMultimapBatchStream - .map(Unchecked.function(valuesMultimapBatch -> - getCollectionDidsWithFieldsInBatch( - dsl, - metaDatabase, - metaCol, - metaDocPart, - valuesMultimapBatch))); - Stream didStream = didCursorStream - .flatMap(cursor -> cursor.getRemaining().stream()); - - return new IteratorCursor<>(didStream.iterator()); - } - - return getCollectionDidsWithFieldsInBatch(dsl, metaDatabase, metaCol, metaDocPart, - valuesMultimap); - } - - private Multimap> toValuesMultimap( - Stream>> valueEntryStream) { - Multimap> valuesMultimap = ArrayListMultimap.create(); - - valueEntryStream.forEach(e -> valuesMultimap.put(e.getKey(), e.getValue())); - - return valuesMultimap; - } - - @SuppressFBWarnings(value = {"OBL_UNSATISFIED_OBLIGATION", "ODR_OPEN_DATABASE_RESOURCE"}, - justification = - "ResultSet is wrapped in a Cursor. 
It's iterated and closed in caller code") - private Cursor getCollectionDidsWithFieldsInBatch(DSLContext dsl, - MetaDatabase metaDatabase, - MetaCollection metaCol, MetaDocPart metaDocPart, - Multimap> valuesMultimap) - throws SQLException { - @SuppressWarnings("checkstyle:LineLength") - Provider>>>> valuesMultimapSortedStreamProvider = - () -> valuesMultimap.asMap().entrySet().stream() - .sorted((e1, e2) -> e1.getKey().getIdentifier().compareTo(e2.getKey().getIdentifier())); - String statement = getReadCollectionDidsWithFieldInStatement(metaDatabase.getIdentifier(), - metaDocPart.getIdentifier(), valuesMultimapSortedStreamProvider.get() - .map(e -> new Tuple2(e.getKey().getIdentifier(), e.getValue().size()))); - Connection connection = dsl.configuration().connectionProvider().acquire(); - try { - PreparedStatement preparedStatement = connection.prepareStatement(statement); - int parameterIndex = 1; - Iterator>>> valuesMultimapSortedIterator = - valuesMultimapSortedStreamProvider.get().iterator(); - while (valuesMultimapSortedIterator.hasNext()) { - Map.Entry>> valuesMultimapEntry = - valuesMultimapSortedIterator.next(); - for (KvValue value : valuesMultimapEntry.getValue()) { - sqlHelper.setPreparedStatementValue(preparedStatement, parameterIndex, valuesMultimapEntry - .getKey().getType(), value); - parameterIndex++; - } - } - return new DefaultDidCursor(errorHandler, preparedStatement.executeQuery()); - } finally { - dsl.configuration().connectionProvider().release(connection); - } - } - - protected abstract String getReadCollectionDidsWithFieldInStatement(String schemaName, - String rootTableName, - Stream> valuesCountList); - - @Override - @SuppressFBWarnings(value = {"OBL_UNSATISFIED_OBLIGATION", "ODR_OPEN_DATABASE_RESOURCE"}, - justification = "ResultSet is wrapped in a Cursor>>. 
It's " - + "iterated and closed in caller code") - public Cursor>> getCollectionDidsAndProjectionWithFieldsIn( - DSLContext dsl, MetaDatabase metaDatabase, - MetaCollection metaCol, MetaDocPart metaDocPart, - Multimap> valuesMultimap) - throws SQLException { - assert metaDatabase.getMetaCollectionByIdentifier(metaCol.getIdentifier()) != null; - assert metaCol.getMetaDocPartByIdentifier(metaDocPart.getIdentifier()) != null; - assert valuesMultimap.keySet().stream().allMatch(metafield -> metaDocPart - .getMetaFieldByIdentifier(metafield.getIdentifier()) != null); - - Stream>>> valuesBatchStream = - valuesMultimap.asMap().entrySet().stream() - .map(e -> new Tuple2>>(e.getKey(), e.getValue())); - if (valuesMultimap.asMap().entrySet().stream().anyMatch(e -> e.getValue().size() > 500)) { - valuesBatchStream = valuesBatchStream - .flatMap(e -> Seq.seq(e.v2.stream()) - .zipWithIndex() - .groupBy(t -> t.v2 / 500) - .entrySet() - .stream() - .map(se -> toValuesMap(e.v1, se))); - } - Stream>>> didProjectionCursorStream = - valuesBatchStream - .map(Unchecked.function(mapBatch -> - getCollectionDidsAndProjectionWithFieldsInBatch( - dsl, - metaDatabase, - metaCol, - metaDocPart, - mapBatch.v1, - mapBatch.v2))); - Stream>> didProjectionStream = - didProjectionCursorStream - .flatMap(cursor -> cursor.getRemaining().stream()); - - return new IteratorCursor<>(didProjectionStream.iterator()); - } - - @SuppressWarnings("rawtypes") - private Tuple2>> toValuesMap(MetaField metaField, - Entry, Long>>> groupedValuesMap) { - List collect = groupedValuesMap.getValue().stream() - .map(e -> (KvValue) e.v1) - .collect(Collectors.toList()); - - return new Tuple2>>(metaField, collect.stream() - .map(e -> (KvValue) e) - .collect(Collectors.toList())); - } - - @SuppressFBWarnings(value = {"OBL_UNSATISFIED_OBLIGATION", "ODR_OPEN_DATABASE_RESOURCE"}, - justification = "ResultSet is wrapped in a Cursor>>. " - + "It's iterated and closed in caller code") - private Cursor>> getCollectionDidsAndProjectionWithFieldsInBatch( - DSLContext dsl, MetaDatabase metaDatabase, - MetaCollection metaCol, MetaDocPart metaDocPart, MetaField metaField, - Collection> values) - throws SQLException { - String statement = getReadCollectionDidsAndProjectionWithFieldInStatement(metaDatabase - .getIdentifier(), - metaDocPart.getIdentifier(), metaField.getIdentifier(), values.size()); - Connection connection = dsl.configuration().connectionProvider().acquire(); - try { - PreparedStatement preparedStatement = connection.prepareStatement(statement); - int parameterIndex = 1; - for (KvValue value : values) { - sqlHelper.setPreparedStatementValue(preparedStatement, parameterIndex, metaField.getType(), - value); - parameterIndex++; - } - return new AbstractCursor>>(errorHandler, preparedStatement - .executeQuery()) { - @Override - protected Tuple2> read(ResultSet resultSet) throws SQLException { - return new Tuple2<>( - resultSet.getInt(1), - sqlHelper.getResultSetKvValue( - metaField.getType(), - dataTypeProvider.getDataType(metaField.getType()), resultSet, 2 - ) - ); - } - }; - } finally { - dsl.configuration().connectionProvider().release(connection); - } - } - - protected abstract String getReadCollectionDidsAndProjectionWithFieldInStatement( - String schemaName, - String rootTableName, - String columnName, int valuesCount); - - @Override - @SuppressFBWarnings(value = {"OBL_UNSATISFIED_OBLIGATION", "ODR_OPEN_DATABASE_RESOURCE"}, - justification = - "ResultSet is wrapped in a Cursor. 
It's iterated and closed in caller code") - public Cursor getAllCollectionDids(DSLContext dsl, MetaDatabase metaDatabase, - MetaCollection metaCollection) - throws SQLException { - - MetaDocPart rootDocPart = metaCollection.getMetaDocPartByTableRef(tableRefFactory.createRoot()); - if (rootDocPart == null) { - return new EmptyCursor<>(); - } - - String statement = getReadAllCollectionDidsStatement(metaDatabase.getIdentifier(), rootDocPart - .getIdentifier()); - Connection connection = dsl.configuration().connectionProvider().acquire(); - try { - PreparedStatement preparedStatement = connection.prepareStatement(statement); - return new DefaultDidCursor(errorHandler, preparedStatement.executeQuery()); - } finally { - dsl.configuration().connectionProvider().release(connection); - } - } - - protected abstract String getReadAllCollectionDidsStatement(String schemaName, - String rootTableName); - - @Override - public long countAll( - @Nonnull DSLContext dsl, - @Nonnull MetaDatabase database, - @Nonnull MetaCollection collection - ) { - MetaDocPart rootDocPart = collection.getMetaDocPartByTableRef(tableRefFactory.createRoot()); - if (rootDocPart == null) { - return 0; - } - String statement = getReadCountAllStatement(database.getIdentifier(), rootDocPart - .getIdentifier()); - return sqlHelper.executeStatementWithResult(dsl, statement, Context.FETCH) - .get(0).into(Long.class); - } - - protected abstract String getReadCountAllStatement(String schema, String rootTableName); - - @Nonnull - @Override - public List getCollectionResultSets(@Nonnull DSLContext dsl, - @Nonnull MetaDatabase metaDatabase, @Nonnull MetaCollection metaCollection, - @Nonnull Cursor didCursor, int maxSize) throws SQLException { - Collection dids = didCursor.getNextBatch(maxSize); - return getCollectionResultSets(dsl, metaDatabase, metaCollection, dids); - } - - @Override - @SuppressFBWarnings(value = {"OBL_UNSATISFIED_OBLIGATION", "ODR_OPEN_DATABASE_RESOURCE"}, - justification = - "ResultSet is wrapped in a DocPartResult. 
It's iterated and closed in caller code") - public List getCollectionResultSets(DSLContext dsl, MetaDatabase metaDatabase, - MetaCollection metaCollection, Collection dids) throws SQLException { - ArrayList result = new ArrayList<>(); - Connection connection = dsl.configuration().connectionProvider().acquire(); - try { - Iterator metaDocPartIterator = metaCollection - .streamContainedMetaDocParts() - .sorted(TableRefComparator.MetaDocPart.DESC) - .iterator(); - while (metaDocPartIterator.hasNext()) { - MetaDocPart metaDocPart = metaDocPartIterator.next(); - String statament = getDocPartStatament(metaDatabase, metaDocPart, dids); - - PreparedStatement preparedStatement = connection.prepareStatement(statament); - result.add(new ResultSetDocPartResult(metaDataReadInterface, dataTypeProvider, errorHandler, - metaDocPart, preparedStatement.executeQuery(), sqlHelper)); - } - } finally { - dsl.configuration().connectionProvider().release(connection); - } - return result; - } - - protected abstract String getDocPartStatament(MetaDatabase metaDatabase, MetaDocPart metaDocPart, - Collection dids); - - @Override - public int getLastRowIdUsed(DSLContext dsl, MetaDatabase metaDatabase, - MetaCollection metaCollection, MetaDocPart metaDocPart) { - - String statement = getLastRowIdUsedStatement(metaDatabase, metaDocPart); - - Connection connection = dsl.configuration().connectionProvider().acquire(); - try (PreparedStatement preparedStatement = connection.prepareStatement(statement)) { - try (ResultSet rs = preparedStatement.executeQuery()) { - rs.next(); - int maxId = rs.getInt(1); - if (rs.wasNull()) { - return -1; - } - return maxId; - } - } catch (SQLException ex) { - throw errorHandler.handleException(Context.FETCH, ex); - } finally { - dsl.configuration().connectionProvider().release(connection); - } - } - - protected abstract String getLastRowIdUsedStatement(MetaDatabase metaDatabase, - MetaDocPart metaDocPart); - - protected String getPrimaryKeyColumnIdentifier(TableRef tableRef) { - if (tableRef.isRoot()) { - return DocPartTableFields.DID.fieldName; - } - return DocPartTableFields.RID.fieldName; - } -} diff --git a/engine/backend/common/src/main/java/com/torodb/backend/AbstractStructureInterface.java b/engine/backend/common/src/main/java/com/torodb/backend/AbstractStructureInterface.java deleted file mode 100644 index b32c1077..00000000 --- a/engine/backend/common/src/main/java/com/torodb/backend/AbstractStructureInterface.java +++ /dev/null @@ -1,378 +0,0 @@ -/* - * ToroDB - * Copyright © 2014 8Kdata Technology (www.8kdata.com) - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . 
- */
-
-package com.torodb.backend;
-
-import com.google.common.base.Preconditions;
-import com.torodb.backend.ErrorHandler.Context;
-import com.torodb.backend.converters.jooq.DataTypeForKv;
-import com.torodb.backend.meta.TorodbSchema;
-import com.torodb.backend.tables.SemanticTable;
-import com.torodb.core.TableRef;
-import com.torodb.core.backend.IdentifierConstraints;
-import com.torodb.core.exceptions.InvalidDatabaseException;
-import com.torodb.core.exceptions.user.UserException;
-import com.torodb.core.transaction.metainf.MetaCollection;
-import com.torodb.core.transaction.metainf.MetaDatabase;
-import com.torodb.core.transaction.metainf.MetaDocPart;
-import com.torodb.core.transaction.metainf.MetaIdentifiedDocPartIndex;
-import com.torodb.core.transaction.metainf.MetaSnapshot;
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
-import org.jooq.DSLContext;
-import org.jooq.Meta;
-import org.jooq.Schema;
-import org.jooq.Table;
-import org.jooq.lambda.tuple.Tuple2;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Optional;
-import java.util.function.Function;
-import java.util.stream.Collectors;
-import java.util.stream.Stream;
-
-import javax.inject.Inject;
-import javax.inject.Singleton;
-
-@Singleton
-public abstract class AbstractStructureInterface implements StructureInterface {
-
-  private static final Logger LOGGER =
-      LogManager.getLogger(AbstractStructureInterface.class);
-
-  private final DbBackendService dbBackend;
-  private final MetaDataReadInterface metaDataReadInterface;
-  private final SqlHelper sqlHelper;
-  private final IdentifierConstraints identifierConstraints;
-
-  @Inject
-  public AbstractStructureInterface(DbBackendService dbBackend,
-      MetaDataReadInterface metaDataReadInterface, SqlHelper sqlHelper,
-      IdentifierConstraints identifierConstraints) {
-    this.dbBackend = dbBackend;
-    this.metaDataReadInterface = metaDataReadInterface;
-    this.sqlHelper = sqlHelper;
-    this.identifierConstraints = identifierConstraints;
-  }
-
-  protected abstract void dropDatabase(DSLContext dsl, String dbIdentifier);
-
-  @Override
-  public void dropDatabase(DSLContext dsl, MetaDatabase metaDatabase) {
-    Iterator metaCollectionIterator = metaDatabase.streamMetaCollections()
-        .iterator();
-    while (metaCollectionIterator.hasNext()) {
-      MetaCollection metaCollection = metaCollectionIterator.next();
-      Iterator metaDocPartIterator = metaCollection
-          .streamContainedMetaDocParts()
-          .sorted(TableRefComparator.MetaDocPart.DESC).iterator();
-      while (metaDocPartIterator.hasNext()) {
-        MetaDocPart metaDocPart = metaDocPartIterator.next();
-        String statement = getDropTableStatement(metaDatabase.getIdentifier(), metaDocPart
-            .getIdentifier());
-        sqlHelper.executeUpdate(dsl, statement, Context.DROP_TABLE);
-      }
-    }
-    String statement = getDropSchemaStatement(metaDatabase.getIdentifier());
-    sqlHelper.executeUpdate(dsl, statement, Context.DROP_SCHEMA);
-  }
-
-  @Override
-  public void dropCollection(DSLContext dsl, String schemaName, MetaCollection metaCollection) {
-    Iterator metaDocPartIterator = metaCollection
-        .streamContainedMetaDocParts()
-        .sorted(TableRefComparator.MetaDocPart.DESC).iterator();
-    while (metaDocPartIterator.hasNext()) {
-      MetaDocPart metaDocPart = metaDocPartIterator.next();
-      String statement = getDropTableStatement(schemaName, metaDocPart.getIdentifier());
-      sqlHelper.executeUpdate(dsl, statement, Context.DROP_TABLE);
-    }
-  }
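/*
 * Hypothetical example, not part of the removed sources: dropDatabase and dropCollection
 * above delegate the concrete DDL strings to dialect subclasses through the
 * getDropTableStatement and getDropSchemaStatement hooks declared just below. A minimal
 * sketch of how a dialect module might phrase those statements, assuming double-quoted
 * identifiers in the same style getDropIndexStatement uses later in this class; the class
 * and method names here are invented for illustration only.
 */
final class DropDdlSketch {

  // Mirrors the getDropTableStatement(schemaName, tableName) contract (illustrative only).
  static String dropTable(String schemaName, String tableName) {
    return "DROP TABLE \"" + schemaName + "\".\"" + tableName + "\"";
  }

  // Mirrors getDropSchemaStatement(schemaName); dropDatabase has already dropped the
  // doc-part tables by the time this statement runs (illustrative only).
  static String dropSchema(String schemaName) {
    return "DROP SCHEMA \"" + schemaName + "\"";
  }
}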
- - protected abstract String getDropTableStatement(String schemaName, String tableName); - - protected abstract String getDropSchemaStatement(String schemaName); - - @Override - public void renameCollection(DSLContext dsl, String fromSchemaName, MetaCollection fromCollection, - String toSchemaName, MetaCollection toCollection) { - Iterator metaDocPartIterator = fromCollection - .streamContainedMetaDocParts().iterator(); - while (metaDocPartIterator.hasNext()) { - MetaDocPart fromMetaDocPart = metaDocPartIterator.next(); - MetaDocPart toMetaDocPart = toCollection.getMetaDocPartByTableRef(fromMetaDocPart - .getTableRef()); - String renameStatement = getRenameTableStatement(fromSchemaName, fromMetaDocPart - .getIdentifier(), toMetaDocPart.getIdentifier()); - sqlHelper.executeUpdate(dsl, renameStatement, Context.RENAME_TABLE); - - Iterator metaDocPartIndexIterator = fromMetaDocPart - .streamIndexes().iterator(); - while (metaDocPartIndexIterator.hasNext()) { - MetaIdentifiedDocPartIndex fromMetaIndex = metaDocPartIndexIterator.next(); - MetaIdentifiedDocPartIndex toMetaIndex = toMetaDocPart.streamIndexes() - .filter(index -> index.hasSameColumns(fromMetaIndex)) - .findAny() - .get(); - - String renameIndexStatement = getRenameIndexStatement(fromSchemaName, fromMetaIndex - .getIdentifier(), toMetaIndex.getIdentifier()); - sqlHelper.executeUpdate(dsl, renameIndexStatement, Context.RENAME_INDEX); - } - - if (!fromSchemaName.equals(toSchemaName)) { - String setSchemaStatement = getSetTableSchemaStatement(fromSchemaName, fromMetaDocPart - .getIdentifier(), toSchemaName); - sqlHelper.executeUpdate(dsl, setSchemaStatement, Context.SET_TABLE_SCHEMA); - } - } - } - - protected abstract String getRenameTableStatement(String fromSchemaName, String fromTableName, - String toTableName); - - protected abstract String getRenameIndexStatement(String fromSchemaName, String fromIndexName, - String toIndexName); - - protected abstract String getSetTableSchemaStatement(String fromSchemaName, String fromTableName, - String toSchemaName); - - @Override - public void createIndex(DSLContext dsl, String indexName, - String schemaName, String tableName, - List> columnList, boolean unique - ) throws UserException { - if (!dbBackend.isOnDataInsertMode()) { - Preconditions.checkArgument(!columnList.isEmpty(), "Can not create index on 0 columns"); - - String statement = getCreateIndexStatement(indexName, schemaName, tableName, columnList, - unique); - - sqlHelper.executeUpdateOrThrow(dsl, statement, unique ? Context.ADD_UNIQUE_INDEX : - Context.CREATE_INDEX); - } - } - - protected abstract String getCreateIndexStatement(String indexName, String schemaName, - String tableName, List> columnList, boolean unique); - - @Override - public void dropIndex(DSLContext dsl, String schemaName, String indexName) { - String statement = getDropIndexStatement(schemaName, indexName); - - sqlHelper.executeUpdate(dsl, statement, Context.DROP_INDEX); - } - - @Override - public void dropAll(DSLContext dsl) { - dropUserDatabases(dsl, metaDataReadInterface); - metaDataReadInterface.getMetaTables().forEach(t -> dsl.dropTable(t).execute()); - } - - @Override - public void dropUserData(DSLContext dsl) { - dropUserDatabases(dsl, metaDataReadInterface); - metaDataReadInterface.getMetaTables().forEach(t -> - dsl.deleteFrom(t).execute() - ); - } - - /** - * This method drops all user databases (usually, db schemas). - * - * To implement this method, metainformation found on metatables can be acceded using the given - * {@link MetaDataReadInterface}. 
- * - * @param dsl - * @param metaReadInterface - */ - protected void dropUserDatabases(DSLContext dsl, MetaDataReadInterface metaDataReadInterface) { - metaDataReadInterface.readMetaDatabaseTable(dsl) - .forEach(dbRecord -> dropDatabase(dsl, dbRecord.getIdentifier())); - } - - @Override - public Optional findTorodbSchema(DSLContext dsl, Meta jooqMeta) { - Schema torodbSchema = null; - for (Schema schema : jooqMeta.getSchemas()) { - if (identifierConstraints.isSameIdentifier(TorodbSchema.IDENTIFIER, schema.getName())) { - torodbSchema = schema; - break; - } - } - return Optional.ofNullable(torodbSchema); - } - - @Override - @SuppressWarnings({"rawtypes", "unchecked"}) - public void checkMetaDataTables(Schema torodbSchema) { - - List> metaTables = metaDataReadInterface.getMetaTables(); - for (SemanticTable metaTable : metaTables) { - String metaTableName = metaTable.getName(); - boolean metaTableFound = false; - for (Table table : torodbSchema.getTables()) { - if (identifierConstraints.isSameIdentifier(table.getName(), metaTableName)) { - metaTable.checkSemanticallyEquals(table); - metaTableFound = true; - LOGGER.debug(table + " found and check"); - } - } - if (!metaTableFound) { - throw new InvalidDatabaseException("The schema '" + TorodbSchema.IDENTIFIER + "'" - + " does not contain the expected meta table '" - + metaTableName + "'"); - } - } - - } - - protected String getDropIndexStatement(String schemaName, String indexName) { - StringBuilder sb = new StringBuilder() - .append("DROP INDEX ") - .append("\"").append(schemaName).append("\"") - .append(".") - .append("\"").append(indexName).append("\""); - String statement = sb.toString(); - return statement; - } - - @Override - public void createSchema(DSLContext dsl, String schemaName) { - String statement = getCreateSchemaStatement(schemaName); - sqlHelper.executeUpdate(dsl, statement, Context.CREATE_SCHEMA); - } - - protected abstract String getCreateSchemaStatement(String schemaName); - - @Override - public void createRootDocPartTable(DSLContext dsl, String schemaName, String tableName, - TableRef tableRef) { - String statement = getCreateDocPartTableStatement(schemaName, tableName, metaDataReadInterface - .getInternalFields(tableRef)); - sqlHelper.executeStatement(dsl, statement, Context.CREATE_TABLE); - } - - @Override - public void createDocPartTable(DSLContext dsl, String schemaName, String tableName, - TableRef tableRef, String foreignTableName) { - String statement = getCreateDocPartTableStatement(schemaName, tableName, metaDataReadInterface - .getInternalFields(tableRef)); - sqlHelper.executeStatement(dsl, statement, Context.CREATE_TABLE); - } - - protected abstract String getCreateDocPartTableStatement(String schemaName, String tableName, - Collection> fields); - - @Override - public Stream> streamRootDocPartTableIndexesCreation( - String schemaName, String tableName, TableRef tableRef) { - List> result = new ArrayList<>(1); - if (!dbBackend.isOnDataInsertMode()) { - String primaryKeyStatement = getAddDocPartTablePrimaryKeyStatement(schemaName, tableName, - metaDataReadInterface.getPrimaryKeyInternalFields(tableRef)); - - result.add(dsl -> { - sqlHelper.executeStatement(dsl, primaryKeyStatement, Context.ADD_UNIQUE_INDEX); - return metaDataReadInterface.getPrimaryKeyInternalFields(tableRef).stream().map(f -> f - .getName()).collect(Collectors.joining("_")) + "_pkey"; - }); - } - return result.stream(); - } - - @Override - public Stream> streamDocPartTableIndexesCreation(String schemaName, - String tableName, TableRef tableRef, 
String foreignTableName) { - List> result = new ArrayList<>(4); - if (!dbBackend.isOnDataInsertMode()) { - String primaryKeyStatement = getAddDocPartTablePrimaryKeyStatement(schemaName, tableName, - metaDataReadInterface.getPrimaryKeyInternalFields(tableRef)); - result.add((dsl) -> { - sqlHelper.executeStatement(dsl, primaryKeyStatement, Context.ADD_UNIQUE_INDEX); - return "rid_pkey"; - }); - } - - if (!dbBackend.isOnDataInsertMode()) { - if (dbBackend.includeForeignKeys()) { - String foreignKeyStatement = getAddDocPartTableForeignKeyStatement(schemaName, tableName, - metaDataReadInterface.getReferenceInternalFields(tableRef), - foreignTableName, metaDataReadInterface.getForeignInternalFields(tableRef)); - result.add((dsl) -> { - sqlHelper.executeStatement(dsl, foreignKeyStatement, Context.ADD_FOREIGN_KEY); - return metaDataReadInterface.getReferenceInternalFields(tableRef).stream().map(f -> f - .getName()).collect(Collectors.joining("_")) + "_fkey"; - }); - } else { - String foreignKeyIndexStatement = getCreateDocPartTableIndexStatement(schemaName, tableName, - metaDataReadInterface.getReferenceInternalFields(tableRef)); - result.add((dsl) -> { - sqlHelper.executeStatement(dsl, foreignKeyIndexStatement, Context.CREATE_INDEX); - return metaDataReadInterface.getReferenceInternalFields(tableRef).stream().map(f -> f - .getName()).collect(Collectors.joining("_")) + "_idx"; - }); - } - } - - if (!dbBackend.isOnDataInsertMode()) { - String readIndexStatement = getCreateDocPartTableIndexStatement(schemaName, tableName, - metaDataReadInterface.getReadInternalFields(tableRef)); - result.add((dsl) -> { - sqlHelper.executeStatement(dsl, readIndexStatement, Context.CREATE_INDEX); - return metaDataReadInterface.getReadInternalFields(tableRef).stream() - .map(f -> f.getName()).collect(Collectors.joining("_")) + "_idx"; - }); - } - - return result.stream(); - } - - @Override - public Stream> streamDataInsertFinishTasks(MetaSnapshot snapshot) { - return Collections.>emptySet().stream(); - } - - protected abstract String getAddDocPartTablePrimaryKeyStatement(String schemaName, - String tableName, - Collection> primaryKeyFields); - - protected abstract String getAddDocPartTableForeignKeyStatement(String schemaName, - String tableName, - Collection> referenceFields, String foreignTableName, - Collection> foreignFields); - - protected abstract String getCreateDocPartTableIndexStatement(String schemaName, String tableName, - Collection> indexedFields); - - @Override - public void addColumnToDocPartTable(DSLContext dsl, String schemaName, String tableName, - String columnName, DataTypeForKv dataType) { - String statement = getAddColumnToDocPartTableStatement(schemaName, tableName, columnName, - dataType); - - sqlHelper.executeStatement(dsl, statement, Context.ADD_COLUMN); - } - - protected abstract String getAddColumnToDocPartTableStatement(String schemaName, String tableName, - String columnName, DataTypeForKv dataType); -} diff --git a/engine/backend/common/src/main/java/com/torodb/backend/AbstractWriteInterface.java b/engine/backend/common/src/main/java/com/torodb/backend/AbstractWriteInterface.java deleted file mode 100644 index a7e8897d..00000000 --- a/engine/backend/common/src/main/java/com/torodb/backend/AbstractWriteInterface.java +++ /dev/null @@ -1,235 +0,0 @@ -/* - * ToroDB - * Copyright © 2014 8Kdata Technology (www.8kdata.com) - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free 
Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . - */ - -package com.torodb.backend; - -import com.torodb.backend.ErrorHandler.Context; -import com.torodb.core.cursors.Cursor; -import com.torodb.core.d2r.DocPartData; -import com.torodb.core.d2r.DocPartRow; -import com.torodb.core.exceptions.user.UserException; -import com.torodb.core.transaction.metainf.FieldType; -import com.torodb.core.transaction.metainf.MetaCollection; -import com.torodb.core.transaction.metainf.MetaDocPart; -import com.torodb.core.transaction.metainf.MetaField; -import com.torodb.core.transaction.metainf.MetaScalar; -import com.torodb.kvdocument.values.KvValue; -import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.jooq.DSLContext; -import org.jooq.exception.DataAccessException; - -import java.sql.Connection; -import java.sql.PreparedStatement; -import java.sql.SQLException; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Iterator; -import java.util.List; - -import javax.annotation.Nonnull; -import javax.inject.Singleton; - -/** - * - */ -@Singleton -@SuppressFBWarnings("SQL_PREPARED_STATEMENT_GENERATED_FROM_NONCONSTANT_STRING") -public abstract class AbstractWriteInterface implements WriteInterface { - - private static final Logger LOGGER = LogManager.getLogger(AbstractWriteInterface.class); - - private final MetaDataReadInterface metaDataReadInterface; - private final ErrorHandler errorHandler; - private final SqlHelper sqlHelper; - - public AbstractWriteInterface(MetaDataReadInterface metaDataReadInterface, - ErrorHandler errorHandler, - SqlHelper sqlHelper) { - super(); - this.metaDataReadInterface = metaDataReadInterface; - this.errorHandler = errorHandler; - this.sqlHelper = sqlHelper; - } - - @Override - public long deleteCollectionDocParts(@Nonnull DSLContext dsl, - @Nonnull String schemaName, @Nonnull MetaCollection metaCollection, - @Nonnull Cursor didCursor - ) { - Connection c = dsl.configuration().connectionProvider().acquire(); - try { - int maxBatchSize = 100; - long deleted = 0; - - while (didCursor.hasNext()) { - Collection dids = didCursor.getNextBatch(maxBatchSize); - deleteCollectionDocParts(c, schemaName, metaCollection, dids); - deleted += dids.size(); - } - - return deleted; - } finally { - dsl.configuration().connectionProvider().release(c); - } - } - - @Override - public void deleteCollectionDocParts(@Nonnull DSLContext dsl, - @Nonnull String schemaName, @Nonnull MetaCollection metaCollection, - @Nonnull Collection dids - ) { - Connection c = dsl.configuration().connectionProvider().acquire(); - try { - deleteCollectionDocParts(c, schemaName, metaCollection, dids); - } finally { - dsl.configuration().connectionProvider().release(c); - } - } - - private void deleteCollectionDocParts(Connection c, String schemaName, - MetaCollection metaCollection, - Collection dids) { - Iterator iterator = metaCollection.streamContainedMetaDocParts() - .sorted(TableRefComparator.MetaDocPart.DESC).iterator(); - while (iterator.hasNext()) { - MetaDocPart 
metaDocPart = iterator.next(); - String statement = getDeleteDocPartsStatement(schemaName, metaDocPart.getIdentifier(), dids); - - sqlHelper.executeUpdate(c, statement, Context.DELETE); - - LOGGER.trace("Executed {}", statement); - } - } - - protected abstract String getDeleteDocPartsStatement(String schemaName, String tableName, - Collection dids); - - @Override - public void insertDocPartData(DSLContext dsl, String schemaName, DocPartData docPartData) throws - UserException { - Iterator docPartRowIterator = docPartData.iterator(); - if (!docPartRowIterator.hasNext()) { - return; - } - - try { - MetaDocPart metaDocPart = docPartData.getMetaDocPart(); - Iterator metaScalarIterator = docPartData.orderedMetaScalarIterator(); - Iterator metaFieldIterator = docPartData.orderedMetaFieldIterator(); - standardInsertDocPartData(dsl, schemaName, docPartData, metaDocPart, metaScalarIterator, - metaFieldIterator, docPartRowIterator); - } catch (DataAccessException ex) { - throw errorHandler.handleUserException(Context.INSERT, ex); - } - } - - protected int getMaxBatchSize() { - return 30; - } - - protected void standardInsertDocPartData(DSLContext dsl, String schemaName, - DocPartData docPartData, MetaDocPart metaDocPart, - Iterator metaScalarIterator, Iterator metaFieldIterator, - Iterator docPartRowIterator) throws UserException { - final int maxBatchSize = getMaxBatchSize(); - Collection> internalFields = metaDataReadInterface.getInternalFields( - metaDocPart); - List fieldTypeList = new ArrayList<>(); - String statement = getInsertDocPartDataStatement(schemaName, metaDocPart, metaFieldIterator, - metaScalarIterator, - internalFields, fieldTypeList); - assert assertFieldTypeListIsConsistent(docPartData, fieldTypeList) : - "fieldTypeList should be an ordered list of FieldType" - + " from MetaScalar and MetaField following the the ordering of " - + "DocPartData.orderedMetaScalarIterator and DocPartData.orderedMetaFieldIterator"; - - Connection connection = dsl.configuration().connectionProvider().acquire(); - try { - try (PreparedStatement preparedStatement = connection.prepareStatement(statement)) { - int docCounter = 0; - while (docPartRowIterator.hasNext()) { - DocPartRow docPartRow = docPartRowIterator.next(); - docCounter++; - int parameterIndex = 1; - for (InternalField internalField : internalFields) { - internalField.set(preparedStatement, parameterIndex, docPartRow); - parameterIndex++; - } - Iterator fieldTypeIterator = fieldTypeList.iterator(); - for (KvValue value : docPartRow.getScalarValues()) { - sqlHelper.setPreparedStatementNullableValue( - preparedStatement, parameterIndex++, - fieldTypeIterator.next(), - value); - } - for (KvValue value : docPartRow.getFieldValues()) { - sqlHelper.setPreparedStatementNullableValue( - preparedStatement, parameterIndex++, - fieldTypeIterator.next(), - value); - } - preparedStatement.addBatch(); - - if (LOGGER.isTraceEnabled()) { - LOGGER.trace("Added to insert {}", preparedStatement.toString()); - } - - if (docCounter % maxBatchSize == 0 || !docPartRowIterator.hasNext()) { - preparedStatement.executeBatch(); - - LOGGER.trace("Insertion batch executed"); - } - } - } - } catch (SQLException ex) { - throw errorHandler.handleUserException(Context.INSERT, ex); - } finally { - dsl.configuration().connectionProvider().release(connection); - } - } - - protected abstract String getInsertDocPartDataStatement( - String schemaName, - MetaDocPart metaDocPart, - Iterator metaFieldIterator, - Iterator metaScalarIterator, - Collection> internalFields, - List 
fieldTypeList); - - private boolean assertFieldTypeListIsConsistent(DocPartData docPartData, - List fieldTypeList) { - Iterator metaScalarIterator = docPartData.orderedMetaScalarIterator(); - Iterator metaFieldIterator = docPartData.orderedMetaFieldIterator(); - Iterator fieldTypeIterator = fieldTypeList.iterator(); - while (metaScalarIterator.hasNext()) { - if (!fieldTypeIterator.hasNext() || !metaScalarIterator.next().getType().equals( - fieldTypeIterator.next())) { - return false; - } - } - while (metaFieldIterator.hasNext()) { - if (!fieldTypeIterator.hasNext() || !metaFieldIterator.next().getType().equals( - fieldTypeIterator.next())) { - return false; - } - } - return true; - } -} diff --git a/engine/backend/common/src/main/java/com/torodb/backend/BackendBundleImpl.java b/engine/backend/common/src/main/java/com/torodb/backend/BackendBundleImpl.java deleted file mode 100644 index 205af85e..00000000 --- a/engine/backend/common/src/main/java/com/torodb/backend/BackendBundleImpl.java +++ /dev/null @@ -1,80 +0,0 @@ -/* - * ToroDB - * Copyright © 2014 8Kdata Technology (www.8kdata.com) - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . - */ - -package com.torodb.backend; - -import com.google.inject.assistedinject.Assisted; -import com.torodb.core.backend.BackendBundle; -import com.torodb.core.backend.BackendConnection; -import com.torodb.core.backend.BackendService; -import com.torodb.core.backend.ExclusiveWriteBackendTransaction; -import com.torodb.core.modules.AbstractBundle; -import com.torodb.core.supervision.Supervisor; - -import java.util.concurrent.ThreadFactory; - -import javax.inject.Inject; - -/** - * - */ -public class BackendBundleImpl extends AbstractBundle implements BackendBundle { - - private final DbBackendService lowLevelService; - private final BackendService backendService; - - @Inject - public BackendBundleImpl(DbBackendService lowLevelService, - BackendServiceImpl backendService, ThreadFactory threadFactory, - @Assisted Supervisor supervisor) { - super(threadFactory, supervisor); - this.lowLevelService = lowLevelService; - this.backendService = backendService; - } - - @Override - protected void postDependenciesStartUp() throws Exception { - lowLevelService.startAsync(); - lowLevelService.awaitRunning(); - - backendService.startAsync(); - backendService.awaitRunning(); - - try (BackendConnection conn = backendService.openConnection(); - ExclusiveWriteBackendTransaction trans = conn.openExclusiveWriteTransaction()) { - - trans.checkOrCreateMetaDataTables(); - trans.commit(); - } - } - - @Override - protected void preDependenciesShutDown() throws Exception { - backendService.stopAsync(); - backendService.awaitTerminated(); - - lowLevelService.stopAsync(); - lowLevelService.awaitTerminated(); - } - - @Override - public BackendService getBackendService() { - return backendService; - } - -} diff --git a/engine/backend/common/src/main/java/com/torodb/backend/BackendConfiguration.java 
b/engine/backend/common/src/main/java/com/torodb/backend/BackendConfiguration.java deleted file mode 100644 index 8ffe0d68..00000000 --- a/engine/backend/common/src/main/java/com/torodb/backend/BackendConfiguration.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * ToroDB - * Copyright © 2014 8Kdata Technology (www.8kdata.com) - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . - */ - -package com.torodb.backend; - -import javax.annotation.Nonnegative; - -/** - * Configuration data for the backend - */ -public interface BackendConfiguration { - - long getCursorTimeout(); - - long getConnectionPoolTimeout(); - - int getConnectionPoolSize(); - - int getReservedReadPoolSize(); - - String getUsername(); - - String getPassword(); - - String getDbHost(); - - String getDbName(); - - @Nonnegative - int getDbPort(); - - boolean includeForeignKeys(); -} diff --git a/engine/backend/common/src/main/java/com/torodb/backend/BackendConnectionImpl.java b/engine/backend/common/src/main/java/com/torodb/backend/BackendConnectionImpl.java deleted file mode 100644 index 4f90e60c..00000000 --- a/engine/backend/common/src/main/java/com/torodb/backend/BackendConnectionImpl.java +++ /dev/null @@ -1,128 +0,0 @@ -/* - * ToroDB - * Copyright © 2014 8Kdata Technology (www.8kdata.com) - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . 
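
The deleted AbstractWriteInterface above flushes inserts in JDBC batches: rows are bound to a single PreparedStatement, addBatch() is called per row, and executeBatch() runs every getMaxBatchSize() rows (30 by default) plus once more for the trailing partial batch. A minimal sketch of that pattern with plain JDBC; the table, column names and Object[] rows are illustrative only, since the real code binds DocPartRow values through SqlHelper and InternalField instead.

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.util.List;

final class BatchInsertSketch {

  /**
   * Inserts rows in JDBC batches, flushing every maxBatchSize rows and once more for the
   * trailing partial batch, as standardInsertDocPartData does.
   */
  static void insertAll(Connection connection, List<Object[]> rows) throws SQLException {
    final int maxBatchSize = 30; // same default as AbstractWriteInterface.getMaxBatchSize()
    String sql = "INSERT INTO \"demo_schema\".\"demo_table\" (\"did\", \"payload\") VALUES (?, ?)";
    try (PreparedStatement ps = connection.prepareStatement(sql)) {
      int rowCounter = 0;
      for (Object[] row : rows) {
        for (int i = 0; i < row.length; i++) {
          ps.setObject(i + 1, row[i]);
        }
        ps.addBatch();
        rowCounter++;
        if (rowCounter % maxBatchSize == 0 || rowCounter == rows.size()) {
          ps.executeBatch();
        }
      }
    }
  }
}
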
- */ - -package com.torodb.backend; - -import com.google.common.base.Preconditions; -import com.torodb.backend.meta.SchemaUpdater; -import com.torodb.core.backend.BackendConnection; -import com.torodb.core.backend.BackendTransaction; -import com.torodb.core.backend.ExclusiveWriteBackendTransaction; -import com.torodb.core.backend.ReadOnlyBackendTransaction; -import com.torodb.core.backend.SharedWriteBackendTransaction; -import com.torodb.core.d2r.IdentifierFactory; -import com.torodb.core.d2r.ReservedIdGenerator; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; - -/** - * - */ -public class BackendConnectionImpl implements BackendConnection { - - private static final Logger LOGGER = LogManager.getLogger(BackendConnectionImpl.class); - private final BackendServiceImpl backend; - private final SqlInterface sqlInterface; - private boolean closed = false; - private final IdentifierFactory identifierFactory; - private final ReservedIdGenerator ridGenerator; - private BackendTransaction currentTransaction; - - public BackendConnectionImpl(BackendServiceImpl backend, - SqlInterface sqlInterface, ReservedIdGenerator ridGenerator, - IdentifierFactory identifierFactory) { - this.backend = backend; - this.sqlInterface = sqlInterface; - this.identifierFactory = identifierFactory; - this.ridGenerator = ridGenerator; - } - - @Override - public ReadOnlyBackendTransaction openReadOnlyTransaction() { - Preconditions.checkState(!closed, "This connection is closed"); - Preconditions.checkState(currentTransaction == null, - "Another transaction is currently under execution. Transaction is " + currentTransaction); - - ReadOnlyBackendTransactionImpl transaction = new ReadOnlyBackendTransactionImpl(sqlInterface, - this); - currentTransaction = transaction; - - return transaction; - } - - @Override - public SharedWriteBackendTransaction openSharedWriteTransaction() { - Preconditions.checkState(!closed, "This connection is closed"); - Preconditions.checkState(currentTransaction == null, - "Another transaction is currently under execution. Transaction is " + currentTransaction); - - SharedWriteBackendTransactionImpl transaction = new SharedWriteBackendTransactionImpl( - sqlInterface, this, identifierFactory); - currentTransaction = transaction; - - return transaction; - } - - @Override - public ExclusiveWriteBackendTransaction openExclusiveWriteTransaction() { - Preconditions.checkState(!closed, "This connection is closed"); - Preconditions.checkState(currentTransaction == null, - "Another transaction is currently under execution. 
Transaction is " + currentTransaction); - - ExclusiveWriteBackendTransactionImpl transaction = new ExclusiveWriteBackendTransactionImpl( - sqlInterface, this, identifierFactory, ridGenerator); - currentTransaction = transaction; - - return transaction; - } - - KvMetainfoHandler getMetaInfoHandler() { - return backend.getMetaInfoHandler(); - } - - SchemaUpdater getSchemaUpdater() { - return backend.getSchemaUpdater(); - } - - @Override - public void close() { - if (!closed) { - closed = true; - if (currentTransaction != null) { - currentTransaction.close(); - } - assert currentTransaction == null; - backend.onConnectionClosed(this); - } - } - - void onTransactionClosed(BackendTransaction transaction) { - if (currentTransaction == null) { - LOGGER.debug( - "Recived an on transaction close notification, but there is no current transaction"); - return; - } - if (currentTransaction != transaction) { - LOGGER.debug("Recived an on transaction close notification, but the recived transaction is " - + "not the same as the current one"); - return; - } - currentTransaction = null; - } - -} diff --git a/engine/backend/common/src/main/java/com/torodb/backend/BackendServiceImpl.java b/engine/backend/common/src/main/java/com/torodb/backend/BackendServiceImpl.java deleted file mode 100644 index c2fdb2ff..00000000 --- a/engine/backend/common/src/main/java/com/torodb/backend/BackendServiceImpl.java +++ /dev/null @@ -1,290 +0,0 @@ -/* - * ToroDB - * Copyright © 2014 8Kdata Technology (www.8kdata.com) - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . 
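
BackendConnectionImpl above allows at most one open transaction per connection (the Preconditions.checkState guards) and relies on close() to notify the owning service. Typical write usage, mirroring BackendBundleImpl.postDependenciesStartUp earlier in this patch; backendService is assumed to be an already started BackendService.

import com.torodb.core.backend.BackendConnection;
import com.torodb.core.backend.BackendService;
import com.torodb.core.backend.ExclusiveWriteBackendTransaction;

final class MetaDataBootstrapSketch {

  /** One connection, one transaction at a time, explicit commit before close. */
  static void checkOrCreateMetaDataTables(BackendService backendService) throws Exception {
    try (BackendConnection conn = backendService.openConnection();
        ExclusiveWriteBackendTransaction trans = conn.openExclusiveWriteTransaction()) {
      trans.checkOrCreateMetaDataTables();
      trans.commit();
      // closing without commit() would roll the work back on transaction close
    }
  }
}
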
- */ - -package com.torodb.backend; - -import com.torodb.backend.ErrorHandler.Context; -import com.torodb.backend.meta.SchemaUpdater; -import com.torodb.core.annotations.TorodbIdleService; -import com.torodb.core.backend.BackendConnection; -import com.torodb.core.backend.BackendService; -import com.torodb.core.concurrent.ConcurrentToolsFactory; -import com.torodb.core.concurrent.StreamExecutor; -import com.torodb.core.d2r.IdentifierFactory; -import com.torodb.core.d2r.ReservedIdGenerator; -import com.torodb.core.exceptions.SystemException; -import com.torodb.core.exceptions.ToroRuntimeException; -import com.torodb.core.exceptions.user.UserException; -import com.torodb.core.retrier.Retrier; -import com.torodb.core.retrier.Retrier.Hint; -import com.torodb.core.retrier.RetrierGiveUpException; -import com.torodb.core.services.IdleTorodbService; -import com.torodb.core.transaction.RollbackException; -import com.torodb.core.transaction.metainf.MetaCollection; -import com.torodb.core.transaction.metainf.MetaDatabase; -import com.torodb.core.transaction.metainf.MetaDocPart; -import com.torodb.core.transaction.metainf.MetaDocPartIndexColumn; -import com.torodb.core.transaction.metainf.MetaIdentifiedDocPartIndex; -import com.torodb.core.transaction.metainf.MetaSnapshot; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.jooq.DSLContext; -import org.jooq.lambda.tuple.Tuple2; - -import java.sql.Connection; -import java.sql.SQLException; -import java.util.ArrayList; -import java.util.Iterator; -import java.util.List; -import java.util.concurrent.ThreadFactory; -import java.util.function.Consumer; -import java.util.function.Function; -import java.util.stream.Stream; - -import javax.inject.Inject; - -/** - * - */ -public class BackendServiceImpl extends IdleTorodbService implements BackendService { - - private static final Logger LOGGER = LogManager.getLogger(BackendServiceImpl.class); - - private final DbBackendService dbBackendService; - private final SqlInterface sqlInterface; - private final ReservedIdGenerator ridGenerator; - private final Retrier retrier; - private final StreamExecutor streamExecutor; - private final KvMetainfoHandler metainfoHandler; - private final IdentifierFactory identifierFactory; - private final SchemaUpdater schemaUpdater; - - /** - * @param threadFactory the thread factory that will be used to create the startup and - * shutdown threads - * @param dbBackendService - * @param sqlInterface - * @param schemaUpdater - * @param metainfoHandler - * @param identifierFactory - * @param ridGenerator - * @param retrier - * @param concurrentToolsFactory - */ - @Inject - public BackendServiceImpl(@TorodbIdleService ThreadFactory threadFactory, - ReservedIdGenerator ridGenerator, DbBackendService dbBackendService, - SqlInterface sqlInterface, IdentifierFactory identifierFactory, - Retrier retrier, - ConcurrentToolsFactory concurrentToolsFactory, - KvMetainfoHandler metainfoHandler, SchemaUpdater schemaUpdater) { - super(threadFactory); - - this.dbBackendService = dbBackendService; - this.sqlInterface = sqlInterface; - this.ridGenerator = ridGenerator; - this.retrier = retrier; - this.streamExecutor = concurrentToolsFactory.createStreamExecutor("backend-inner-jobs", true); - this.metainfoHandler = metainfoHandler; - this.identifierFactory = identifierFactory; - this.schemaUpdater = schemaUpdater; - } - - @Override - public BackendConnection openConnection() { - return new BackendConnectionImpl(this, sqlInterface, ridGenerator, 
identifierFactory); - } - - @Override - public void enableDataImportMode(MetaSnapshot snapshot) throws RollbackException { - if (!sqlInterface.getDbBackend().isOnDataInsertMode()) { - if (snapshot.streamMetaDatabases().findAny().isPresent()) { - throw new IllegalStateException("Can not disable indexes if any database exists"); - } - - sqlInterface.getDbBackend().enableDataInsertMode(); - } - } - - @Override - public void disableDataImportMode(MetaSnapshot snapshot) throws RollbackException { - if (sqlInterface.getDbBackend().isOnDataInsertMode()) { - sqlInterface.getDbBackend().disableDataInsertMode(); - - //create internal indexes - Stream> createInternalIndexesJobs = snapshot.streamMetaDatabases() - .flatMap( - db -> db.streamMetaCollections().flatMap( - col -> col.streamContainedMetaDocParts().flatMap( - docPart -> enableInternalIndexJobs(db, col, docPart) - ) - ) - ); - - //create indexes - Stream> createIndexesJobs = snapshot.streamMetaDatabases().flatMap( - db -> db.streamMetaCollections().flatMap( - col -> enableIndexJobs(db, col) - ) - ); - - //backend specific jobs - Stream> backendSpecificJobs = sqlInterface.getStructureInterface() - .streamDataInsertFinishTasks(snapshot).map(job -> { - return (Consumer) dsl -> { - String index = job.apply(dsl); - LOGGER.info("Task {} completed", index); - }; - }); - Stream> jobs = Stream - .concat(createInternalIndexesJobs, createIndexesJobs); - jobs = Stream.concat(jobs, backendSpecificJobs); - Stream runnables = jobs.map(this::dslConsumerToRunnable); - - streamExecutor.executeRunnables(runnables) - .join(); - } - } - - private Stream> enableInternalIndexJobs(MetaDatabase db, MetaCollection col, - MetaDocPart docPart) { - StructureInterface structureInterface = sqlInterface.getStructureInterface(); - - Stream> consumerStream; - - if (docPart.getTableRef().isRoot()) { - consumerStream = structureInterface.streamRootDocPartTableIndexesCreation( - db.getIdentifier(), - docPart.getIdentifier(), - docPart.getTableRef() - ); - } else { - MetaDocPart parentDocPart = col.getMetaDocPartByTableRef( - docPart.getTableRef().getParent().get() - ); - assert parentDocPart != null; - consumerStream = structureInterface.streamDocPartTableIndexesCreation( - db.getIdentifier(), - docPart.getIdentifier(), - docPart.getTableRef(), - parentDocPart.getIdentifier() - ); - } - - return consumerStream.map(job -> { - return (Consumer) dsl -> { - String index = job.apply(dsl); - LOGGER.info("Created internal index {} for table {}", index, docPart.getIdentifier()); - }; - }); - } - - private Stream> enableIndexJobs(MetaDatabase db, MetaCollection col) { - List> consumerList = new ArrayList<>(); - - Iterator docPartIterator = col.streamContainedMetaDocParts().iterator(); - while (docPartIterator.hasNext()) { - MetaDocPart docPart = docPartIterator.next(); - - Iterator docPartIndexIterator = docPart.streamIndexes() - .iterator(); - while (docPartIndexIterator.hasNext()) { - MetaIdentifiedDocPartIndex docPartIndex = docPartIndexIterator.next(); - - consumerList.add(createIndexJob(db, docPart, docPartIndex)); - } - } - - return consumerList.stream(); - } - - private Consumer createIndexJob(MetaDatabase db, MetaDocPart docPart, - MetaIdentifiedDocPartIndex docPartIndex) { - return dsl -> { - List> columnList = new ArrayList<>(docPartIndex.size()); - for (Iterator indexColumnIterator = docPartIndex - .iteratorColumns(); indexColumnIterator.hasNext();) { - MetaDocPartIndexColumn indexColumn = indexColumnIterator.next(); - columnList.add(new Tuple2<>(indexColumn.getIdentifier(), 
indexColumn.getOrdering() - .isAscending())); - } - - try { - sqlInterface.getStructureInterface().createIndex( - dsl, docPartIndex.getIdentifier(), db.getIdentifier(), docPart.getIdentifier(), - columnList, - docPartIndex.isUnique()); - } catch (UserException userException) { - throw new SystemException(userException); - } - LOGGER.info("Created index {} for table {}", docPartIndex.getIdentifier(), docPart - .getIdentifier()); - }; - } - - private Runnable dslConsumerToRunnable(Consumer consumer) { - return () -> { - try { - retrier.retry(() -> { - try (Connection connection = sqlInterface.getDbBackend().createWriteConnection()) { - DSLContext dsl = sqlInterface.getDslContextFactory() - .createDslContext(connection); - - consumer.accept(dsl); - connection.commit(); - return null; - } catch (SQLException ex) { - throw sqlInterface.getErrorHandler().handleException(Context.CREATE_INDEX, ex); - } - }, Hint.CRITICAL); - } catch (RetrierGiveUpException ex) { - throw new ToroRuntimeException(ex); - } - }; - } - - @Override - protected void startUp() throws Exception { - LOGGER.debug("Starting backend..."); - - streamExecutor.startAsync(); - streamExecutor.awaitRunning(); - - LOGGER.trace("Waiting for {} to be running...", dbBackendService); - dbBackendService.awaitRunning(); - - LOGGER.debug("Backend started"); - } - - @Override - protected void shutDown() throws Exception { - streamExecutor.stopAsync(); - streamExecutor.awaitTerminated(); - } - - void onConnectionClosed(BackendConnectionImpl connection) { - } - - KvMetainfoHandler getMetaInfoHandler() { - return metainfoHandler; - } - - SchemaUpdater getSchemaUpdater() { - return schemaUpdater; - } -} diff --git a/engine/backend/common/src/main/java/com/torodb/backend/BackendTransactionImpl.java b/engine/backend/common/src/main/java/com/torodb/backend/BackendTransactionImpl.java deleted file mode 100644 index 6bb02815..00000000 --- a/engine/backend/common/src/main/java/com/torodb/backend/BackendTransactionImpl.java +++ /dev/null @@ -1,194 +0,0 @@ -/* - * ToroDB - * Copyright © 2014 8Kdata Technology (www.8kdata.com) - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . 
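
BackendServiceImpl.dslConsumerToRunnable above runs each index-creation job on a fresh write connection, commits on success, and leaves the retry policy to Retrier with Hint.CRITICAL and error translation to ErrorHandler. A simplified, self-contained stand-in that keeps only the fresh-connection-per-attempt and commit-on-success parts; writeDataSource and maxAttempts are illustrative stand-ins for ToroDB's DbBackendService and retry policy.

import org.jooq.DSLContext;
import org.jooq.impl.DSL;

import javax.sql.DataSource;
import java.sql.Connection;
import java.sql.SQLException;
import java.util.function.Consumer;

final class RetryingDdlRunnerSketch {

  /** Each attempt gets its own connection and DSLContext; success is committed, failure retried. */
  static void runWithRetry(DataSource writeDataSource, Consumer<DSLContext> job, int maxAttempts) {
    SQLException last = null;
    for (int attempt = 1; attempt <= maxAttempts; attempt++) {
      try (Connection connection = writeDataSource.getConnection()) {
        connection.setAutoCommit(false);
        DSLContext dsl = DSL.using(connection);
        job.accept(dsl);
        connection.commit();
        return;
      } catch (SQLException ex) {
        last = ex;
      }
    }
    throw new IllegalStateException("Job failed after " + maxAttempts + " attempts", last);
  }
}
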
- */ - -package com.torodb.backend; - -import com.google.common.collect.Multimap; -import com.torodb.backend.ErrorHandler.Context; -import com.torodb.core.backend.BackendCursor; -import com.torodb.core.backend.BackendTransaction; -import com.torodb.core.backend.EmptyBackendCursor; -import com.torodb.core.backend.MetaInfoKey; -import com.torodb.core.cursors.Cursor; -import com.torodb.core.cursors.EmptyCursor; -import com.torodb.core.exceptions.InvalidDatabaseException; -import com.torodb.core.transaction.metainf.MetaCollection; -import com.torodb.core.transaction.metainf.MetaDatabase; -import com.torodb.core.transaction.metainf.MetaDocPart; -import com.torodb.core.transaction.metainf.MetaField; -import com.torodb.kvdocument.values.KvValue; -import org.jooq.DSLContext; -import org.jooq.lambda.tuple.Tuple2; - -import java.sql.Connection; -import java.sql.SQLException; -import java.util.Optional; - -/** - * - */ -public abstract class BackendTransactionImpl implements BackendTransaction { - - private boolean closed = false; - private final Connection connection; - private final DSLContext dsl; - private final SqlInterface sqlInterface; - private final BackendConnectionImpl backendConnection; - - public BackendTransactionImpl(Connection connection, SqlInterface sqlInterface, - BackendConnectionImpl backendConnection) { - this.connection = connection; - this.dsl = sqlInterface.getDslContextFactory().createDslContext(connection); - this.sqlInterface = sqlInterface; - this.backendConnection = backendConnection; - } - - boolean isClosed() { - return closed; - } - - Connection getConnection() { - return connection; - } - - DSLContext getDsl() { - return dsl; - } - - SqlInterface getSqlInterface() { - return sqlInterface; - } - - BackendConnectionImpl getBackendConnection() { - return backendConnection; - } - - @Override - public long getDatabaseSize(MetaDatabase db) { - return sqlInterface.getMetaDataReadInterface().getDatabaseSize(getDsl(), db); - } - - @Override - public long countAll(MetaDatabase db, MetaCollection col) { - return sqlInterface.getReadInterface().countAll(getDsl(), db, col); - } - - @Override - public long getCollectionSize(MetaDatabase db, MetaCollection col) { - return sqlInterface.getMetaDataReadInterface().getCollectionSize(getDsl(), db, col); - } - - @Override - public long getDocumentsSize(MetaDatabase db, MetaCollection col) { - return sqlInterface.getMetaDataReadInterface().getDocumentsSize(getDsl(), db, col); - } - - @Override - public BackendCursor findAll(MetaDatabase db, MetaCollection col) { - try { - Cursor allDids = sqlInterface.getReadInterface().getAllCollectionDids(dsl, db, col); - return new LazyBackendCursor(sqlInterface, allDids, dsl, db, col); - } catch (SQLException ex) { - throw sqlInterface.getErrorHandler().handleException(Context.FETCH, ex); - } - } - - @Override - public BackendCursor findByField(MetaDatabase db, MetaCollection col, MetaDocPart docPart, - MetaField field, KvValue value) { - try { - Cursor allDids = sqlInterface.getReadInterface().getCollectionDidsWithFieldEqualsTo( - dsl, db, col, docPart, field, value); - return new LazyBackendCursor(sqlInterface, allDids, dsl, db, col); - } catch (SQLException ex) { - throw sqlInterface.getErrorHandler().handleException(Context.FETCH, ex); - } - } - - @Override - public BackendCursor findByFieldIn(MetaDatabase db, MetaCollection col, MetaDocPart docPart, - Multimap> valuesMultimap) { - try { - if (valuesMultimap.isEmpty()) { - return new EmptyBackendCursor(); - } - Cursor allDids = 
sqlInterface.getReadInterface().getCollectionDidsWithFieldsIn(dsl, - db, col, docPart, valuesMultimap); - return new LazyBackendCursor(sqlInterface, allDids, dsl, db, col); - } catch (SQLException ex) { - throw sqlInterface.getErrorHandler().handleException(Context.FETCH, ex); - } - } - - @Override - public Cursor>> findByFieldInProjection(MetaDatabase db, - MetaCollection col, MetaDocPart docPart, - Multimap> valuesMultimap) { - try { - if (valuesMultimap.isEmpty()) { - return new EmptyCursor<>(); - } - return sqlInterface.getReadInterface() - .getCollectionDidsAndProjectionWithFieldsIn(dsl, db, col, docPart, valuesMultimap); - } catch (SQLException ex) { - throw sqlInterface.getErrorHandler().handleException(Context.FETCH, ex); - } - } - - @Override - public BackendCursor fetch(MetaDatabase db, MetaCollection col, Cursor didCursor) { - return new LazyBackendCursor(sqlInterface, didCursor, dsl, db, col); - } - - @Override - public Optional> readMetaInfo(MetaInfoKey key) throws - IllegalArgumentException { - return getBackendConnection().getMetaInfoHandler().readMetaInfo(getDsl(), key); - } - - @Override - public void checkMetaDataTables() throws InvalidDatabaseException { - getSqlInterface().getStructureInterface().checkMetaDataTables(getDsl()); - } - - @Override - public void rollback() { - try { - connection.rollback(); - } catch (SQLException ex) { - sqlInterface.getErrorHandler().handleException(Context.ROLLBACK, ex); - } - } - - @Override - public void close() { - if (!closed) { - closed = true; - backendConnection.onTransactionClosed(this); - try { - connection.rollback(); - connection.close(); - } catch (SQLException ex) { - sqlInterface.getErrorHandler().handleException(Context.CLOSE, ex); - } finally { - dsl.close(); - } - } - } - -} diff --git a/engine/backend/common/src/main/java/com/torodb/backend/DataTypeProvider.java b/engine/backend/common/src/main/java/com/torodb/backend/DataTypeProvider.java deleted file mode 100644 index e53dae35..00000000 --- a/engine/backend/common/src/main/java/com/torodb/backend/DataTypeProvider.java +++ /dev/null @@ -1,34 +0,0 @@ -/* - * ToroDB - * Copyright © 2014 8Kdata Technology (www.8kdata.com) - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . 
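
A caller-facing consequence of BackendTransactionImpl.close() above: it always rolls back before releasing the connection, so reads need no commit while writes must commit explicitly before close. A read-only sketch, assuming ReadOnlyBackendTransaction exposes the BackendTransaction read methods shown above; db and col stand for metainformation values obtained elsewhere.

import com.torodb.core.backend.BackendConnection;
import com.torodb.core.backend.BackendService;
import com.torodb.core.backend.ReadOnlyBackendTransaction;
import com.torodb.core.transaction.metainf.MetaCollection;
import com.torodb.core.transaction.metainf.MetaDatabase;

final class CollectionStatsSketch {

  static long countDocuments(BackendService backendService, MetaDatabase db, MetaCollection col)
      throws Exception {
    try (BackendConnection conn = backendService.openConnection();
        ReadOnlyBackendTransaction trans = conn.openReadOnlyTransaction()) {
      // no commit needed: close() rolls back, which is harmless for a read
      return trans.countAll(db, col);
    }
  }
}
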
- */ - -package com.torodb.backend; - -import com.torodb.backend.converters.jooq.DataTypeForKv; -import com.torodb.core.transaction.metainf.FieldType; -import org.jooq.SQLDialect; - -import javax.annotation.Nonnull; - -public interface DataTypeProvider { - - @Nonnull - DataTypeForKv getDataType(FieldType type); - - @Nonnull - SQLDialect getDialect(); -} diff --git a/engine/backend/common/src/main/java/com/torodb/backend/DbBackendService.java b/engine/backend/common/src/main/java/com/torodb/backend/DbBackendService.java deleted file mode 100644 index 69142cd8..00000000 --- a/engine/backend/common/src/main/java/com/torodb/backend/DbBackendService.java +++ /dev/null @@ -1,50 +0,0 @@ -/* - * ToroDB - * Copyright © 2014 8Kdata Technology (www.8kdata.com) - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . - */ - -package com.torodb.backend; - -import com.torodb.core.services.TorodbService; - -import java.sql.Connection; - -import javax.sql.DataSource; - -public interface DbBackendService extends TorodbService { - - public DataSource getSessionDataSource(); - - public DataSource getSystemDataSource(); - - public DataSource getGlobalCursorDatasource(); - - public void disableDataInsertMode(); - - public void enableDataInsertMode(); - - public long getDefaultCursorTimeout(); - - public boolean isOnDataInsertMode(); - - public boolean includeForeignKeys(); - - public Connection createSystemConnection(); - - public Connection createReadOnlyConnection(); - - public Connection createWriteConnection(); -} diff --git a/engine/backend/common/src/main/java/com/torodb/backend/DefaultDidCursor.java b/engine/backend/common/src/main/java/com/torodb/backend/DefaultDidCursor.java deleted file mode 100644 index d61f2d2e..00000000 --- a/engine/backend/common/src/main/java/com/torodb/backend/DefaultDidCursor.java +++ /dev/null @@ -1,36 +0,0 @@ -/* - * ToroDB - * Copyright © 2014 8Kdata Technology (www.8kdata.com) - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . 
- */ - -package com.torodb.backend; - -import java.sql.ResultSet; -import java.sql.SQLException; - -import javax.annotation.Nonnull; - -public class DefaultDidCursor extends AbstractCursor { - - public DefaultDidCursor(@Nonnull ErrorHandler errorHandler, @Nonnull ResultSet resultSet) { - super(errorHandler, resultSet); - } - - @Override - protected Integer read(ResultSet resultSet) throws SQLException { - return resultSet.getInt(1); - } -} diff --git a/engine/backend/common/src/main/java/com/torodb/backend/DefaultDocPartResultCursor.java b/engine/backend/common/src/main/java/com/torodb/backend/DefaultDocPartResultCursor.java deleted file mode 100644 index 5de847ef..00000000 --- a/engine/backend/common/src/main/java/com/torodb/backend/DefaultDocPartResultCursor.java +++ /dev/null @@ -1,117 +0,0 @@ -/* - * ToroDB - * Copyright © 2014 8Kdata Technology (www.8kdata.com) - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . - */ - -package com.torodb.backend; - -import com.google.common.base.Preconditions; -import com.torodb.backend.ErrorHandler.Context; -import com.torodb.core.cursors.Cursor; -import com.torodb.core.d2r.DocPartResult; -import com.torodb.core.transaction.metainf.MetaCollection; -import com.torodb.core.transaction.metainf.MetaDatabase; -import org.jooq.DSLContext; - -import java.sql.SQLException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.NoSuchElementException; -import java.util.function.Consumer; - -import javax.annotation.Nonnull; - -/** - * - */ -public class DefaultDocPartResultCursor implements Cursor { - - private static final int BATCH_SIZE = 1000; - - private final SqlInterface sqlInterface; - private final Cursor didCursor; - private final DSLContext dsl; - private final MetaDatabase metaDatabase; - private final MetaCollection metaCollection; - - public DefaultDocPartResultCursor( - @Nonnull SqlInterface sqlInterface, - @Nonnull Cursor didCursor, - @Nonnull DSLContext dsl, - @Nonnull MetaDatabase metaDatabase, - @Nonnull MetaCollection metaCollection) { - this.sqlInterface = sqlInterface; - this.didCursor = didCursor; - this.dsl = dsl; - this.metaDatabase = metaDatabase; - this.metaCollection = metaCollection; - } - - @Override - public boolean hasNext() { - return didCursor.hasNext(); - } - - @Override - public DocPartResult next() { - if (!hasNext()) { - throw new NoSuchElementException(); - } - return getNextBatch(1).get(0); - } - - @Override - public List getRemaining() { - List allDocuments = new ArrayList<>(); - - List readedDocuments; - while (didCursor.hasNext()) { - readedDocuments = getNextBatch(BATCH_SIZE); - allDocuments.addAll(readedDocuments); - } - - return allDocuments; - } - - @Override - public List getNextBatch(int maxResults) { - Preconditions.checkArgument(maxResults > 0, "max results must be at least 1, but " + maxResults - + " was recived"); - - if (!didCursor.hasNext()) { - return 
Collections.emptyList(); - } - - try { - return sqlInterface.getReadInterface().getCollectionResultSets( - dsl, metaDatabase, metaCollection, didCursor, maxResults - ); - } catch (SQLException ex) { - throw sqlInterface.getErrorHandler().handleException(Context.FETCH, ex); - } - } - - @Override - public void forEachRemaining(Consumer action) { - getRemaining().forEach(action); - } - - @Override - public void close() { - didCursor.close(); - } -} diff --git a/engine/backend/common/src/main/java/com/torodb/backend/DelegatorField.java b/engine/backend/common/src/main/java/com/torodb/backend/DelegatorField.java deleted file mode 100644 index b151f2de..00000000 --- a/engine/backend/common/src/main/java/com/torodb/backend/DelegatorField.java +++ /dev/null @@ -1,1501 +0,0 @@ -/* - * ToroDB - * Copyright © 2014 8Kdata Technology (www.8kdata.com) - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . - */ - -package com.torodb.backend; - - -import org.jooq.BetweenAndStep; -import org.jooq.Binding; -import org.jooq.Comparator; -import org.jooq.Condition; -import org.jooq.Configuration; -import org.jooq.Converter; -import org.jooq.DataType; -import org.jooq.DatePart; -import org.jooq.Field; -import org.jooq.QuantifiedSelect; -import org.jooq.Record1; -import org.jooq.Result; -import org.jooq.Select; -import org.jooq.SortField; -import org.jooq.SortOrder; -import org.jooq.WindowIgnoreNullsStep; -import org.jooq.WindowPartitionByStep; - -import java.math.BigDecimal; -import java.util.Collection; -import java.util.Map; - -/** - * - */ -@SuppressWarnings("checkstyle:OverloadMethodsDeclarationOrder") -public class DelegatorField implements Field { - - private static final long serialVersionUID = 4060506762956191613L; - - private final Field delegate; - - public DelegatorField(Field delegate) { - this.delegate = delegate; - } - - @Override - public String getName() { - return delegate.getName(); - } - - @Override - public String getComment() { - return delegate.getComment(); - } - - @Override - public Converter getConverter() { - return delegate.getConverter(); - } - - @Override - public Binding getBinding() { - return delegate.getBinding(); - } - - @Override - public Class getType() { - return delegate.getType(); - } - - @Override - public DataType getDataType() { - return delegate.getDataType(); - } - - @Override - public DataType getDataType(Configuration configuration) { - return delegate.getDataType(configuration); - } - - @Override - public Field as(String alias) { - return delegate.as(alias); - } - - @Override - public Field as(Field otherField) { - return delegate.as(otherField); - } - - @Override - public boolean equals(Object other) { - return delegate.equals(other); - } - - @Override - public int hashCode() { - return delegate.hashCode(); - } - - @Override - public Field cast(Field field) { - return delegate.cast(field); - } - - @Override - public Field cast(DataType type) { - return delegate.cast(type); - } - 
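
DefaultDocPartResultCursor.getRemaining() above drains its did cursor in fixed batches of BATCH_SIZE = 1000 and accumulates every result. A small variation on the same pattern that hands each batch to a consumer instead of building one large list; drainInBatches, batchSize and batchConsumer are illustrative names, not ToroDB API.

import com.torodb.core.cursors.Cursor;

import java.util.List;
import java.util.function.Consumer;

final class CursorBatchSketch {

  /** Reads the cursor in fixed-size batches and processes each batch as it arrives. */
  static <T> void drainInBatches(Cursor<T> cursor, int batchSize, Consumer<List<T>> batchConsumer) {
    try {
      while (cursor.hasNext()) {
        batchConsumer.accept(cursor.getNextBatch(batchSize));
      }
    } finally {
      cursor.close();
    }
  }
}
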
- @Override - public Field cast(Class type) { - return delegate.cast(type); - } - - @Override - public Field coerce(Field field) { - return delegate.coerce(field); - } - - @Override - public Field coerce(DataType type) { - return delegate.coerce(type); - } - - @Override - public Field coerce(Class type) { - return delegate.coerce(type); - } - - @Override - public SortField asc() { - return delegate.asc(); - } - - @Override - public SortField desc() { - return delegate.desc(); - } - - @Override - public SortField sort(SortOrder order) { - return delegate.sort(order); - } - - @Override - public SortField sortAsc(Collection sortList) { - return delegate.sortAsc(sortList); - } - - @SuppressWarnings("unchecked") - @Override - public SortField sortAsc(T... sortList) { - return delegate.sortAsc(sortList); - } - - @Override - public SortField sortDesc(Collection sortList) { - return delegate.sortDesc(sortList); - } - - @SuppressWarnings("unchecked") - @Override - public SortField sortDesc(T... sortList) { - return delegate.sortDesc(sortList); - } - - @Override - public SortField sort(Map sortMap) { - return delegate.sort(sortMap); - } - - @Override - public Field neg() { - return delegate.neg(); - } - - @Override - public Field add(Number value) { - return delegate.add(value); - } - - @Override - public Field add(Field value) { - return delegate.add(value); - } - - @Override - public Field plus(Number value) { - return delegate.plus(value); - } - - @Override - public Field plus(Field value) { - return delegate.plus(value); - } - - @Override - public Field sub(Number value) { - return delegate.sub(value); - } - - @Override - public Field sub(Field value) { - return delegate.sub(value); - } - - @Override - public Field subtract(Number value) { - return delegate.subtract(value); - } - - @Override - public Field subtract(Field value) { - return delegate.subtract(value); - } - - @Override - public Field minus(Number value) { - return delegate.minus(value); - } - - @Override - public Field minus(Field value) { - return delegate.minus(value); - } - - @Override - public Field mul(Number value) { - return delegate.mul(value); - } - - @Override - public Field mul(Field value) { - return delegate.mul(value); - } - - @Override - public Field multiply(Number value) { - return delegate.multiply(value); - } - - @Override - public Field multiply(Field value) { - return delegate.multiply(value); - } - - @Override - public Field div(Number value) { - return delegate.div(value); - } - - @Override - public Field div(Field value) { - return delegate.div(value); - } - - @Override - public Field divide(Number value) { - return delegate.divide(value); - } - - @Override - public Field divide(Field value) { - return delegate.divide(value); - } - - @Override - public Field mod(Number value) { - return delegate.mod(value); - } - - @Override - public Field mod(Field value) { - return delegate.mod(value); - } - - @Override - public Field modulo(Number value) { - return delegate.modulo(value); - } - - @Override - public Field modulo(Field value) { - return delegate.modulo(value); - } - - @Override - public Field bitNot() { - return delegate.bitNot(); - } - - @Override - public Field bitAnd(T value) { - return delegate.bitAnd(value); - } - - @Override - public Field bitAnd(Field value) { - return delegate.bitAnd(value); - } - - @Override - public Field bitNand(T value) { - return delegate.bitNand(value); - } - - @Override - public Field bitNand(Field value) { - return delegate.bitNand(value); - } - - @Override - public Field 
bitOr(T value) { - return delegate.bitOr(value); - } - - @Override - public Field bitOr(Field value) { - return delegate.bitOr(value); - } - - @Override - public Field bitNor(T value) { - return delegate.bitNor(value); - } - - @Override - public Field bitNor(Field value) { - return delegate.bitNor(value); - } - - @Override - public Field bitXor(T value) { - return delegate.bitXor(value); - } - - @Override - public Field bitXor(Field value) { - return delegate.bitXor(value); - } - - @Override - public Field bitXNor(T value) { - return delegate.bitXNor(value); - } - - @Override - public Field bitXNor(Field value) { - return delegate.bitXNor(value); - } - - @Override - public Field shl(Number value) { - return delegate.shl(value); - } - - @Override - public Field shl(Field value) { - return delegate.shl(value); - } - - @Override - public Field shr(Number value) { - return delegate.shr(value); - } - - @Override - public Field shr(Field value) { - return delegate.shr(value); - } - - @Override - public Condition isNull() { - return delegate.isNull(); - } - - @Override - public Condition isNotNull() { - return delegate.isNotNull(); - } - - @Override - public Condition isDistinctFrom(T value) { - return delegate.isDistinctFrom(value); - } - - @Override - public Condition isDistinctFrom(Field field) { - return delegate.isDistinctFrom(field); - } - - @Override - public Condition isNotDistinctFrom(T value) { - return delegate.isNotDistinctFrom(value); - } - - @Override - public Condition isNotDistinctFrom(Field field) { - return delegate.isNotDistinctFrom(field); - } - - @Override - public Condition likeRegex(String pattern) { - return delegate.likeRegex(pattern); - } - - @Override - public Condition likeRegex(Field pattern) { - return delegate.likeRegex(pattern); - } - - @Override - public Condition notLikeRegex(String pattern) { - return delegate.notLikeRegex(pattern); - } - - @Override - public Condition notLikeRegex(Field pattern) { - return delegate.notLikeRegex(pattern); - } - - @Override - public Condition like(Field value) { - return delegate.like(value); - } - - @Override - public Condition like(Field value, char escape) { - return delegate.like(value, escape); - } - - @Override - public Condition like(String value) { - return delegate.like(value); - } - - @Override - public Condition like(String value, char escape) { - return delegate.like(value, escape); - } - - @Override - public Condition likeIgnoreCase(Field field) { - return delegate.likeIgnoreCase(field); - } - - @Override - public Condition likeIgnoreCase(Field field, char escape) { - return delegate.likeIgnoreCase(field, escape); - } - - @Override - public Condition likeIgnoreCase(String value) { - return delegate.likeIgnoreCase(value); - } - - @Override - public Condition likeIgnoreCase(String value, char escape) { - return delegate.likeIgnoreCase(value, escape); - } - - @Override - public Condition notLike(Field field) { - return delegate.notLike(field); - } - - @Override - public Condition notLike(Field field, char escape) { - return delegate.notLike(field, escape); - } - - @Override - public Condition notLike(String value) { - return delegate.notLike(value); - } - - @Override - public Condition notLike(String value, char escape) { - return delegate.notLike(value, escape); - } - - @Override - public Condition notLikeIgnoreCase(Field field) { - return delegate.notLikeIgnoreCase(field); - } - - @Override - public Condition notLikeIgnoreCase(Field field, char escape) { - return delegate.notLikeIgnoreCase(field, escape); - } - - 
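
DelegatorField is roughly 1,500 lines of pure delegation; the value of such a wrapper is that subclasses override only the few Field methods they care about and inherit the rest from the wrapped delegate. A hypothetical subclass (not part of ToroDB), assuming the class is generic as DelegatorField<T> implements Field<T>.

import org.jooq.Field;

/** Renames a field while inheriting every other Field operation from the delegate. */
class RenamedFieldSketch<T> extends DelegatorField<T> {

  private final String name;

  RenamedFieldSketch(Field<T> delegate, String name) {
    super(delegate);
    this.name = name;
  }

  @Override
  public String getName() {
    return name;
  }
}
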
@Override - public Condition notLikeIgnoreCase(String value) { - return delegate.notLikeIgnoreCase(value); - } - - @Override - public Condition notLikeIgnoreCase(String value, char escape) { - return delegate.notLikeIgnoreCase(value, escape); - } - - @Override - public Condition contains(T value) { - return delegate.contains(value); - } - - @Override - public Condition contains(Field value) { - return delegate.contains(value); - } - - @Override - public Condition startsWith(T value) { - return delegate.startsWith(value); - } - - @Override - public Condition startsWith(Field value) { - return delegate.startsWith(value); - } - - @Override - public Condition endsWith(T value) { - return delegate.endsWith(value); - } - - @Override - public Condition endsWith(Field value) { - return delegate.endsWith(value); - } - - @Override - public Condition in(Collection values) { - return delegate.in(values); - } - - @Override - public Condition in(Result> result) { - return delegate.in(result); - } - - @SuppressWarnings("unchecked") - @Override - public Condition in(T... values) { - return delegate.in(values); - } - - @Override - public Condition in(Field... values) { - return delegate.in(values); - } - - @Override - public Condition in(Select> query) { - return delegate.in(query); - } - - @Override - public Condition notIn(Collection values) { - return delegate.notIn(values); - } - - @Override - public Condition notIn(Result> result) { - return delegate.notIn(result); - } - - @SuppressWarnings("unchecked") - @Override - public Condition notIn(T... values) { - return delegate.notIn(values); - } - - @Override - public Condition notIn(Field... values) { - return delegate.notIn(values); - } - - @Override - public Condition notIn(Select> query) { - return delegate.notIn(query); - } - - @Override - public Condition between(T minValue, T maxValue) { - return delegate.between(minValue, maxValue); - } - - @Override - public Condition between(Field minValue, Field maxValue) { - return delegate.between(minValue, maxValue); - } - - @Override - public Condition betweenSymmetric(T minValue, T maxValue) { - return delegate.betweenSymmetric(minValue, maxValue); - } - - @Override - public Condition betweenSymmetric(Field minValue, Field maxValue) { - return delegate.betweenSymmetric(minValue, maxValue); - } - - @Override - public Condition notBetween(T minValue, T maxValue) { - return delegate.notBetween(minValue, maxValue); - } - - @Override - public Condition notBetween(Field minValue, Field maxValue) { - return delegate.notBetween(minValue, maxValue); - } - - @Override - public Condition notBetweenSymmetric(T minValue, T maxValue) { - return delegate.notBetweenSymmetric(minValue, maxValue); - } - - @Override - public Condition notBetweenSymmetric(Field minValue, Field maxValue) { - return delegate.notBetweenSymmetric(minValue, maxValue); - } - - @Override - public BetweenAndStep between(T minValue) { - return delegate.between(minValue); - } - - @Override - public BetweenAndStep between(Field minValue) { - return delegate.between(minValue); - } - - @Override - public BetweenAndStep betweenSymmetric(T minValue) { - return delegate.betweenSymmetric(minValue); - } - - @Override - public BetweenAndStep betweenSymmetric(Field minValue) { - return delegate.betweenSymmetric(minValue); - } - - @Override - public BetweenAndStep notBetween(T minValue) { - return delegate.notBetween(minValue); - } - - @Override - public BetweenAndStep notBetween(Field minValue) { - return delegate.notBetween(minValue); - } - - @Override - public 
BetweenAndStep notBetweenSymmetric(T minValue) { - return delegate.notBetweenSymmetric(minValue); - } - - @Override - public BetweenAndStep notBetweenSymmetric(Field minValue) { - return delegate.notBetweenSymmetric(minValue); - } - - @Override - public Condition compare(Comparator comparator, T value) { - return delegate.compare(comparator, value); - } - - @Override - public Condition compare(Comparator comparator, Field field) { - return delegate.compare(comparator, field); - } - - @Override - public Condition compare(Comparator comparator, Select> query) { - return delegate.compare(comparator, query); - } - - @Override - public Condition compare(Comparator comparator, QuantifiedSelect> query) { - return delegate.compare(comparator, query); - } - - @Override - public Condition equal(T value) { - return delegate.equal(value); - } - - @Override - public Condition equal(Field field) { - return delegate.equal(field); - } - - @Override - public Condition equal(Select> query) { - return delegate.equal(query); - } - - @Override - public Condition equal(QuantifiedSelect> query) { - return delegate.equal(query); - } - - @Override - public Condition eq(T value) { - return delegate.eq(value); - } - - @Override - public Condition eq(Field field) { - return delegate.eq(field); - } - - @Override - public Condition eq(Select> query) { - return delegate.eq(query); - } - - @Override - public Condition eq(QuantifiedSelect> query) { - return delegate.eq(query); - } - - @Override - public Condition notEqual(T value) { - return delegate.notEqual(value); - } - - @Override - public Condition notEqual(Field field) { - return delegate.notEqual(field); - } - - @Override - public Condition notEqual(Select> query) { - return delegate.notEqual(query); - } - - @Override - public Condition notEqual(QuantifiedSelect> query) { - return delegate.notEqual(query); - } - - @Override - public Condition ne(T value) { - return delegate.ne(value); - } - - @Override - public Condition ne(Field field) { - return delegate.ne(field); - } - - @Override - public Condition ne(Select> query) { - return delegate.ne(query); - } - - @Override - public Condition ne(QuantifiedSelect> query) { - return delegate.ne(query); - } - - @Override - public Condition lessThan(T value) { - return delegate.lessThan(value); - } - - @Override - public Condition lessThan(Field field) { - return delegate.lessThan(field); - } - - @Override - public Condition lessThan(Select> query) { - return delegate.lessThan(query); - } - - @Override - public Condition lessThan(QuantifiedSelect> query) { - return delegate.lessThan(query); - } - - @Override - public Condition lt(T value) { - return delegate.lt(value); - } - - @Override - public Condition lt(Field field) { - return delegate.lt(field); - } - - @Override - public Condition lt(Select> query) { - return delegate.lt(query); - } - - @Override - public Condition lt(QuantifiedSelect> query) { - return delegate.lt(query); - } - - @Override - public Condition lessOrEqual(T value) { - return delegate.lessOrEqual(value); - } - - @Override - public Condition lessOrEqual(Field field) { - return delegate.lessOrEqual(field); - } - - @Override - public Condition lessOrEqual(Select> query) { - return delegate.lessOrEqual(query); - } - - @Override - public Condition lessOrEqual(QuantifiedSelect> query) { - return delegate.lessOrEqual(query); - } - - @Override - public Condition le(T value) { - return delegate.le(value); - } - - @Override - public Condition le(Field field) { - return delegate.le(field); - } - - @Override - 
public Condition le(Select> query) { - return delegate.le(query); - } - - @Override - public Condition le(QuantifiedSelect> query) { - return delegate.le(query); - } - - @Override - public Condition greaterThan(T value) { - return delegate.greaterThan(value); - } - - @Override - public Condition greaterThan(Field field) { - return delegate.greaterThan(field); - } - - @Override - public Condition greaterThan(Select> query) { - return delegate.greaterThan(query); - } - - @Override - public Condition greaterThan(QuantifiedSelect> query) { - return delegate.greaterThan(query); - } - - @Override - public Condition gt(T value) { - return delegate.gt(value); - } - - @Override - public Condition gt(Field field) { - return delegate.gt(field); - } - - @Override - public Condition gt(Select> query) { - return delegate.gt(query); - } - - @Override - public Condition gt(QuantifiedSelect> query) { - return delegate.gt(query); - } - - @Override - public Condition greaterOrEqual(T value) { - return delegate.greaterOrEqual(value); - } - - @Override - public Condition greaterOrEqual(Field field) { - return delegate.greaterOrEqual(field); - } - - @Override - public Condition greaterOrEqual(Select> query) { - return delegate.greaterOrEqual(query); - } - - @Override - public Condition greaterOrEqual(QuantifiedSelect> query) { - return delegate.greaterOrEqual(query); - } - - @Override - public Condition ge(T value) { - return delegate.ge(value); - } - - @Override - public Condition ge(Field field) { - return delegate.ge(field); - } - - @Override - public Condition ge(Select> query) { - return delegate.ge(query); - } - - @Override - public Condition ge(QuantifiedSelect> query) { - return delegate.ge(query); - } - - @Override - public Condition isTrue() { - return delegate.isTrue(); - } - - @Override - public Condition isFalse() { - return delegate.isFalse(); - } - - @Override - public Condition equalIgnoreCase(String value) { - return delegate.equalIgnoreCase(value); - } - - @Override - public Condition equalIgnoreCase(Field value) { - return delegate.equalIgnoreCase(value); - } - - @Override - public Condition notEqualIgnoreCase(String value) { - return delegate.notEqualIgnoreCase(value); - } - - @Override - public Condition notEqualIgnoreCase(Field value) { - return delegate.notEqualIgnoreCase(value); - } - - @Override - public Field sign() { - return delegate.sign(); - } - - @Override - public Field abs() { - return delegate.abs(); - } - - @Override - public Field round() { - return delegate.round(); - } - - @Override - public Field round(int decimals) { - return delegate.round(decimals); - } - - @Override - public Field floor() { - return delegate.floor(); - } - - @Override - public Field ceil() { - return delegate.ceil(); - } - - @Override - public Field sqrt() { - return delegate.sqrt(); - } - - @Override - public Field exp() { - return delegate.exp(); - } - - @Override - public Field ln() { - return delegate.ln(); - } - - @Override - public Field log(int base) { - return delegate.log(base); - } - - @Override - public Field pow(Number exponent) { - return delegate.pow(exponent); - } - - @Override - public Field power(Number exponent) { - return delegate.power(exponent); - } - - @Override - public Field acos() { - return delegate.acos(); - } - - @Override - public Field asin() { - return delegate.asin(); - } - - @Override - public Field atan() { - return delegate.atan(); - } - - @Override - public Field atan2(Number y) { - return delegate.atan2(y); - } - - @Override - public Field atan2(Field y) { - return 
delegate.atan2(y); - } - - @Override - public Field cos() { - return delegate.cos(); - } - - @Override - public Field sin() { - return delegate.sin(); - } - - @Override - public Field tan() { - return delegate.tan(); - } - - @Override - public Field cot() { - return delegate.cot(); - } - - @Override - public Field sinh() { - return delegate.sinh(); - } - - @Override - public Field cosh() { - return delegate.cosh(); - } - - @Override - public Field tanh() { - return delegate.tanh(); - } - - @Override - public Field coth() { - return delegate.coth(); - } - - @Override - public Field deg() { - return delegate.deg(); - } - - @Override - public Field rad() { - return delegate.rad(); - } - - @Override - public Field count() { - return delegate.count(); - } - - @Override - public Field countDistinct() { - return delegate.countDistinct(); - } - - @Override - public Field max() { - return delegate.max(); - } - - @Override - public Field min() { - return delegate.min(); - } - - @Override - public Field sum() { - return delegate.sum(); - } - - @Override - public Field avg() { - return delegate.avg(); - } - - @Override - public Field median() { - return delegate.median(); - } - - @Override - public Field stddevPop() { - return delegate.stddevPop(); - } - - @Override - public Field stddevSamp() { - return delegate.stddevSamp(); - } - - @Override - public Field varPop() { - return delegate.varPop(); - } - - @Override - public Field varSamp() { - return delegate.varSamp(); - } - - @Override - public WindowPartitionByStep countOver() { - return delegate.countOver(); - } - - @Override - public WindowPartitionByStep maxOver() { - return delegate.maxOver(); - } - - @Override - public WindowPartitionByStep minOver() { - return delegate.minOver(); - } - - @Override - public WindowPartitionByStep sumOver() { - return delegate.sumOver(); - } - - @Override - public WindowPartitionByStep avgOver() { - return delegate.avgOver(); - } - - @Override - public WindowIgnoreNullsStep firstValue() { - return delegate.firstValue(); - } - - @Override - public WindowIgnoreNullsStep lastValue() { - return delegate.lastValue(); - } - - @Override - public WindowIgnoreNullsStep lead() { - return delegate.lead(); - } - - @Override - public WindowIgnoreNullsStep lead(int offset) { - return delegate.lead(offset); - } - - @Override - public WindowIgnoreNullsStep lead(int offset, T defaultValue) { - return delegate.lead(offset, defaultValue); - } - - @Override - public WindowIgnoreNullsStep lead(int offset, Field defaultValue) { - return delegate.lead(offset, defaultValue); - } - - @Override - public WindowIgnoreNullsStep lag() { - return delegate.lag(); - } - - @Override - public WindowIgnoreNullsStep lag(int offset) { - return delegate.lag(offset); - } - - @Override - public WindowIgnoreNullsStep lag(int offset, T defaultValue) { - return delegate.lag(offset, defaultValue); - } - - @Override - public WindowIgnoreNullsStep lag(int offset, Field defaultValue) { - return delegate.lag(offset, defaultValue); - } - - @Override - public WindowPartitionByStep stddevPopOver() { - return delegate.stddevPopOver(); - } - - @Override - public WindowPartitionByStep stddevSampOver() { - return delegate.stddevSampOver(); - } - - @Override - public WindowPartitionByStep varPopOver() { - return delegate.varPopOver(); - } - - @Override - public WindowPartitionByStep varSampOver() { - return delegate.varSampOver(); - } - - @Override - public Field upper() { - return delegate.upper(); - } - - @Override - public Field lower() { - return delegate.lower(); 
- } - - @Override - public Field trim() { - return delegate.trim(); - } - - @Override - public Field rtrim() { - return delegate.rtrim(); - } - - @Override - public Field ltrim() { - return delegate.ltrim(); - } - - @Override - public Field rpad(Field length) { - return delegate.rpad(length); - } - - @Override - public Field rpad(int length) { - return delegate.rpad(length); - } - - @Override - public Field rpad(Field length, Field character) { - return delegate.rpad(length, character); - } - - @Override - public Field rpad(int length, char character) { - return delegate.rpad(length, character); - } - - @Override - public Field lpad(Field length) { - return delegate.lpad(length); - } - - @Override - public Field lpad(int length) { - return delegate.lpad(length); - } - - @Override - public Field lpad(Field length, Field character) { - return delegate.lpad(length, character); - } - - @Override - public Field lpad(int length, char character) { - return delegate.lpad(length, character); - } - - @Override - public Field repeat(Number count) { - return delegate.repeat(count); - } - - @Override - public Field repeat(Field count) { - return delegate.repeat(count); - } - - @Override - public Field replace(Field search) { - return delegate.replace(search); - } - - @Override - public Field replace(String search) { - return delegate.replace(search); - } - - @Override - public Field replace(Field search, Field replace) { - return delegate.replace(search, replace); - } - - @Override - public Field replace(String search, String replace) { - return delegate.replace(search, replace); - } - - @Override - public Field position(String search) { - return delegate.position(search); - } - - @Override - public Field position(Field search) { - return delegate.position(search); - } - - @Override - public Field ascii() { - return delegate.ascii(); - } - - @Override - public Field concat(Field... fields) { - return delegate.concat(fields); - } - - @Override - public Field concat(String... values) { - return delegate.concat(values); - } - - @Override - public Field substring(int startingPosition) { - return delegate.substring(startingPosition); - } - - @Override - public Field substring(Field startingPosition) { - return delegate.substring(startingPosition); - } - - @Override - public Field substring(int startingPosition, int length) { - return delegate.substring(startingPosition, length); - } - - @Override - public Field substring(Field startingPosition, - Field length) { - return delegate.substring(startingPosition, length); - } - - @Override - public Field length() { - return delegate.length(); - } - - @Override - public Field charLength() { - return delegate.charLength(); - } - - @Override - public Field bitLength() { - return delegate.bitLength(); - } - - @Override - public Field octetLength() { - return delegate.octetLength(); - } - - @Override - public Field extract(DatePart datePart) { - return delegate.extract(datePart); - } - - @SuppressWarnings("unchecked") - @Override - public Field greatest(T... others) { - return delegate.greatest(others); - } - - @Override - public Field greatest(Field... others) { - return delegate.greatest(others); - } - - @SuppressWarnings("unchecked") - @Override - public Field least(T... others) { - return delegate.least(others); - } - - @Override - public Field least(Field... 
others) { - return delegate.least(others); - } - - @Override - public Field nvl(T defaultValue) { - return delegate.nvl(defaultValue); - } - - @Override - public Field nvl(Field defaultValue) { - return delegate.nvl(defaultValue); - } - - @Override - public Field nvl2(Z valueIfNotNull, Z valueIfNull) { - return delegate.nvl2(valueIfNotNull, valueIfNull); - } - - @Override - public Field nvl2(Field valueIfNotNull, Field valueIfNull) { - return delegate.nvl2(valueIfNotNull, valueIfNull); - } - - @Override - public Field nullif(T other) { - return delegate.nullif(other); - } - - @Override - public Field nullif(Field other) { - return delegate.nullif(other); - } - - @Override - public Field decode(T search, Z result) { - return delegate.decode(search, result); - } - - @Override - public Field decode(T search, Z result, Object... more) { - return delegate.decode(search, result, more); - } - - @Override - public Field decode(Field search, Field result) { - return delegate.decode(search, result); - } - - @Override - public Field decode(Field search, Field result, Field... more) { - return delegate.decode(search, result, more); - } - - @SuppressWarnings("unchecked") - @Override - public Field coalesce(T option, T... options) { - return delegate.coalesce(option, options); - } - - @Override - public Field coalesce(Field option, Field... options) { - return delegate.coalesce(option, options); - } - -} diff --git a/engine/backend/common/src/main/java/com/torodb/backend/DslContextFactory.java b/engine/backend/common/src/main/java/com/torodb/backend/DslContextFactory.java deleted file mode 100644 index b3d1b360..00000000 --- a/engine/backend/common/src/main/java/com/torodb/backend/DslContextFactory.java +++ /dev/null @@ -1,33 +0,0 @@ -/* - * ToroDB - * Copyright © 2014 8Kdata Technology (www.8kdata.com) - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . - */ - -package com.torodb.backend; - -import org.jooq.DSLContext; - -import java.sql.Connection; - -/** - * Given a connection, this factory generates a configured DSLContext. - */ -@FunctionalInterface -public interface DslContextFactory { - - public DSLContext createDslContext(Connection connection); - -} diff --git a/engine/backend/common/src/main/java/com/torodb/backend/DslContextFactoryImpl.java b/engine/backend/common/src/main/java/com/torodb/backend/DslContextFactoryImpl.java deleted file mode 100644 index db33f886..00000000 --- a/engine/backend/common/src/main/java/com/torodb/backend/DslContextFactoryImpl.java +++ /dev/null @@ -1,44 +0,0 @@ -/* - * ToroDB - * Copyright © 2014 8Kdata Technology (www.8kdata.com) - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. 
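A minimal usage sketch for the DslContextFactory contract deleted above, assuming a caller that borrows JDBC connections from a javax.sql.DataSource (the class and field names below are illustrative, not part of this changeset):

    import org.jooq.DSLContext;

    import java.sql.Connection;
    import java.sql.SQLException;

    import javax.sql.DataSource;

    public class DslContextFactoryUsageSketch {

      private final DslContextFactory dslContextFactory; // assumed injected
      private final DataSource dataSource;               // assumed injected

      public DslContextFactoryUsageSketch(DslContextFactory dslContextFactory,
          DataSource dataSource) {
        this.dslContextFactory = dslContextFactory;
        this.dataSource = dataSource;
      }

      public void ping() throws SQLException {
        // Each unit of work borrows a connection and wraps it in a DSLContext
        // configured with the backend dialect (see DslContextFactoryImpl below).
        try (Connection connection = dataSource.getConnection()) {
          DSLContext dsl = dslContextFactory.createDslContext(connection);
          dsl.selectOne().fetch(); // any jOOQ work bound to this connection
        }
      }
    }

The factory keeps dialect selection in one place: DslContextFactoryImpl resolves it from the injected DataTypeProvider, so callers never hard-code a SQLDialect.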
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . - */ - -package com.torodb.backend; - -import com.google.inject.Inject; -import org.jooq.DSLContext; -import org.jooq.impl.DSL; - -import java.sql.Connection; - -/** - * - */ -public class DslContextFactoryImpl implements DslContextFactory { - - public final DataTypeProvider dataTypeProvider; - - @Inject - public DslContextFactoryImpl(DataTypeProvider dataTypeProvider) { - super(); - this.dataTypeProvider = dataTypeProvider; - } - - @Override - public DSLContext createDslContext(Connection connection) { - return DSL.using(connection, dataTypeProvider.getDialect()); - } -} diff --git a/engine/backend/common/src/main/java/com/torodb/backend/ErrorHandler.java b/engine/backend/common/src/main/java/com/torodb/backend/ErrorHandler.java deleted file mode 100644 index c2084b7c..00000000 --- a/engine/backend/common/src/main/java/com/torodb/backend/ErrorHandler.java +++ /dev/null @@ -1,111 +0,0 @@ -/* - * ToroDB - * Copyright © 2014 8Kdata Technology (www.8kdata.com) - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . - */ - -package com.torodb.backend; - -import com.torodb.core.exceptions.ToroRuntimeException; -import com.torodb.core.exceptions.user.UserException; -import com.torodb.core.transaction.RollbackException; -import org.jooq.exception.DataAccessException; - -import java.sql.SQLException; - -public interface ErrorHandler { - - /** - * The context of the backend error that reflect the specific operation that is performed when an - * error is received. - */ - public enum Context { - UNKNOWN, - GET_CONNECTION, - CREATE_SCHEMA, - CREATE_TABLE, - ADD_COLUMN, - CREATE_INDEX, - ADD_UNIQUE_INDEX, - ADD_FOREIGN_KEY, - DROP_SCHEMA, - DROP_TABLE, - RENAME_TABLE, - RENAME_INDEX, - SET_TABLE_SCHEMA, - DROP_INDEX, - DROP_UNIQUE_INDEX, - DROP_FOREIGN_KEY, - FETCH, - META_INSERT, - META_DELETE, - INSERT, - UPDATE, - DELETE, - COMMIT, - ROLLBACK, - CLOSE - } - - /** - * Return the unchecked ToroRuntimeException exception that must be thrown. - * - * @param context - * @param sqlException - * @return an unchecked ToroRuntimeException - * @throws RollbackException if the {@code sqlException} is due to a conflict resolvable by - * repeating the operation - */ - ToroRuntimeException handleException(Context context, SQLException sqlException) throws - RollbackException; - - /** - * Return the unchecked ToroRuntimeException exception that must be thrown. 
- * - * @param context - * @param dataAccessException - * @return an unchecked ToroRuntimeException - * @throws RollbackException if the {@code dataAccessException} is due to a conflict resolvable by - * repeating the operation - */ - ToroRuntimeException handleException(Context context, DataAccessException dataAccessException) - throws RollbackException; - - /** - * Return the unchecked ToroRuntimeException exception that must be thrown. - * - * @param context - * @param sqlException - * @return an unchecked ToroRuntimeException - * @throws UserException if the {@code sqlException} is due to a user mistake - * @throws RollbackException if the {@code sqlException} is due to a conflict resolvable by - * repeating the operation - */ - ToroRuntimeException handleUserException(Context context, SQLException sqlException) throws - UserException, RollbackException; - - /** - * Return the unchecked ToroRuntimeException exception that must be thrown. - * - * @param context - * @param dataAccessException - * @return an unchecked ToroRuntimeException - * @throws UserException if the {@code dataAccessException} is due to a user mistake - * @throws RollbackException if the {@code dataAccessException} is due to a conflict resolvable by - * repeating the operation - */ - ToroRuntimeException handleUserException(Context context, DataAccessException dataAccessException) - throws UserException, RollbackException; -} diff --git a/engine/backend/common/src/main/java/com/torodb/backend/ExclusiveWriteBackendTransactionImpl.java b/engine/backend/common/src/main/java/com/torodb/backend/ExclusiveWriteBackendTransactionImpl.java deleted file mode 100644 index f4f44048..00000000 --- a/engine/backend/common/src/main/java/com/torodb/backend/ExclusiveWriteBackendTransactionImpl.java +++ /dev/null @@ -1,216 +0,0 @@ -/* - * ToroDB - * Copyright © 2014 8Kdata Technology (www.8kdata.com) - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see .
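A minimal sketch of the call pattern the ErrorHandler contract above implies: low-level code catches the raw SQLException, maps it through the handler for the operation's Context, and rethrows the resulting unchecked exception (the wrapper class below is illustrative, not part of this changeset):

    import com.torodb.backend.ErrorHandler;
    import com.torodb.backend.ErrorHandler.Context;
    import com.torodb.core.transaction.RollbackException;

    import java.sql.Connection;
    import java.sql.PreparedStatement;
    import java.sql.SQLException;

    public class ErrorHandlerUsageSketch {

      private final ErrorHandler errorHandler; // assumed injected

      public ErrorHandlerUsageSketch(ErrorHandler errorHandler) {
        this.errorHandler = errorHandler;
      }

      public void insertRow(Connection connection, String sql) throws RollbackException {
        try (PreparedStatement stmt = connection.prepareStatement(sql)) {
          stmt.executeUpdate();
        } catch (SQLException sqlException) {
          // handleException either throws RollbackException (a retryable conflict)
          // or returns the unchecked ToroRuntimeException that must be thrown here.
          throw errorHandler.handleException(Context.INSERT, sqlException);
        }
      }
    }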
- */ - -package com.torodb.backend; - -import com.google.common.base.Preconditions; -import com.torodb.core.backend.ExclusiveWriteBackendTransaction; -import com.torodb.core.d2r.IdentifierFactory; -import com.torodb.core.d2r.ReservedIdGenerator; -import com.torodb.core.exceptions.InvalidDatabaseException; -import com.torodb.core.transaction.RollbackException; -import com.torodb.core.transaction.metainf.MetaCollection; -import com.torodb.core.transaction.metainf.MetaDatabase; -import com.torodb.core.transaction.metainf.MetaDocPart; -import com.torodb.core.transaction.metainf.MetaDocPartIndexColumn; -import com.torodb.core.transaction.metainf.MetaField; -import com.torodb.core.transaction.metainf.MetaIdentifiedDocPartIndex; -import com.torodb.core.transaction.metainf.MetaIndex; -import com.torodb.core.transaction.metainf.MetaIndexField; -import com.torodb.core.transaction.metainf.MetaScalar; -import com.torodb.core.transaction.metainf.MutableMetaCollection; -import com.torodb.core.transaction.metainf.MutableMetaDatabase; -import com.torodb.core.transaction.metainf.MutableMetaDocPart; -import com.torodb.core.transaction.metainf.MutableMetaDocPartIndex; -import com.torodb.core.transaction.metainf.MutableMetaIndex; -import org.jooq.lambda.tuple.Tuple2; - -import java.util.ArrayList; -import java.util.Iterator; -import java.util.List; - -public class ExclusiveWriteBackendTransactionImpl extends SharedWriteBackendTransactionImpl - implements ExclusiveWriteBackendTransaction { - - private final ReservedIdGenerator ridGenerator; - - public ExclusiveWriteBackendTransactionImpl(SqlInterface sqlInterface, - BackendConnectionImpl backendConnection, - IdentifierFactory identifierFactory, - ReservedIdGenerator ridGenerator) { - super(sqlInterface, backendConnection, identifierFactory); - - this.ridGenerator = ridGenerator; - } - - @Override - public void renameCollection(MetaDatabase fromDb, MetaCollection fromColl, - MutableMetaDatabase toDb, MutableMetaCollection toColl) { - Preconditions.checkState(!isClosed(), "This transaction is closed"); - - copyMetaCollection(fromDb, fromColl, toDb, toColl); - getSqlInterface().getStructureInterface().renameCollection(getDsl(), fromDb.getIdentifier(), - fromColl, - toDb.getIdentifier(), toColl); - dropMetaCollection(fromDb, fromColl); - } - - @Override - public void dropAll() throws RollbackException { - getSqlInterface().getStructureInterface().dropAll(getDsl()); - } - - @Override - public void dropUserData() throws RollbackException { - getSqlInterface().getStructureInterface().dropUserData(getDsl()); - } - - @Override - public void checkOrCreateMetaDataTables() throws InvalidDatabaseException { - getBackendConnection().getSchemaUpdater().checkOrCreate(getDsl()); - } - - private void copyMetaCollection(MetaDatabase fromDb, MetaCollection fromColl, - MutableMetaDatabase toDb, MutableMetaCollection toColl) { - IdentifierFactory identifierFactory = getIdentifierFactory(); - - Iterator fromMetaIndexIterator = fromColl.streamContainedMetaIndexes() - .iterator(); - while (fromMetaIndexIterator.hasNext()) { - MetaIndex fromMetaIndex = fromMetaIndexIterator.next(); - MutableMetaIndex toMetaIndex = toColl.addMetaIndex(fromMetaIndex.getName(), fromMetaIndex - .isUnique()); - getSqlInterface().getMetaDataWriteInterface() - .addMetaIndex(getDsl(), toDb, toColl, toMetaIndex); - copyIndexFields(fromMetaIndex, toDb, toColl, toMetaIndex); - } - - Iterator fromMetaDocPartIterator = fromColl.streamContainedMetaDocParts() - .iterator(); - while (fromMetaDocPartIterator.hasNext()) 
{ - MetaDocPart fromMetaDocPart = fromMetaDocPartIterator.next(); - MutableMetaDocPart toMetaDocPart = toColl.addMetaDocPart(fromMetaDocPart.getTableRef(), - identifierFactory.toDocPartIdentifier( - toDb, toColl.getName(), fromMetaDocPart.getTableRef())); - getSqlInterface().getMetaDataWriteInterface().addMetaDocPart(getDsl(), toDb, toColl, - toMetaDocPart); - copyScalar(identifierFactory, fromMetaDocPart, toDb, toColl, toMetaDocPart); - copyFields(identifierFactory, fromMetaDocPart, toDb, toColl, toMetaDocPart); - copyIndexes(identifierFactory, fromMetaDocPart, toDb, toColl, toMetaDocPart); - int nextRid = ridGenerator.getDocPartRidGenerator(fromDb.getName(), fromColl.getName()) - .nextRid(fromMetaDocPart.getTableRef()); - ridGenerator.getDocPartRidGenerator(toDb.getName(), toColl.getName()).setNextRid(toMetaDocPart - .getTableRef(), nextRid - 1); - } - } - - private void copyIndexFields(MetaIndex fromMetaIndex, - MetaDatabase toMetaDb, MetaCollection toMetaColl, MutableMetaIndex toMetaIndex) { - Iterator fromMetaIndexFieldIterator = fromMetaIndex.iteratorFields(); - while (fromMetaIndexFieldIterator.hasNext()) { - MetaIndexField fromMetaIndexField = fromMetaIndexFieldIterator.next(); - MetaIndexField toMetaIndexField = toMetaIndex.addMetaIndexField( - fromMetaIndexField.getTableRef(), - fromMetaIndexField.getName(), - fromMetaIndexField.getOrdering()); - getSqlInterface().getMetaDataWriteInterface().addMetaIndexField( - getDsl(), toMetaDb, toMetaColl, toMetaIndex, toMetaIndexField); - } - } - - private void copyScalar(IdentifierFactory identifierFactory, MetaDocPart fromMetaDocPart, - MetaDatabase toMetaDb, MetaCollection toMetaColl, MutableMetaDocPart toMetaDocPart) { - Iterator fromMetaScalarIterator = fromMetaDocPart.streamScalars() - .iterator(); - while (fromMetaScalarIterator.hasNext()) { - MetaScalar fromMetaScalar = fromMetaScalarIterator.next(); - MetaScalar toMetaScalar = toMetaDocPart.addMetaScalar( - identifierFactory.toFieldIdentifierForScalar(fromMetaScalar.getType()), - fromMetaScalar.getType()); - getSqlInterface().getMetaDataWriteInterface().addMetaScalar( - getDsl(), toMetaDb, toMetaColl, toMetaDocPart, toMetaScalar); - } - } - - private void copyFields(IdentifierFactory identifierFactory, MetaDocPart fromMetaDocPart, - MetaDatabase toMetaDb, MetaCollection toMetaColl, MutableMetaDocPart toMetaDocPart) { - Iterator fromMetaFieldIterator = fromMetaDocPart.streamFields().iterator(); - while (fromMetaFieldIterator.hasNext()) { - MetaField fromMetaField = fromMetaFieldIterator.next(); - MetaField toMetaField = toMetaDocPart.addMetaField( - fromMetaField.getName(), - identifierFactory.toFieldIdentifier(toMetaDocPart, fromMetaField.getName(), fromMetaField - .getType()), - fromMetaField.getType()); - getSqlInterface().getMetaDataWriteInterface().addMetaField( - getDsl(), toMetaDb, toMetaColl, toMetaDocPart, toMetaField); - } - } - - private void copyIndexes(IdentifierFactory identifierFactory, MetaDocPart fromMetaDocPart, - MetaDatabase toMetaDb, MetaCollection toMetaColl, MutableMetaDocPart toMetaDocPart) { - Iterator fromMetaDocPartIndexIterator = fromMetaDocPart - .streamIndexes().iterator(); - while (fromMetaDocPartIndexIterator.hasNext()) { - MetaIdentifiedDocPartIndex fromMetaDocPartIndex = fromMetaDocPartIndexIterator.next(); - MutableMetaDocPartIndex toMutableMetaDocPartIndex = toMetaDocPart.addMetaDocPartIndex( - fromMetaDocPartIndex.isUnique()); - List> identifiers = - copyMetaIndexColumns(fromMetaDocPartIndex, toMutableMetaDocPartIndex); - 
MetaIdentifiedDocPartIndex toMetaDocPartIndex = toMutableMetaDocPartIndex.immutableCopy( - identifierFactory.toIndexIdentifier( - toMetaDb, - toMetaDocPart.getIdentifier(), - identifiers) - ); - getSqlInterface().getMetaDataWriteInterface().addMetaDocPartIndex( - getDsl(), toMetaDb, toMetaColl, toMetaDocPart, toMetaDocPartIndex); - writeIndexColumns(toMetaDb, toMetaColl, toMetaDocPart, toMetaDocPartIndex); - } - } - - private List> copyMetaIndexColumns( - MetaIdentifiedDocPartIndex fromMetaDocPartIndex, - MutableMetaDocPartIndex toMetaDocPartIndex) { - List> identifiers = new ArrayList<>(); - Iterator fromMetaDocPartIndexColumnIterator = - fromMetaDocPartIndex.iteratorColumns(); - while (fromMetaDocPartIndexColumnIterator.hasNext()) { - MetaDocPartIndexColumn fromMetaDocPartIndexColumn = fromMetaDocPartIndexColumnIterator.next(); - toMetaDocPartIndex.addMetaDocPartIndexColumn( - fromMetaDocPartIndexColumn.getIdentifier(), fromMetaDocPartIndexColumn.getOrdering()); - identifiers.add(new Tuple2<>(fromMetaDocPartIndexColumn.getIdentifier(), - fromMetaDocPartIndexColumn.getOrdering().isAscending())); - } - return identifiers; - } - - private void writeIndexColumns(MetaDatabase toMetaDb, MetaCollection toMetaColl, - MetaDocPart toMetaDocPart, - MetaIdentifiedDocPartIndex toMetaDocPartIndex) { - Iterator toMetaDocPartIndexColumnIterator = toMetaDocPartIndex - .iteratorColumns(); - while (toMetaDocPartIndexColumnIterator.hasNext()) { - MetaDocPartIndexColumn toMetaDocPartIndexColumn = toMetaDocPartIndexColumnIterator.next(); - getSqlInterface().getMetaDataWriteInterface().addMetaDocPartIndexColumn( - getDsl(), toMetaDb, toMetaColl, toMetaDocPart, toMetaDocPartIndex, - toMetaDocPartIndexColumn); - } - } - -} diff --git a/engine/backend/common/src/main/java/com/torodb/backend/InternalField.java b/engine/backend/common/src/main/java/com/torodb/backend/InternalField.java deleted file mode 100644 index 13eacd4a..00000000 --- a/engine/backend/common/src/main/java/com/torodb/backend/InternalField.java +++ /dev/null @@ -1,195 +0,0 @@ -/* - * ToroDB - * Copyright © 2014 8Kdata Technology (www.8kdata.com) - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . 
- */ - -package com.torodb.backend; - -import com.torodb.core.d2r.DocPartRow; -import org.jooq.Field; - -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.Types; - -public abstract class InternalField extends DelegatorField { - - private static final long serialVersionUID = 1L; - - public InternalField(Field field) { - super(field); - } - - public abstract void set(PreparedStatement preparedStatement, int index, DocPartRow docPartRow) - throws SQLException; - - public abstract T getValue(ResultSet rs, int index) throws SQLException; - - public abstract T getValue(DocPartRow docPartRow); - - public boolean isDid() { - return false; - } - - public boolean isRid() { - return false; - } - - public boolean isPid() { - return false; - } - - public boolean isSeq() { - return false; - } - - public boolean isNullable() { - return false; - } - - public static class DidInternalField extends InternalField { - - private static final long serialVersionUID = 1L; - - public DidInternalField(Field field) { - super(field); - } - - @Override - public boolean isDid() { - return true; - } - - @Override - public Integer getValue(ResultSet rs, int index) throws SQLException { - return rs.getInt(index); - } - - @Override - public Integer getValue(DocPartRow docPartRow) { - return docPartRow.getDid(); - } - - @Override - public void set(PreparedStatement preparedStatement, int index, DocPartRow docPartRow) throws - SQLException { - preparedStatement.setInt(index, docPartRow.getDid()); - } - } - - public static class RidInternalField extends InternalField { - - private static final long serialVersionUID = 1L; - - public RidInternalField(Field field) { - super(field); - } - - @Override - public boolean isRid() { - return true; - } - - @Override - public Integer getValue(ResultSet rs, int index) throws SQLException { - return rs.getInt(index); - } - - @Override - public Integer getValue(DocPartRow docPartRow) { - return docPartRow.getRid(); - } - - @Override - public void set(PreparedStatement preparedStatement, int index, DocPartRow docPartRow) throws - SQLException { - preparedStatement.setInt(index, docPartRow.getRid()); - } - - } - - public static class PidInternalField extends InternalField { - - private static final long serialVersionUID = 1L; - - public PidInternalField(Field field) { - super(field); - } - - @Override - public boolean isPid() { - return true; - } - - @Override - public Integer getValue(ResultSet rs, int index) throws SQLException { - return rs.getInt(index); - } - - @Override - public Integer getValue(DocPartRow docPartRow) { - return docPartRow.getPid(); - } - - @Override - public void set(PreparedStatement preparedStatement, int index, DocPartRow docPartRow) throws - SQLException { - preparedStatement.setInt(index, docPartRow.getPid()); - } - - } - - public static class SeqInternalField extends InternalField { - - private static final long serialVersionUID = 1L; - - public SeqInternalField(Field field) { - super(field); - } - - @Override - public boolean isSeq() { - return true; - } - - @Override - public boolean isNullable() { - return true; - } - - @Override - public Integer getValue(ResultSet rs, int index) throws SQLException { - return rs.getInt(index); - } - - @Override - public Integer getValue(DocPartRow docPartRow) { - return docPartRow.getSeq(); - } - - @Override - public void set(PreparedStatement preparedStatement, int index, DocPartRow docPartRow) throws - SQLException { - if (docPartRow.getSeq() != null) { - 
preparedStatement.setInt(index, docPartRow.getSeq()); - } else { - preparedStatement.setNull(index, Types.INTEGER); - } - } - - } -} diff --git a/engine/backend/common/src/main/java/com/torodb/backend/KvMetainfoHandler.java b/engine/backend/common/src/main/java/com/torodb/backend/KvMetainfoHandler.java deleted file mode 100644 index 7d3f2cd2..00000000 --- a/engine/backend/common/src/main/java/com/torodb/backend/KvMetainfoHandler.java +++ /dev/null @@ -1,148 +0,0 @@ -/* - * ToroDB - * Copyright © 2014 8Kdata Technology (www.8kdata.com) - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . - */ - -package com.torodb.backend; - -import com.torodb.core.backend.MetaInfoKey; -import com.torodb.kvdocument.values.KvBoolean; -import com.torodb.kvdocument.values.KvDouble; -import com.torodb.kvdocument.values.KvInteger; -import com.torodb.kvdocument.values.KvLong; -import com.torodb.kvdocument.values.KvNull; -import com.torodb.kvdocument.values.KvString; -import com.torodb.kvdocument.values.KvValue; -import com.torodb.kvdocument.values.KvValueAdaptor; -import com.torodb.kvdocument.values.heap.StringKvString; -import org.jooq.DSLContext; - -import java.util.Optional; - -import javax.inject.Inject; - -public class KvMetainfoHandler { - - private static final Serializer SERIALIZER = new Serializer(); - private final SqlInterface sqlInterface; - - @Inject - public KvMetainfoHandler(SqlInterface sqlInterface) { - this.sqlInterface = sqlInterface; - } - - Optional> readMetaInfo(DSLContext dsl, MetaInfoKey key) { - MetaDataReadInterface metaReadI = sqlInterface.getMetaDataReadInterface(); - - return metaReadI.readKv(dsl, key) - .map(this::fromStorableString); - } - - KvValue writeMetaInfo(DSLContext dsl, MetaInfoKey key, KvValue newValue) { - String storableString = toStorableString(newValue); - String storedString = sqlInterface.getMetaDataWriteInterface() - .writeMetaInfo(dsl, key, storableString); - if (storedString == null) { - return null; - } - return fromStorableString(storedString); - } - - private KvValue fromStorableString(String value) { - switch (value) { - case "true": - return KvBoolean.TRUE; - case "false": - return KvBoolean.FALSE; - case "null": - return KvNull.getInstance(); - default: - } - - if (value.isEmpty()) { - return new StringKvString(value); - } - char c = value.charAt(0); - if (c < '0' || c > '9') { - return new StringKvString(value); - } - try { - int i = Integer.parseInt(value); - return KvInteger.of(i); - } catch (NumberFormatException ignore) { - //just try another conversion - } - try { - long l = Long.parseLong(value); - return KvLong.of(l); - } catch (NumberFormatException ignore) { - //just try another conversion - } - try { - double d = Double.parseDouble(value); - return KvDouble.of(d); - } catch (NumberFormatException ignore) { - //just try another conversion - } - return new StringKvString(value); - } - - private String toStorableString(KvValue value) { - return 
value.accept(SERIALIZER, null); - } - - private static class Serializer extends KvValueAdaptor { - - @Override - public String defaultCase(KvValue value, Void arg) { - //TODO: Support all kind of kv values - throw new UnsupportedOperationException(value.getType() - + " is not supported as metainf value yet."); - } - - @Override - public String visit(KvString value, Void arg) { - return value.toString(); - } - - @Override - public String visit(KvDouble value, Void arg) { - return Double.toString(value.getValue()); - } - - @Override - public String visit(KvLong value, Void arg) { - return Long.toString(value.getValue()); - } - - @Override - public String visit(KvInteger value, Void arg) { - return Integer.toString(value.getValue()); - } - - @Override - public String visit(KvNull value, Void arg) { - return "null"; - } - - @Override - public String visit(KvBoolean value, Void arg) { - return Boolean.toString(value.getValue()); - } - - } - -} diff --git a/engine/backend/common/src/main/java/com/torodb/backend/LazyBackendCursor.java b/engine/backend/common/src/main/java/com/torodb/backend/LazyBackendCursor.java deleted file mode 100644 index 61ea8398..00000000 --- a/engine/backend/common/src/main/java/com/torodb/backend/LazyBackendCursor.java +++ /dev/null @@ -1,66 +0,0 @@ -/* - * ToroDB - * Copyright © 2014 8Kdata Technology (www.8kdata.com) - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . 
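The string encoding handled by KvMetainfoHandler above is heuristic: fromStorableString tries the literal keywords first, then int, long and double parses, and finally falls back to a plain string. Illustrative round trips implied by that order (the sample values are assumptions, not taken from the codebase):

    "true"         -> KvBoolean.TRUE
    "null"         -> KvNull.getInstance()
    "42"           -> KvInteger.of(42)
    "3000000000"   -> KvLong.of(3000000000L)             (overflows int, so the long parse wins)
    "1.25"         -> KvDouble.of(1.25)
    "replication"  -> new StringKvString("replication")  (first character is not a digit)
    "-5"           -> new StringKvString("-5")           (a leading '-' also falls outside '0'..'9')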
- */ - -package com.torodb.backend; - -import com.google.common.base.Preconditions; -import com.torodb.core.backend.BackendCursor; -import com.torodb.core.cursors.Cursor; -import com.torodb.core.d2r.DocPartResult; -import com.torodb.core.transaction.metainf.MetaCollection; -import com.torodb.core.transaction.metainf.MetaDatabase; -import org.jooq.DSLContext; - -import javax.annotation.Nonnull; - -/** - * - */ -public class LazyBackendCursor implements BackendCursor { - - private final Cursor didCursor; - private final DefaultDocPartResultCursor docCursor; - private boolean usedAsDocPartCursor = false; - private boolean usedAsDidCursor = false; - - public LazyBackendCursor( - @Nonnull SqlInterface sqlInterface, - final @Nonnull Cursor didCursor, - @Nonnull DSLContext dsl, - @Nonnull MetaDatabase metaDatabase, - @Nonnull MetaCollection metaCollection) { - docCursor = new DefaultDocPartResultCursor(sqlInterface, didCursor, dsl, metaDatabase, - metaCollection); - this.didCursor = didCursor; - } - - @Override - public Cursor asDocPartResultCursor() { - Preconditions.checkState(!usedAsDidCursor, "This cursor has already been used as a did cursor"); - usedAsDocPartCursor = true; - return docCursor; - } - - @Override - public Cursor asDidCursor() { - Preconditions.checkState(!usedAsDocPartCursor, - "This cursor has already been used as a doc part cursor"); - usedAsDidCursor = true; - return didCursor; - } -} diff --git a/engine/backend/common/src/main/java/com/torodb/backend/MetaDataReadInterface.java b/engine/backend/common/src/main/java/com/torodb/backend/MetaDataReadInterface.java deleted file mode 100644 index d87356a2..00000000 --- a/engine/backend/common/src/main/java/com/torodb/backend/MetaDataReadInterface.java +++ /dev/null @@ -1,144 +0,0 @@ -/* - * ToroDB - * Copyright © 2014 8Kdata Technology (www.8kdata.com) - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . 
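LazyBackendCursor above exposes one underlying result as two mutually exclusive views, guarded by the Preconditions checks. A minimal sketch of that contract (the helper class is illustrative and the generic signatures are an assumption inferred from the fields of LazyBackendCursor):

    import com.torodb.core.backend.BackendCursor;
    import com.torodb.core.cursors.Cursor;
    import com.torodb.core.d2r.DocPartResult;

    public class BackendCursorUsageSketch {

      static Cursor<DocPartResult> readAsDocuments(BackendCursor cursor) {
        // Fine on a fresh cursor: this becomes the only view this instance will serve.
        return cursor.asDocPartResultCursor();
      }

      static Cursor<Integer> readAsDids(BackendCursor cursor) {
        // Also fine on a fresh cursor, but calling this after asDocPartResultCursor()
        // on the same instance fails the Preconditions check with an IllegalStateException.
        return cursor.asDidCursor();
      }
    }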
- */ - -package com.torodb.backend; - -import com.google.common.collect.Lists; -import com.torodb.backend.tables.KvTable; -import com.torodb.backend.tables.MetaCollectionTable; -import com.torodb.backend.tables.MetaDatabaseTable; -import com.torodb.backend.tables.MetaDocPartIndexColumnTable; -import com.torodb.backend.tables.MetaDocPartIndexTable; -import com.torodb.backend.tables.MetaDocPartTable; -import com.torodb.backend.tables.MetaFieldTable; -import com.torodb.backend.tables.MetaIndexFieldTable; -import com.torodb.backend.tables.MetaIndexTable; -import com.torodb.backend.tables.MetaScalarTable; -import com.torodb.backend.tables.SemanticTable; -import com.torodb.backend.tables.records.KvRecord; -import com.torodb.backend.tables.records.MetaCollectionRecord; -import com.torodb.backend.tables.records.MetaDatabaseRecord; -import com.torodb.backend.tables.records.MetaDocPartIndexColumnRecord; -import com.torodb.backend.tables.records.MetaDocPartIndexRecord; -import com.torodb.backend.tables.records.MetaDocPartRecord; -import com.torodb.backend.tables.records.MetaFieldRecord; -import com.torodb.backend.tables.records.MetaIndexFieldRecord; -import com.torodb.backend.tables.records.MetaIndexRecord; -import com.torodb.backend.tables.records.MetaScalarRecord; -import com.torodb.core.TableRef; -import com.torodb.core.backend.MetaInfoKey; -import com.torodb.core.transaction.metainf.MetaCollection; -import com.torodb.core.transaction.metainf.MetaDatabase; -import com.torodb.core.transaction.metainf.MetaDocPart; -import org.jooq.DSLContext; - -import java.util.Collection; -import java.util.List; -import java.util.Optional; -import java.util.stream.Stream; - -import javax.annotation.Nonnull; - -public interface MetaDataReadInterface { - - @Nonnull - MetaDatabaseTable getMetaDatabaseTable(); - - @Nonnull - MetaCollectionTable getMetaCollectionTable(); - - @Nonnull - > MetaDocPartTable getMetaDocPartTable(); - - @Nonnull - > MetaFieldTable getMetaFieldTable(); - - @Nonnull - > MetaScalarTable getMetaScalarTable(); - - @Nonnull - > MetaDocPartIndexTable getMetaDocPartIndexTable(); - - @Nonnull - @SuppressWarnings("checkstyle:LineLength") - > MetaDocPartIndexColumnTable getMetaDocPartIndexColumnTable(); - - @Nonnull - MetaIndexTable getMetaIndexTable(); - - @Nonnull - > MetaIndexFieldTable getMetaIndexFieldTable(); - - @Nonnull - KvTable getKvTable(); - - @Nonnull - Collection> getInternalFields(@Nonnull MetaDocPart metaDocPart); - - @Nonnull - Collection> getInternalFields(@Nonnull TableRef tableRef); - - @Nonnull - Collection> getPrimaryKeyInternalFields(@Nonnull TableRef tableRef); - - @Nonnull - Collection> getReferenceInternalFields(@Nonnull TableRef tableRef); - - @Nonnull - Collection> getForeignInternalFields(@Nonnull TableRef tableRef); - - @Nonnull - Collection> getReadInternalFields(@Nonnull MetaDocPart metaDocPart); - - @Nonnull - Collection> getReadInternalFields(@Nonnull TableRef tableRef); - - long getDatabaseSize(@Nonnull DSLContext dsl, @Nonnull MetaDatabase database); - - long getCollectionSize(@Nonnull DSLContext dsl, @Nonnull MetaDatabase database, - @Nonnull MetaCollection collection); - - long getDocumentsSize(@Nonnull DSLContext dsl, @Nonnull MetaDatabase database, - @Nonnull MetaCollection collection); - - Long getIndexSize(@Nonnull DSLContext dsl, @Nonnull MetaDatabase database, - @Nonnull MetaCollection collection, @Nonnull String index); - - Optional readKv(@Nonnull DSLContext dsl, @Nonnull MetaInfoKey key); - - Stream readMetaDatabaseTable(DSLContext dsl); - - /** - * - * 
@return - */ - default List> getMetaTables() { - return Lists.newArrayList( - getKvTable(), - getMetaDocPartIndexColumnTable(), - getMetaDocPartIndexTable(), - getMetaIndexFieldTable(), - getMetaScalarTable(), - getMetaFieldTable(), - getMetaDocPartTable(), - getMetaIndexTable(), - getMetaCollectionTable(), - getMetaDatabaseTable() - ); - } -} diff --git a/engine/backend/common/src/main/java/com/torodb/backend/MetaDataWriteInterface.java b/engine/backend/common/src/main/java/com/torodb/backend/MetaDataWriteInterface.java deleted file mode 100644 index 39b5de85..00000000 --- a/engine/backend/common/src/main/java/com/torodb/backend/MetaDataWriteInterface.java +++ /dev/null @@ -1,103 +0,0 @@ -/* - * ToroDB - * Copyright © 2014 8Kdata Technology (www.8kdata.com) - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . - */ - -package com.torodb.backend; - -import com.torodb.core.backend.MetaInfoKey; -import com.torodb.core.transaction.metainf.MetaCollection; -import com.torodb.core.transaction.metainf.MetaDatabase; -import com.torodb.core.transaction.metainf.MetaDocPart; -import com.torodb.core.transaction.metainf.MetaDocPartIndexColumn; -import com.torodb.core.transaction.metainf.MetaField; -import com.torodb.core.transaction.metainf.MetaIdentifiedDocPartIndex; -import com.torodb.core.transaction.metainf.MetaIndex; -import com.torodb.core.transaction.metainf.MetaIndexField; -import com.torodb.core.transaction.metainf.MetaScalar; -import org.jooq.DSLContext; - -import javax.annotation.Nonnull; -import javax.annotation.Nullable; - -public interface MetaDataWriteInterface { - - void createMetaDatabaseTable(@Nonnull DSLContext dsl); - - void createMetaCollectionTable(@Nonnull DSLContext dsl); - - void createMetaDocPartTable(@Nonnull DSLContext dsl); - - void createMetaFieldTable(@Nonnull DSLContext dsl); - - void createMetaScalarTable(@Nonnull DSLContext dsl); - - void createMetaIndexTable(@Nonnull DSLContext dsl); - - void createMetaIndexFieldTable(@Nonnull DSLContext dsl); - - void createMetaDocPartIndexTable(@Nonnull DSLContext dsl); - - void createMetaFieldIndexTable(@Nonnull DSLContext dsl); - - void createKvTable(@Nonnull DSLContext dsl); - - void addMetaDatabase(@Nonnull DSLContext dsl, @Nonnull MetaDatabase database); - - void addMetaCollection(@Nonnull DSLContext dsl, @Nonnull MetaDatabase database, - @Nonnull MetaCollection collection); - - void addMetaDocPart(@Nonnull DSLContext dsl, @Nonnull MetaDatabase database, - @Nonnull MetaCollection collection, @Nonnull MetaDocPart docPart); - - void addMetaField(@Nonnull DSLContext dsl, @Nonnull MetaDatabase database, - @Nonnull MetaCollection collection, @Nonnull MetaDocPart docPart, @Nonnull MetaField field); - - void addMetaScalar(@Nonnull DSLContext dsl, @Nonnull MetaDatabase database, - @Nonnull MetaCollection collection, @Nonnull MetaDocPart docPart, @Nonnull MetaScalar scalar); - - void addMetaIndex(@Nonnull DSLContext dsl, @Nonnull MetaDatabase 
database, - @Nonnull MetaCollection collection, @Nonnull MetaIndex index); - - void addMetaIndexField(@Nonnull DSLContext dsl, @Nonnull MetaDatabase database, - @Nonnull MetaCollection collection, @Nonnull MetaIndex index, @Nonnull MetaIndexField field); - - void addMetaDocPartIndex(@Nonnull DSLContext dsl, @Nonnull MetaDatabase database, - @Nonnull MetaCollection collection, @Nonnull MetaDocPart docPart, - @Nonnull MetaIdentifiedDocPartIndex index); - - void addMetaDocPartIndexColumn(@Nonnull DSLContext dsl, @Nonnull MetaDatabase database, - @Nonnull MetaCollection collection, @Nonnull MetaDocPart docPart, - @Nonnull MetaIdentifiedDocPartIndex index, @Nonnull MetaDocPartIndexColumn field); - - void deleteMetaDatabase(@Nonnull DSLContext dsl, @Nonnull MetaDatabase database); - - void deleteMetaCollection(@Nonnull DSLContext dsl, @Nonnull MetaDatabase database, - @Nonnull MetaCollection collection); - - void deleteMetaIndex(@Nonnull DSLContext dsl, @Nonnull MetaDatabase database, - @Nonnull MetaCollection collection, @Nonnull MetaIndex index); - - void deleteMetaDocPartIndex(@Nonnull DSLContext dsl, @Nonnull MetaDatabase database, - @Nonnull MetaCollection collection, @Nonnull MetaDocPart docPart, - @Nonnull MetaIdentifiedDocPartIndex index); - - int consumeRids(@Nonnull DSLContext dsl, @Nonnull MetaDatabase database, - @Nonnull MetaCollection collection, @Nonnull MetaDocPart docPart, int count); - - @Nullable - String writeMetaInfo(@Nonnull DSLContext dsl, @Nonnull MetaInfoKey key, @Nonnull String newValue); -} diff --git a/engine/backend/common/src/main/java/com/torodb/backend/ReadInterface.java b/engine/backend/common/src/main/java/com/torodb/backend/ReadInterface.java deleted file mode 100644 index 97286177..00000000 --- a/engine/backend/common/src/main/java/com/torodb/backend/ReadInterface.java +++ /dev/null @@ -1,79 +0,0 @@ -/* - * ToroDB - * Copyright © 2014 8Kdata Technology (www.8kdata.com) - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . 
- */ - -package com.torodb.backend; - -import com.google.common.collect.Multimap; -import com.torodb.core.cursors.Cursor; -import com.torodb.core.d2r.DocPartResult; -import com.torodb.core.transaction.metainf.MetaCollection; -import com.torodb.core.transaction.metainf.MetaDatabase; -import com.torodb.core.transaction.metainf.MetaDocPart; -import com.torodb.core.transaction.metainf.MetaField; -import com.torodb.kvdocument.values.KvValue; -import org.jooq.DSLContext; -import org.jooq.lambda.tuple.Tuple2; - -import java.sql.SQLException; -import java.util.Collection; -import java.util.List; - -import javax.annotation.Nonnull; - -public interface ReadInterface { - - @Nonnull - Cursor getCollectionDidsWithFieldEqualsTo(@Nonnull DSLContext dsl, - @Nonnull MetaDatabase metaDatabase, - @Nonnull MetaCollection metaCol, @Nonnull MetaDocPart metaDocPart, - @Nonnull MetaField metaField, @Nonnull KvValue value) - throws SQLException; - - @Nonnull - public Cursor getCollectionDidsWithFieldsIn(DSLContext dsl, MetaDatabase metaDatabase, - MetaCollection metaCol, MetaDocPart metaDocPart, Multimap> valuesMap) - throws SQLException; - - @Nonnull - public Cursor>> getCollectionDidsAndProjectionWithFieldsIn( - DSLContext dsl, MetaDatabase metaDatabase, - MetaCollection metaCol, MetaDocPart metaDocPart, - Multimap> valuesMultimap) - throws SQLException; - - long countAll(@Nonnull DSLContext dsl, @Nonnull MetaDatabase database, - @Nonnull MetaCollection collection); - - @Nonnull - Cursor getAllCollectionDids(@Nonnull DSLContext dsl, @Nonnull MetaDatabase metaDatabase, - @Nonnull MetaCollection metaCollection) - throws SQLException; - - @Nonnull - List getCollectionResultSets(@Nonnull DSLContext dsl, - @Nonnull MetaDatabase metaDatabase, @Nonnull MetaCollection metaCollection, - @Nonnull Cursor didCursor, int maxSize) throws SQLException; - - @Nonnull - List getCollectionResultSets(@Nonnull DSLContext dsl, - @Nonnull MetaDatabase metaDatabase, @Nonnull MetaCollection metaCollection, - @Nonnull Collection dids) throws SQLException; - - int getLastRowIdUsed(@Nonnull DSLContext dsl, @Nonnull MetaDatabase metaDatabase, - @Nonnull MetaCollection metaCollection, @Nonnull MetaDocPart metaDocPart); -} diff --git a/engine/backend/common/src/main/java/com/torodb/backend/ReadOnlyBackendTransactionImpl.java b/engine/backend/common/src/main/java/com/torodb/backend/ReadOnlyBackendTransactionImpl.java deleted file mode 100644 index 4de0fc4f..00000000 --- a/engine/backend/common/src/main/java/com/torodb/backend/ReadOnlyBackendTransactionImpl.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * ToroDB - * Copyright © 2014 8Kdata Technology (www.8kdata.com) - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . 
- */ - -package com.torodb.backend; - -import com.torodb.core.backend.ReadOnlyBackendTransaction; - -/** - * - */ -public class ReadOnlyBackendTransactionImpl extends BackendTransactionImpl implements - ReadOnlyBackendTransaction { - - public ReadOnlyBackendTransactionImpl(SqlInterface sqlInterface, - BackendConnectionImpl backendConnection) { - super(sqlInterface.getDbBackend().createReadOnlyConnection(), - sqlInterface, backendConnection); - } - -} diff --git a/engine/backend/common/src/main/java/com/torodb/backend/SharedWriteBackendTransactionImpl.java b/engine/backend/common/src/main/java/com/torodb/backend/SharedWriteBackendTransactionImpl.java deleted file mode 100644 index 84f99253..00000000 --- a/engine/backend/common/src/main/java/com/torodb/backend/SharedWriteBackendTransactionImpl.java +++ /dev/null @@ -1,379 +0,0 @@ -/* - * ToroDB - * Copyright © 2014 8Kdata Technology (www.8kdata.com) - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . - */ - -package com.torodb.backend; - -import com.google.common.base.Preconditions; -import com.torodb.backend.ErrorHandler.Context; -import com.torodb.core.TableRef; -import com.torodb.core.backend.MetaInfoKey; -import com.torodb.core.backend.SharedWriteBackendTransaction; -import com.torodb.core.d2r.DocPartData; -import com.torodb.core.d2r.IdentifierFactory; -import com.torodb.core.exceptions.user.UserException; -import com.torodb.core.transaction.RollbackException; -import com.torodb.core.transaction.metainf.MetaCollection; -import com.torodb.core.transaction.metainf.MetaDatabase; -import com.torodb.core.transaction.metainf.MetaDocPart; -import com.torodb.core.transaction.metainf.MetaDocPartIndexColumn; -import com.torodb.core.transaction.metainf.MetaField; -import com.torodb.core.transaction.metainf.MetaIdentifiedDocPartIndex; -import com.torodb.core.transaction.metainf.MetaIndex; -import com.torodb.core.transaction.metainf.MetaIndexField; -import com.torodb.core.transaction.metainf.MetaScalar; -import com.torodb.core.transaction.metainf.MutableMetaCollection; -import com.torodb.core.transaction.metainf.MutableMetaDocPart; -import com.torodb.core.transaction.metainf.MutableMetaDocPartIndex; -import com.torodb.kvdocument.values.KvValue; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.jooq.lambda.tuple.Tuple2; - -import java.sql.SQLException; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Iterator; -import java.util.List; - -public class SharedWriteBackendTransactionImpl extends BackendTransactionImpl implements - SharedWriteBackendTransaction { - - @SuppressWarnings("checkstyle:LineLength") - private static final Logger LOGGER = LogManager.getLogger(SharedWriteBackendTransactionImpl.class); - - private final IdentifierFactory identifierFactory; - - public SharedWriteBackendTransactionImpl(SqlInterface sqlInterface, - BackendConnectionImpl backendConnection, - IdentifierFactory 
identifierFactory) { - super(sqlInterface.getDbBackend().createWriteConnection(), sqlInterface, backendConnection); - - this.identifierFactory = identifierFactory; - } - - IdentifierFactory getIdentifierFactory() { - return identifierFactory; - } - - @Override - public void addDatabase(MetaDatabase db) { - Preconditions.checkState(!isClosed(), "This transaction is closed"); - - getSqlInterface().getMetaDataWriteInterface().addMetaDatabase(getDsl(), db); - getSqlInterface().getStructureInterface().createSchema(getDsl(), db.getIdentifier()); - } - - @Override - public void addCollection(MetaDatabase db, MetaCollection newCol) { - Preconditions.checkState(!isClosed(), "This transaction is closed"); - - getSqlInterface().getMetaDataWriteInterface().addMetaCollection(getDsl(), db, newCol); - } - - @Override - public void dropCollection(MetaDatabase db, MetaCollection coll) { - Preconditions.checkState(!isClosed(), "This transaction is closed"); - - dropMetaCollection(db, coll); - getSqlInterface().getStructureInterface().dropCollection(getDsl(), db.getIdentifier(), coll); - } - - @Override - public void dropDatabase(MetaDatabase db) { - Preconditions.checkState(!isClosed(), "This transaction is closed"); - - Iterator metaCollectionIterator = db.streamMetaCollections() - .iterator(); - while (metaCollectionIterator.hasNext()) { - MetaCollection metaCollection = metaCollectionIterator.next(); - dropMetaCollection(db, metaCollection); - } - getSqlInterface().getMetaDataWriteInterface().deleteMetaDatabase(getDsl(), db); - getSqlInterface().getStructureInterface().dropDatabase(getDsl(), db); - } - - protected void dropMetaCollection(MetaDatabase database, MetaCollection coll) { - getSqlInterface().getMetaDataWriteInterface().deleteMetaCollection(getDsl(), database, coll); - } - - @Override - public void addDocPart(MetaDatabase db, MetaCollection col, MetaDocPart newDocPart) { - Preconditions.checkState(!isClosed(), "This transaction is closed"); - - getSqlInterface().getMetaDataWriteInterface().addMetaDocPart(getDsl(), db, col, - newDocPart); - - TableRef tableRef = newDocPart.getTableRef(); - if (tableRef.isRoot()) { - getSqlInterface().getStructureInterface().createRootDocPartTable(getDsl(), db.getIdentifier(), - newDocPart.getIdentifier(), tableRef); - getSqlInterface().getStructureInterface().streamRootDocPartTableIndexesCreation(db - .getIdentifier(), newDocPart.getIdentifier(), tableRef) - .forEach(consumer -> { - String index = consumer.apply(getDsl()); - LOGGER.info("Created internal index {} for table {}", index, - newDocPart.getIdentifier()); - }); - } else { - getSqlInterface().getStructureInterface().createDocPartTable(getDsl(), db.getIdentifier(), - newDocPart.getIdentifier(), tableRef, - col.getMetaDocPartByTableRef(tableRef.getParent().get()).getIdentifier()); - getSqlInterface().getStructureInterface() - .streamDocPartTableIndexesCreation(db.getIdentifier(), newDocPart.getIdentifier(), - tableRef, - col.getMetaDocPartByTableRef(tableRef.getParent().get()).getIdentifier()) - .forEach(consumer -> { - String index = consumer.apply(getDsl()); - LOGGER.info("Created internal index {} for table {}", index, - newDocPart.getIdentifier()); - }); - } - } - - @Override - public void addField(MetaDatabase db, MetaCollection col, MutableMetaDocPart docPart, - MetaField newField) throws UserException { - Preconditions.checkState(!isClosed(), "This transaction is closed"); - - getSqlInterface().getMetaDataWriteInterface().addMetaField(getDsl(), db, col, docPart, - newField); - 
getSqlInterface().getStructureInterface().addColumnToDocPartTable(getDsl(), db.getIdentifier(), - docPart.getIdentifier(), newField.getIdentifier(), getSqlInterface().getDataTypeProvider() - .getDataType(newField.getType())); - - List>> missingIndexes = col.getMissingIndexesForNewField(docPart, - newField); - - for (Tuple2> missingIndexEntry : missingIndexes) { - MetaIndex missingIndex = missingIndexEntry.v1(); - List identifiers = missingIndexEntry.v2(); - - MutableMetaDocPartIndex docPartIndex = docPart - .getOrCreatePartialMutableDocPartIndexForMissingIndexAndNewField( - missingIndex, identifiers, newField); - - if (missingIndex.isMatch(docPart, identifiers, docPartIndex)) { - List> columnList = new ArrayList<>(docPartIndex.size()); - for (String identifier : identifiers) { - MetaDocPartIndexColumn docPartIndexColumn = docPartIndex - .getMetaDocPartIndexColumnByIdentifier(identifier); - columnList.add(new Tuple2<>(docPartIndexColumn.getIdentifier(), docPartIndexColumn - .getOrdering().isAscending())); - } - MetaIdentifiedDocPartIndex identifiedDocPartIndex = docPartIndex.immutableCopy( - identifierFactory.toIndexIdentifier(db, docPart.getIdentifier(), columnList)); - - getSqlInterface().getMetaDataWriteInterface() - .addMetaDocPartIndex(getDsl(), db, col, docPart, identifiedDocPartIndex); - - for (String identifier : identifiers) { - MetaDocPartIndexColumn docPartIndexColumn = docPartIndex - .getMetaDocPartIndexColumnByIdentifier(identifier); - getSqlInterface().getMetaDataWriteInterface().addMetaDocPartIndexColumn(getDsl(), db, col, - docPart, identifiedDocPartIndex, docPartIndexColumn); - } - - getSqlInterface().getStructureInterface().createIndex(getDsl(), identifiedDocPartIndex - .getIdentifier(), db.getIdentifier(), - docPart.getIdentifier(), columnList, docPartIndex.isUnique()); - LOGGER.info("Created index {} for table {} associated to logical index {}.{}.{}", - identifiedDocPartIndex.getIdentifier(), docPart.getIdentifier(), db.getName(), col - .getName(), missingIndex.getName()); - } - } - } - - @Override - public void addScalar(MetaDatabase db, MetaCollection col, MetaDocPart docPart, - MetaScalar newScalar) { - Preconditions.checkState(!isClosed(), "This transaction is closed"); - - getSqlInterface().getMetaDataWriteInterface().addMetaScalar(getDsl(), db, col, docPart, - newScalar); - getSqlInterface().getStructureInterface().addColumnToDocPartTable(getDsl(), db.getIdentifier(), - docPart.getIdentifier(), - newScalar.getIdentifier(), getSqlInterface().getDataTypeProvider().getDataType(newScalar - .getType())); - } - - @Override - public int consumeRids(MetaDatabase db, MetaCollection col, MetaDocPart docPart, int howMany) { - Preconditions.checkState(!isClosed(), "This transaction is closed"); - - return getSqlInterface().getMetaDataWriteInterface().consumeRids(getDsl(), db, col, docPart, - howMany); - } - - @Override - public void insert(MetaDatabase db, MetaCollection col, DocPartData data) throws UserException { - Preconditions.checkState(!isClosed(), "This transaction is closed"); - - getSqlInterface().getWriteInterface().insertDocPartData(getDsl(), db.getIdentifier(), data); - } - - @Override - public void deleteDids(MetaDatabase db, MetaCollection col, Collection dids) { - Preconditions.checkState(!isClosed(), "This transaction is closed"); - - if (dids.isEmpty()) { - return; - } - - getSqlInterface().getWriteInterface() - .deleteCollectionDocParts(getDsl(), db.getIdentifier(), col, dids); - } - - @Override - public void createIndex(MetaDatabase db, 
MutableMetaCollection col, MetaIndex index) throws - UserException { - Preconditions.checkState(!isClosed(), "This transaction is closed"); - - Preconditions.checkArgument(!index.isUnique() || index.streamTableRefs().count() == 1, - "composed unique indexes on fields of different subdocuments are not supported yet"); - - getSqlInterface().getMetaDataWriteInterface().addMetaIndex(getDsl(), db, col, index); - - Iterator indexFieldIterator = index.iteratorFields(); - while (indexFieldIterator.hasNext()) { - MetaIndexField field = indexFieldIterator.next(); - getSqlInterface().getMetaDataWriteInterface().addMetaIndexField(getDsl(), db, col, index, - field); - } - - createMissingDocPartIndexes(db, col, index); - } - - private void createIndex(MetaDatabase db, MetaCollection col, MetaIndex index, - MutableMetaDocPart docPart, - List identifiers) throws UserException { - MutableMetaDocPartIndex docPartIndex = docPart.addMetaDocPartIndex(index.isUnique()); - Iterator indexFieldIterator = index.iteratorMetaIndexFieldByTableRef( - docPart.getTableRef()); - int position = 0; - List> columnList = new ArrayList<>(identifiers.size()); - for (String identifier : identifiers) { - MetaIndexField indexField = indexFieldIterator.next(); - MetaDocPartIndexColumn docPartIndexColumn = docPartIndex.putMetaDocPartIndexColumn(position++, - identifier, indexField.getOrdering()); - columnList.add(new Tuple2<>(docPartIndexColumn.getIdentifier(), docPartIndexColumn - .getOrdering().isAscending())); - } - MetaIdentifiedDocPartIndex identifiedDocPartIndex = docPartIndex.immutableCopy(identifierFactory - .toIndexIdentifier(db, docPart.getIdentifier(), columnList)); - - getSqlInterface().getMetaDataWriteInterface().addMetaDocPartIndex(getDsl(), db, col, docPart, - identifiedDocPartIndex); - - for (String identifier : identifiers) { - MetaDocPartIndexColumn docPartIndexColumn = docPartIndex - .getMetaDocPartIndexColumnByIdentifier(identifier); - getSqlInterface().getMetaDataWriteInterface().addMetaDocPartIndexColumn(getDsl(), db, col, - docPart, identifiedDocPartIndex, docPartIndexColumn); - } - - getSqlInterface().getStructureInterface().createIndex( - getDsl(), identifiedDocPartIndex.getIdentifier(), db.getIdentifier(), docPart - .getIdentifier(), - columnList, index.isUnique()); - LOGGER.info("Created index {} for table {} associated to logical index {}.{}.{}", - identifiedDocPartIndex.getIdentifier(), docPart.getIdentifier(), db.getName(), - col.getName(), index.getName()); - } - - private void createMissingDocPartIndexes(MetaDatabase db, MutableMetaCollection col, - MetaIndex index) throws UserException { - Iterator tableRefIterator = index.streamTableRefs().iterator(); - while (tableRefIterator.hasNext()) { - TableRef tableRef = tableRefIterator.next(); - MutableMetaDocPart docPart = col.getMetaDocPartByTableRef(tableRef); - if (docPart != null && index.isCompatible(docPart)) { - Iterator> docPartIndexesFieldsIterator = - index.iteratorMetaDocPartIndexesIdentifiers(docPart); - - while (docPartIndexesFieldsIterator.hasNext()) { - List identifiers = docPartIndexesFieldsIterator.next(); - boolean containsExactDocPartIndex = docPart.streamIndexes() - .anyMatch(docPartIndex -> index.isMatch(docPart, identifiers, docPartIndex)); - if (!containsExactDocPartIndex) { - createIndex(db, col, index, docPart, identifiers); - } - } - } - } - } - - @Override - public void dropIndex(MetaDatabase db, MutableMetaCollection col, MetaIndex index) { - Preconditions.checkState(!isClosed(), "This transaction is closed"); - - 
getSqlInterface().getMetaDataWriteInterface().deleteMetaIndex(getDsl(), db, col, index); - Iterator tableRefIterator = index.streamTableRefs().iterator(); - while (tableRefIterator.hasNext()) { - TableRef tableRef = tableRefIterator.next(); - MutableMetaDocPart docPart = col.getMetaDocPartByTableRef(tableRef); - if (docPart != null) { - Iterator docPartIndexesIterator = - docPart.streamIndexes().iterator(); - - while (docPartIndexesIterator.hasNext()) { - MetaIdentifiedDocPartIndex docPartIndex = docPartIndexesIterator.next(); - if (index.isCompatible(docPart, docPartIndex)) { - boolean existsAnyOtherCompatibleIndex = col.streamContainedMetaIndexes() - .anyMatch(otherIndex -> otherIndex != index && otherIndex.isCompatible(docPart, - docPartIndex)); - if (!existsAnyOtherCompatibleIndex) { - dropIndex(db, col, docPart, docPartIndex); - LOGGER.info("Dropped index {} for table {}", docPartIndex.getIdentifier(), docPart - .getIdentifier()); - } - } - } - } - } - } - - private void dropIndex(MetaDatabase db, MetaCollection col, MutableMetaDocPart docPart, - MetaIdentifiedDocPartIndex docPartIndex) { - docPart.removeMetaDocPartIndexByIdentifier(docPartIndex.getIdentifier()); - - getSqlInterface().getMetaDataWriteInterface().deleteMetaDocPartIndex(getDsl(), db, col, docPart, - docPartIndex); - - getSqlInterface().getStructureInterface().dropIndex( - getDsl(), db.getIdentifier(), docPartIndex.getIdentifier()); - } - - @Override - public KvValue writeMetaInfo(MetaInfoKey key, KvValue newValue) { - return getBackendConnection().getMetaInfoHandler() - .writeMetaInfo(getDsl(), key, newValue); - } - - @Override - public void commit() throws UserException, RollbackException { - Preconditions.checkState(!isClosed(), "This transaction is closed"); - - try { - getConnection().commit(); - } catch (SQLException ex) { - getSqlInterface().getErrorHandler().handleUserException(Context.COMMIT, ex); - } finally { - getDsl().configuration().connectionProvider().release(getConnection()); - } - } -} diff --git a/engine/backend/common/src/main/java/com/torodb/backend/SqlBuilder.java b/engine/backend/common/src/main/java/com/torodb/backend/SqlBuilder.java deleted file mode 100644 index 252ba655..00000000 --- a/engine/backend/common/src/main/java/com/torodb/backend/SqlBuilder.java +++ /dev/null @@ -1,76 +0,0 @@ -/* - * ToroDB - * Copyright © 2014 8Kdata Technology (www.8kdata.com) - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . 
- */ - -package com.torodb.backend; - -public class SqlBuilder { - - private final StringBuilder sb; - - public SqlBuilder(String init) { - this.sb = new StringBuilder(); - this.sb.append(init); - } - - public SqlBuilder(StringBuilder sb) { - this.sb = sb; - } - - public SqlBuilder append(String str) { - sb.append(str); - return this; - } - - public SqlBuilder append(char c) { - sb.append(c); - return this; - } - - public SqlBuilder quote(String str) { - sb.append('"').append(str).append('"'); - return this; - } - - public SqlBuilder quote(Enum enumValue) { - sb.append('"').append(enumValue.toString()).append('"'); - return this; - } - - public SqlBuilder table(String schema, String table) { - sb.append('"').append(schema).append("\".\"").append(table).append('"'); - return this; - } - - public SqlBuilder setLastChar(char c) { - sb.setCharAt(sb.length() - 1, c); - return this; - } - - public SqlBuilder setCharAt(int index, char c) { - sb.setCharAt(index, c); - return this; - } - - public int length() { - return sb.length(); - } - - public String toString() { - return sb.toString(); - } -} diff --git a/engine/backend/common/src/main/java/com/torodb/backend/SqlHelper.java b/engine/backend/common/src/main/java/com/torodb/backend/SqlHelper.java deleted file mode 100644 index 5509801c..00000000 --- a/engine/backend/common/src/main/java/com/torodb/backend/SqlHelper.java +++ /dev/null @@ -1,204 +0,0 @@ -/* - * ToroDB - * Copyright © 2014 8Kdata Technology (www.8kdata.com) - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . 
- */ - -package com.torodb.backend; - -import com.torodb.backend.ErrorHandler.Context; -import com.torodb.backend.converters.jooq.DataTypeForKv; -import com.torodb.backend.converters.jooq.KvValueConverter; -import com.torodb.backend.converters.sql.SqlBinding; -import com.torodb.core.exceptions.user.UserException; -import com.torodb.core.transaction.metainf.FieldType; -import com.torodb.kvdocument.values.KvValue; -import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; -import org.jooq.Converter; -import org.jooq.DSLContext; -import org.jooq.Record; -import org.jooq.Result; -import org.jooq.impl.DSL; - -import java.sql.Connection; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; - -import javax.inject.Inject; -import javax.inject.Singleton; - -@Singleton -@SuppressFBWarnings("SQL_PREPARED_STATEMENT_GENERATED_FROM_NONCONSTANT_STRING") -public class SqlHelper { - - private final DataTypeProvider dataTypeProvider; - private final ErrorHandler errorHandler; - - @Inject - public SqlHelper(DataTypeProvider dataTypeProvider, ErrorHandler errorHandler) { - super(); - this.dataTypeProvider = dataTypeProvider; - this.errorHandler = errorHandler; - } - - public void executeStatement(DSLContext dsl, String statement, Context context) { - Connection c = dsl.configuration().connectionProvider().acquire(); - try (PreparedStatement ps = c.prepareStatement(statement)) { - ps.execute(); - } catch (SQLException ex) { - throw errorHandler.handleException(context, ex); - } finally { - dsl.configuration().connectionProvider().release(c); - } - } - - @FunctionalInterface - public interface SetupPreparedStatement { - - public void accept(PreparedStatement ps) throws SQLException; - } - - public Result executeStatementWithResult(DSLContext dsl, String statement, - Context context) { - return executeStatementWithResult(dsl, statement, context, ps -> { - }); - } - - public Result executeStatementWithResult(DSLContext dsl, String statement, - Context context, - SetupPreparedStatement statementSetup) { - Connection c = dsl.configuration().connectionProvider().acquire(); - try (PreparedStatement ps = c.prepareStatement(statement)) { - statementSetup.accept(ps); - try (ResultSet resultSet = ps.executeQuery()) { - return dsl.fetch(resultSet); - } - } catch (SQLException ex) { - throw errorHandler.handleException(context, ex); - } finally { - dsl.configuration().connectionProvider().release(c); - } - } - - public int executeUpdate(DSLContext dsl, String statement, Context context) { - Connection c = dsl.configuration().connectionProvider().acquire(); - try (PreparedStatement ps = c.prepareStatement(statement)) { - return ps.executeUpdate(); - } catch (SQLException ex) { - throw errorHandler.handleException(context, ex); - } finally { - dsl.configuration().connectionProvider().release(c); - } - } - - public int executeUpdate(Connection c, String statement, Context context) { - try (PreparedStatement ps = c.prepareStatement(statement)) { - return ps.executeUpdate(); - } catch (SQLException ex) { - throw errorHandler.handleException(context, ex); - } - } - - public int executeUpdateOrThrow(DSLContext dsl, String statement, Context context) throws - UserException { - Connection c = dsl.configuration().connectionProvider().acquire(); - try (PreparedStatement ps = c.prepareStatement(statement)) { - return ps.executeUpdate(); - } catch (SQLException ex) { - throw errorHandler.handleUserException(context, ex); - } finally { - dsl.configuration().connectionProvider().release(c); - } 
- } - - public int executeUpdateOrThrow(Connection c, String statement, Context context) throws - UserException { - try (PreparedStatement ps = c.prepareStatement(statement)) { - return ps.executeUpdate(); - } catch (SQLException ex) { - throw errorHandler.handleUserException(context, ex); - } - } - - public String renderVal(String value) { - return dsl().render(DSL.val(value)); - } - - public DSLContext dsl() { - return DSL.using(dataTypeProvider.getDialect()); - } - - @SuppressWarnings({"rawtypes"}) - public Object getResultSetValue(FieldType fieldType, ResultSet resultSet, int index) throws - SQLException { - DataTypeForKv dataType = dataTypeProvider.getDataType(fieldType); - KvValueConverter valueConverter = dataType.getKvValueConverter(); - SqlBinding sqlBinding = valueConverter.getSqlBinding(); - return sqlBinding.get(resultSet, index); - } - - @SuppressWarnings({"unchecked"}) - public KvValue getResultSetKvValue(FieldType fieldType, DataTypeForKv dataTypeForKv, - ResultSet resultSet, int index) throws SQLException { - Object databaseValue = getResultSetValue(FieldType.from(dataTypeForKv.getKvValueConverter() - .getErasuredType()), resultSet, index); - if (resultSet.wasNull()) { - return null; - } - - return ((Converter>) dataTypeForKv.getConverter()).from(databaseValue); - } - - @SuppressWarnings({"rawtypes", "unchecked"}) - public void setPreparedStatementNullableValue(PreparedStatement preparedStatement, - int parameterIndex, - FieldType fieldType, KvValue value) throws SQLException { - DataTypeForKv dataType = dataTypeProvider.getDataType(fieldType); - if (value != null) { - KvValueConverter valueConverter = dataType.getKvValueConverter(); - SqlBinding sqlBinding = valueConverter.getSqlBinding(); - Converter converter = dataType.getConverter(); - sqlBinding.set(preparedStatement, parameterIndex, converter.to(value)); - } else { - preparedStatement.setNull(parameterIndex, dataType.getSQLType()); - } - } - - @SuppressWarnings({"rawtypes", "unchecked"}) - public void setPreparedStatementValue(PreparedStatement preparedStatement, int parameterIndex, - FieldType fieldType, KvValue value) throws SQLException { - DataTypeForKv dataType = dataTypeProvider.getDataType(fieldType); - KvValueConverter valueConverter = dataType.getKvValueConverter(); - Converter converter = dataType.getConverter(); - SqlBinding sqlBinding = valueConverter.getSqlBinding(); - sqlBinding.set(preparedStatement, parameterIndex, converter.to(value)); - } - - @SuppressWarnings({"rawtypes"}) - public String getPlaceholder(FieldType fieldType) { - DataTypeForKv dataType = dataTypeProvider.getDataType(fieldType); - KvValueConverter valueConverter = dataType.getKvValueConverter(); - SqlBinding sqlBinding = valueConverter.getSqlBinding(); - return sqlBinding.getPlaceholder(); - } - - @SuppressWarnings("rawtypes") - public String getSqlTypeName(FieldType fieldType) { - DataTypeForKv dataType = dataTypeProvider.getDataType(fieldType); - - return dataType.getTypeName(); - } -} diff --git a/engine/backend/common/src/main/java/com/torodb/backend/SqlInterface.java b/engine/backend/common/src/main/java/com/torodb/backend/SqlInterface.java deleted file mode 100644 index 750ac016..00000000 --- a/engine/backend/common/src/main/java/com/torodb/backend/SqlInterface.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * ToroDB - * Copyright © 2014 8Kdata Technology (www.8kdata.com) - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the 
Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . - */ - -package com.torodb.backend; - -import com.torodb.core.backend.IdentifierConstraints; - -import javax.annotation.Nonnull; - -/** - * Wrapper interface to define all database-specific SQL code - */ -public interface SqlInterface { - - @Nonnull - MetaDataReadInterface getMetaDataReadInterface(); - - @Nonnull - MetaDataWriteInterface getMetaDataWriteInterface(); - - @Nonnull - DataTypeProvider getDataTypeProvider(); - - @Nonnull - StructureInterface getStructureInterface(); - - @Nonnull - ReadInterface getReadInterface(); - - @Nonnull - WriteInterface getWriteInterface(); - - @Nonnull - IdentifierConstraints getIdentifierConstraints(); - - @Nonnull - ErrorHandler getErrorHandler(); - - @Nonnull - DslContextFactory getDslContextFactory(); - - @Nonnull - DbBackendService getDbBackend(); - -} diff --git a/engine/backend/common/src/main/java/com/torodb/backend/SqlInterfaceDelegate.java b/engine/backend/common/src/main/java/com/torodb/backend/SqlInterfaceDelegate.java deleted file mode 100644 index 471215a0..00000000 --- a/engine/backend/common/src/main/java/com/torodb/backend/SqlInterfaceDelegate.java +++ /dev/null @@ -1,97 +0,0 @@ -/* - * ToroDB - * Copyright © 2014 8Kdata Technology (www.8kdata.com) - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . 
- */ - -package com.torodb.backend; - -import com.torodb.core.backend.IdentifierConstraints; - -import javax.inject.Inject; - -public class SqlInterfaceDelegate implements SqlInterface { - - private final MetaDataReadInterface metaDataReadInterface; - private final MetaDataWriteInterface metaDataWriteInterface; - private final DataTypeProvider dataTypeProvider; - private final StructureInterface structureInterface; - private final ReadInterface readInterface; - private final WriteInterface writeInterface; - private final IdentifierConstraints identifierConstraints; - private final ErrorHandler errorHandler; - private final DslContextFactory dslContextFactory; - private final DbBackendService dbBackend; - - @Inject - public SqlInterfaceDelegate(MetaDataReadInterface metaDataReadInterface, - MetaDataWriteInterface metaDataWriteInterface, DataTypeProvider dataTypeProvider, - StructureInterface structureInterface, ReadInterface readInterface, - WriteInterface writeInterface, - IdentifierConstraints identifierConstraints, ErrorHandler errorHandler, - DslContextFactory dslContextFactory, DbBackendService dbBackend) { - super(); - this.metaDataReadInterface = metaDataReadInterface; - this.metaDataWriteInterface = metaDataWriteInterface; - this.dataTypeProvider = dataTypeProvider; - this.structureInterface = structureInterface; - this.readInterface = readInterface; - this.writeInterface = writeInterface; - this.identifierConstraints = identifierConstraints; - this.errorHandler = errorHandler; - this.dslContextFactory = dslContextFactory; - this.dbBackend = dbBackend; - } - - public MetaDataReadInterface getMetaDataReadInterface() { - return metaDataReadInterface; - } - - public MetaDataWriteInterface getMetaDataWriteInterface() { - return metaDataWriteInterface; - } - - public DataTypeProvider getDataTypeProvider() { - return dataTypeProvider; - } - - public StructureInterface getStructureInterface() { - return structureInterface; - } - - public ReadInterface getReadInterface() { - return readInterface; - } - - public WriteInterface getWriteInterface() { - return writeInterface; - } - - public IdentifierConstraints getIdentifierConstraints() { - return identifierConstraints; - } - - public ErrorHandler getErrorHandler() { - return errorHandler; - } - - public DslContextFactory getDslContextFactory() { - return dslContextFactory; - } - - public DbBackendService getDbBackend() { - return dbBackend; - } -} diff --git a/engine/backend/common/src/main/java/com/torodb/backend/StructureInterface.java b/engine/backend/common/src/main/java/com/torodb/backend/StructureInterface.java deleted file mode 100644 index 7d68d0b1..00000000 --- a/engine/backend/common/src/main/java/com/torodb/backend/StructureInterface.java +++ /dev/null @@ -1,149 +0,0 @@ -/* - * ToroDB - * Copyright © 2014 8Kdata Technology (www.8kdata.com) - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . 
- */
-
-package com.torodb.backend;
-
-import com.torodb.backend.converters.jooq.DataTypeForKv;
-import com.torodb.backend.meta.TorodbSchema;
-import com.torodb.core.TableRef;
-import com.torodb.core.exceptions.InvalidDatabaseException;
-import com.torodb.core.exceptions.user.UserException;
-import com.torodb.core.transaction.metainf.MetaCollection;
-import com.torodb.core.transaction.metainf.MetaDatabase;
-import com.torodb.core.transaction.metainf.MetaSnapshot;
-import org.jooq.DSLContext;
-import org.jooq.Meta;
-import org.jooq.Schema;
-import org.jooq.lambda.tuple.Tuple2;
-
-import java.util.List;
-import java.util.Optional;
-import java.util.function.Function;
-import java.util.stream.Stream;
-
-import javax.annotation.Nonnull;
-
-public interface StructureInterface {
-
-  void createSchema(@Nonnull DSLContext dsl, @Nonnull String schemaName);
-
-  void dropDatabase(@Nonnull DSLContext dsl, @Nonnull MetaDatabase metaDatabase);
-
-  void dropCollection(@Nonnull DSLContext dsl, @Nonnull String schemaName,
-      @Nonnull MetaCollection metaCollection);
-
-  void renameCollection(@Nonnull DSLContext dsl, @Nonnull String fromSchemaName,
-      @Nonnull MetaCollection fromCollection,
-      @Nonnull String toSchemaName, @Nonnull MetaCollection toCollection);
-
-  void createRootDocPartTable(@Nonnull DSLContext dsl, @Nonnull String schemaName,
-      @Nonnull String tableName, @Nonnull TableRef tableRef);
-
-  void createDocPartTable(@Nonnull DSLContext dsl, @Nonnull String schemaName,
-      @Nonnull String tableName, @Nonnull TableRef tableRef,
-      @Nonnull String foreignTableName);
-
-  /**
-   * Returns a stream of functions that, when executed, create the required indexes on a root doc
-   * part table.
-   *
-   * The returned stream is empty if the backend does not include the internal indexes.
-   *
-   * @param schemaName
-   * @param tableName
-   * @param tableRef
-   * @return
-   * @see DbBackend#includeInternalIndexes()
-   */
-  Stream<Function<DSLContext, String>> streamRootDocPartTableIndexesCreation(String schemaName,
-      String tableName, TableRef tableRef);
-
-  /**
-   * Returns a stream of functions that, when executed, create the required indexes on a doc part
-   * table and return a label that indicates the type of index created.
-   *
-   * The returned stream is empty if the backend does not include the internal indexes.
-   *
-   * @param schemaName
-   * @param tableName
-   * @param tableRef
-   * @param foreignTableName
-   * @return
-   * @see DbBackend#includeInternalIndexes()
-   */
-  Stream<Function<DSLContext, String>> streamDocPartTableIndexesCreation(String schemaName,
-      String tableName, TableRef tableRef, String foreignTableName);
-
-  void addColumnToDocPartTable(@Nonnull DSLContext dsl, @Nonnull String schemaName,
-      @Nonnull String tableName, @Nonnull String columnName, @Nonnull DataTypeForKv dataType);
-
-  /**
-   * Returns a stream of functions that, when executed, execute backend-specific tasks that should
-   * be done once the data insert mode finishes and return a label that indicates the type of
-   * operation executed.
-   *
-   * For example, the PostgreSQL backend may run ANALYZE on the modified tables to gather some
-   * statistics.
-   *
-   * @param snapshot
-   * @return
-   */
-  public Stream<Function<DSLContext, String>> streamDataInsertFinishTasks(MetaSnapshot snapshot);
-
-  void createIndex(@Nonnull DSLContext dsl, @Nonnull String indexName, @Nonnull String tableSchema,
-      @Nonnull String tableName, @Nonnull List<Tuple2<String, Boolean>> columnList, boolean unique)
-      throws UserException;
-
-  void dropIndex(@Nonnull DSLContext dsl, @Nonnull String schemaName, @Nonnull String indexName);
-
-  /**
-   * Drops all torodb elements from the backend, including metatables and their content.
-   *
-   * After calling this method, ToroDB cannot use the underlying backend until metadata is created
-   * again.
-   *
-   * @param dsl
-   */
-  void dropAll(@Nonnull DSLContext dsl);
-
-  /**
-   * Drops all user elements from the backend, including metatables content but not metatables.
-   *
-   * After calling this method, ToroDB sees the underlying backend as a fresh system, similar to
-   * the one that is present the first time ToroDB starts.
-   *
-   * @param dsl
-   */
-  public void dropUserData(DSLContext dsl);
-
-  Optional<Schema> findTorodbSchema(@Nonnull DSLContext dsl, @Nonnull Meta jooqMeta);
-
-  default Optional<Schema> findTorodbSchema(@Nonnull DSLContext dsl) {
-    return findTorodbSchema(dsl, dsl.meta());
-  }
-
-  void checkMetaDataTables(@Nonnull Schema torodbSchema) throws InvalidDatabaseException;
-
-  default void checkMetaDataTables(@Nonnull DSLContext dsl) throws InvalidDatabaseException {
-    Optional<Schema> torodbSchema = findTorodbSchema(dsl);
-    if (!torodbSchema.isPresent()) {
-      throw new InvalidDatabaseException("Schema '" + TorodbSchema.IDENTIFIER + "' not found");
-    }
-    checkMetaDataTables(torodbSchema.get());
-  }
-}
diff --git a/engine/backend/common/src/main/java/com/torodb/backend/TableRefComparator.java b/engine/backend/common/src/main/java/com/torodb/backend/TableRefComparator.java
deleted file mode 100644
index 36e1b014..00000000
--- a/engine/backend/common/src/main/java/com/torodb/backend/TableRefComparator.java
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- * ToroDB
- * Copyright © 2014 8Kdata Technology (www.8kdata.com)
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-package com.torodb.backend;
-
-import com.torodb.core.d2r.DocPartResult;
-
-import java.io.Serializable;
-import java.util.Comparator;
-
-public class TableRefComparator {
-
-  public static class MetaDocPart implements
-      Comparator<com.torodb.core.transaction.metainf.MetaDocPart>, Serializable {
-
-    private static final long serialVersionUID = 1L;
-
-    public static final MetaDocPart ASC = new MetaDocPart();
-    public static final DescMetaDocPart DESC = new DescMetaDocPart();
-
-    private MetaDocPart() {
-    }
-
-    @Override
-    public int compare(com.torodb.core.transaction.metainf.MetaDocPart leftMetaDocPart,
-        com.torodb.core.transaction.metainf.MetaDocPart rightMetaDocPart) {
-      return leftMetaDocPart.getTableRef().getDepth() - rightMetaDocPart.getTableRef().getDepth();
-    }
-  }
-
-  private static class DescMetaDocPart implements
-      Comparator<com.torodb.core.transaction.metainf.MetaDocPart>, Serializable {
-
-    private static final long serialVersionUID = 1L;
-
-    private DescMetaDocPart() {
-    }
-
-    @Override
-    public int compare(com.torodb.core.transaction.metainf.MetaDocPart leftMetaDocPart,
-        com.torodb.core.transaction.metainf.MetaDocPart rightMetaDocPart) {
-      return rightMetaDocPart.getTableRef().getDepth() - leftMetaDocPart.getTableRef().getDepth();
-    }
-  }
-
-  public static class MutableMetaDocPart implements
-      Comparator<com.torodb.core.transaction.metainf.MetaDocPart>, Serializable {
-
-    private static final long serialVersionUID = 1L;
-
-    public static final MutableMetaDocPart ASC = new MutableMetaDocPart();
-    public static final DescMutableMetaDocPart DESC = new DescMutableMetaDocPart();
-
-    private MutableMetaDocPart() {
-    }
-
-    @Override
-    public int compare(com.torodb.core.transaction.metainf.MetaDocPart leftMetaDocPart,
-        com.torodb.core.transaction.metainf.MetaDocPart rightMetaDocPart) {
-      return leftMetaDocPart.getTableRef().getDepth() - rightMetaDocPart.getTableRef().getDepth();
-    }
-  }
-
-  private static class DescMutableMetaDocPart implements
-      Comparator<com.torodb.core.transaction.metainf.MetaDocPart>, Serializable {
-
-    private static final long serialVersionUID = 1L;
-
-    private DescMutableMetaDocPart() {
-    }
-
-    @Override
-    public int compare(com.torodb.core.transaction.metainf.MetaDocPart leftMetaDocPart,
-        com.torodb.core.transaction.metainf.MetaDocPart rightMetaDocPart) {
-      return rightMetaDocPart.getTableRef().getDepth() - leftMetaDocPart.getTableRef().getDepth();
-    }
-  }
-
-  public static class DocPartResultSet implements Comparator<DocPartResult>, Serializable {
-
-    private static final long serialVersionUID = 1L;
-
-    public static final DocPartResultSet DESC = new DocPartResultSet();
-
-    private DocPartResultSet() {
-    }
-
-    @Override
-    public int compare(DocPartResult leftDocPartResultSet,
-        DocPartResult rightDocPartResultSet) {
-      return rightDocPartResultSet.getMetaDocPart().getTableRef().getDepth() - leftDocPartResultSet
-          .getMetaDocPart().getTableRef().getDepth();
-    }
-  }
-}
diff --git a/engine/backend/common/src/main/java/com/torodb/backend/TransactionIsolationLevel.java b/engine/backend/common/src/main/java/com/torodb/backend/TransactionIsolationLevel.java
deleted file mode 100644
index c382bd95..00000000
--- a/engine/backend/common/src/main/java/com/torodb/backend/TransactionIsolationLevel.java
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * ToroDB
- * Copyright © 2014 8Kdata Technology (www.8kdata.com)
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-package com.torodb.backend;
-
-import java.sql.Connection;
-
-public enum TransactionIsolationLevel {
-  TRANSACTION_NONE(Connection.TRANSACTION_NONE),
-  TRANSACTION_READ_UNCOMMITTED(Connection.TRANSACTION_READ_UNCOMMITTED),
-  TRANSACTION_READ_COMMITTED(Connection.TRANSACTION_READ_COMMITTED),
-  TRANSACTION_REPEATABLE_READ(Connection.TRANSACTION_REPEATABLE_READ),
-  TRANSACTION_SERIALIZABLE(Connection.TRANSACTION_SERIALIZABLE);
-
-  private final int level;
-
-  private TransactionIsolationLevel(int level) {
-    this.level = level;
-  }
-
-  public int level() {
-    return level;
-  }
-}
diff --git a/engine/backend/common/src/main/java/com/torodb/backend/WriteInterface.java b/engine/backend/common/src/main/java/com/torodb/backend/WriteInterface.java
deleted file mode 100644
index ffbe43c2..00000000
--- a/engine/backend/common/src/main/java/com/torodb/backend/WriteInterface.java
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * ToroDB
- * Copyright © 2014 8Kdata Technology (www.8kdata.com)
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-package com.torodb.backend;
-
-import com.torodb.core.cursors.Cursor;
-import com.torodb.core.d2r.DocPartData;
-import com.torodb.core.exceptions.user.UserException;
-import com.torodb.core.transaction.metainf.MetaCollection;
-import org.jooq.DSLContext;
-
-import java.util.Collection;
-
-import javax.annotation.Nonnull;
-
-public interface WriteInterface {
-
-  void insertDocPartData(@Nonnull DSLContext dsl, @Nonnull String schemaName,
-      @Nonnull DocPartData docPartData) throws UserException;
-
-  long deleteCollectionDocParts(@Nonnull DSLContext dsl, @Nonnull String schemaName,
-      @Nonnull MetaCollection metaCollection, @Nonnull Cursor didCursor);
-
-  void deleteCollectionDocParts(@Nonnull DSLContext dsl, @Nonnull String schemaName,
-      @Nonnull MetaCollection metaCollection, @Nonnull Collection dids);
-
-}
diff --git a/engine/backend/common/src/main/java/com/torodb/backend/converters/TableRefConverter.java b/engine/backend/common/src/main/java/com/torodb/backend/converters/TableRefConverter.java
deleted file mode 100644
index 81b0a9f4..00000000
--- a/engine/backend/common/src/main/java/com/torodb/backend/converters/TableRefConverter.java
+++ /dev/null
@@ -1,119 +0,0 @@
-/*
- * ToroDB
- * Copyright © 2014 8Kdata Technology (www.8kdata.com)
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */ - -package com.torodb.backend.converters; - -import com.torodb.core.TableRef; -import com.torodb.core.TableRefFactory; - -import java.util.ArrayList; -import java.util.List; - -import javax.json.Json; -import javax.json.JsonArray; -import javax.json.JsonArrayBuilder; -import javax.json.JsonString; -import javax.json.JsonValue; - -public class TableRefConverter { - - private TableRefConverter() { - } - - public static String[] toStringArray(TableRef tableRef) { - List tableRefArray = new ArrayList<>(); - - while (!tableRef.isRoot()) { - String name = escapeTableRef(tableRef); - tableRefArray.add(0, name); - tableRef = tableRef.getParent().get(); - } - - return tableRefArray.toArray(new String[tableRefArray.size()]); - } - - public static TableRef fromStringArray(TableRefFactory tableRefFactory, String[] tableRefArray) { - TableRef tableRef = tableRefFactory.createRoot(); - - for (String tableRefName : tableRefArray) { - tableRef = createChild(tableRefFactory, tableRef, tableRefName); - } - - return tableRef; - } - - public static JsonArray toJsonArray(TableRef tableRef) { - JsonArrayBuilder tableRefJsonArrayBuilder = Json.createArrayBuilder(); - - for (String tableRefName : toStringArray(tableRef)) { - tableRefJsonArrayBuilder.add(tableRefName); - } - - return tableRefJsonArrayBuilder.build(); - } - - public static TableRef fromJsonArray(TableRefFactory tableRefFactory, - JsonArray tableRefJsonArray) { - TableRef tableRef = tableRefFactory.createRoot(); - - for (JsonValue tableRefNameValue : tableRefJsonArray) { - String tableRefName = ((JsonString) tableRefNameValue).getString(); - tableRef = createChild(tableRefFactory, tableRef, tableRefName); - } - - return tableRef; - } - - private static String escapeTableRef(TableRef tableRef) { - String name; - if (tableRef.isInArray()) { - name = "$" + tableRef.getArrayDimension(); - } else { - name = tableRef.getName().replace("$", "\\$"); - } - return name; - } - - private static TableRef createChild(TableRefFactory tableRefFactory, TableRef tableRef, - String tableRefName) { - if (isArrayDimension(tableRefName)) { - Integer dimension = Integer.valueOf(tableRefName.substring(1)); - tableRef = tableRefFactory.createChild(tableRef, dimension); - } else { - tableRef = tableRefFactory.createChild(tableRef, - unescapeTableRefName(tableRefName).intern()); - } - return tableRef; - } - - private static String unescapeTableRefName(String tableRefName) { - return tableRefName.replace("\\$", "$"); - } - - private static boolean isArrayDimension(String name) { - if (name.charAt(0) == '$') { - for (int index = 1; index < name.length(); index++) { - char charAt = name.charAt(index); - if (charAt >= '0' && charAt <= '9') { - return true; - } - } - } - return false; - } -} diff --git a/engine/backend/common/src/main/java/com/torodb/backend/converters/ValueConverter.java b/engine/backend/common/src/main/java/com/torodb/backend/converters/ValueConverter.java deleted file mode 100644 index 512ca99b..00000000 --- a/engine/backend/common/src/main/java/com/torodb/backend/converters/ValueConverter.java +++ /dev/null @@ -1,36 +0,0 @@ -/* - * ToroDB - * Copyright © 2014 8Kdata Technology (www.8kdata.com) - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . - */ - -package com.torodb.backend.converters; - -import com.torodb.kvdocument.values.KvValue; - -import java.io.Serializable; - -/** - * - */ -public interface ValueConverter> extends Serializable { - - Class getJsonClass(); - - Class getValueClass(); - - V toValue(J value); - -} diff --git a/engine/backend/common/src/main/java/com/torodb/backend/converters/array/ArrayConverter.java b/engine/backend/common/src/main/java/com/torodb/backend/converters/array/ArrayConverter.java deleted file mode 100644 index e389a726..00000000 --- a/engine/backend/common/src/main/java/com/torodb/backend/converters/array/ArrayConverter.java +++ /dev/null @@ -1,33 +0,0 @@ -/* - * ToroDB - * Copyright © 2014 8Kdata Technology (www.8kdata.com) - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . - */ - -package com.torodb.backend.converters.array; - -import com.torodb.kvdocument.values.KvValue; - -import java.io.Serializable; - -import javax.json.JsonValue; - -public interface ArrayConverter> extends Serializable { - - String toJsonLiteral(V scalarValue); - - V fromJsonValue(J jsonValue); - -} diff --git a/engine/backend/common/src/main/java/com/torodb/backend/converters/array/BaseArrayToArrayConverter.java b/engine/backend/common/src/main/java/com/torodb/backend/converters/array/BaseArrayToArrayConverter.java deleted file mode 100644 index e21a22be..00000000 --- a/engine/backend/common/src/main/java/com/torodb/backend/converters/array/BaseArrayToArrayConverter.java +++ /dev/null @@ -1,72 +0,0 @@ -/* - * ToroDB - * Copyright © 2014 8Kdata Technology (www.8kdata.com) - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . 
- */ - -package com.torodb.backend.converters.array; - -import com.torodb.kvdocument.values.KvArray; -import com.torodb.kvdocument.values.KvValue; -import com.torodb.kvdocument.values.heap.ListKvArray; - -import java.util.ArrayList; -import java.util.List; - -import javax.json.JsonArray; -import javax.json.JsonValue; - -/** - * - */ -public abstract class BaseArrayToArrayConverter implements ArrayConverter { - - private static final long serialVersionUID = 1L; - - private final ValueToArrayConverterProvider valueToArrayConverterProvider; - - public BaseArrayToArrayConverter(ValueToArrayConverterProvider valueToArrayConverterProvider) { - super(); - this.valueToArrayConverterProvider = valueToArrayConverterProvider; - } - - @SuppressWarnings({"unchecked", "rawtypes"}) - @Override - public String toJsonLiteral(KvArray value) { - StringBuilder sb = new StringBuilder(value.size() * 20); - sb.append("["); - for (KvValue child : value) { - sb.append(((ArrayConverter) valueToArrayConverterProvider.getConverter(child.getType())) - .toJsonLiteral(child)); - sb.append(","); - } - if (!value.isEmpty()) { - sb.delete(sb.length() - 1, sb.length()); - } - sb.append("]"); - return sb.toString(); - } - - @SuppressWarnings({"rawtypes", "unchecked"}) - @Override - public KvArray fromJsonValue(JsonArray value) { - List> list = new ArrayList<>(value.size()); - for (JsonValue child : value) { - ArrayConverter converter = valueToArrayConverterProvider.fromJsonValue(child); - list.add(converter.fromJsonValue(child)); - } - return new ListKvArray(list); - } -} diff --git a/engine/backend/common/src/main/java/com/torodb/backend/converters/array/BinaryToArrayConverter.java b/engine/backend/common/src/main/java/com/torodb/backend/converters/array/BinaryToArrayConverter.java deleted file mode 100644 index 9d8c0391..00000000 --- a/engine/backend/common/src/main/java/com/torodb/backend/converters/array/BinaryToArrayConverter.java +++ /dev/null @@ -1,50 +0,0 @@ -/* - * ToroDB - * Copyright © 2014 8Kdata Technology (www.8kdata.com) - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . 
- */ - -package com.torodb.backend.converters.array; - -import com.google.common.io.ByteSource; -import com.torodb.common.util.HexUtils; -import com.torodb.kvdocument.values.KvBinary; -import com.torodb.kvdocument.values.KvBinary.KvBinarySubtype; -import com.torodb.kvdocument.values.heap.ByteSourceKvBinary; - -import javax.json.JsonString; - -/** - * - */ -public class BinaryToArrayConverter implements ArrayConverter { - - private static final long serialVersionUID = 1L; - - @Override - public String toJsonLiteral(KvBinary value) { - return value.toString(); - } - - @Override - public KvBinary fromJsonValue(JsonString value) { - byte[] bytes = HexUtils.hex2Bytes(value.getString()); - return new ByteSourceKvBinary( - KvBinarySubtype.MONGO_GENERIC, - (byte) 0, - ByteSource.wrap(bytes) - ); - } -} diff --git a/engine/backend/common/src/main/java/com/torodb/backend/converters/array/BooleanToArrayConverter.java b/engine/backend/common/src/main/java/com/torodb/backend/converters/array/BooleanToArrayConverter.java deleted file mode 100644 index 20213d70..00000000 --- a/engine/backend/common/src/main/java/com/torodb/backend/converters/array/BooleanToArrayConverter.java +++ /dev/null @@ -1,45 +0,0 @@ -/* - * ToroDB - * Copyright © 2014 8Kdata Technology (www.8kdata.com) - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . - */ - -package com.torodb.backend.converters.array; - -import com.torodb.kvdocument.values.KvBoolean; - -import javax.json.JsonValue; - -/** - * - */ -public class BooleanToArrayConverter implements ArrayConverter { - - private static final long serialVersionUID = 1L; - - @Override - public String toJsonLiteral(KvBoolean value) { - return value.getValue() ? "true" : "false"; - } - - @Override - public KvBoolean fromJsonValue(JsonValue value) { - if (value != JsonValue.TRUE && value != JsonValue.FALSE) { - throw new AssertionError(value + " is not boolean value"); - } - - return KvBoolean.from(value == JsonValue.TRUE); - } -} diff --git a/engine/backend/common/src/main/java/com/torodb/backend/converters/array/DateToArrayConverter.java b/engine/backend/common/src/main/java/com/torodb/backend/converters/array/DateToArrayConverter.java deleted file mode 100644 index 85d41bc1..00000000 --- a/engine/backend/common/src/main/java/com/torodb/backend/converters/array/DateToArrayConverter.java +++ /dev/null @@ -1,45 +0,0 @@ -/* - * ToroDB - * Copyright © 2014 8Kdata Technology (www.8kdata.com) - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . - */ - -package com.torodb.backend.converters.array; - -import com.torodb.kvdocument.values.KvDate; -import com.torodb.kvdocument.values.heap.LocalDateKvDate; -import org.jooq.tools.json.JSONValue; - -import java.time.LocalDate; - -import javax.json.JsonString; - -/** - * - */ -public class DateToArrayConverter implements ArrayConverter { - - private static final long serialVersionUID = 1L; - - @Override - public String toJsonLiteral(KvDate value) { - return JSONValue.toJSONString(value.toString()); - } - - @Override - public KvDate fromJsonValue(JsonString value) { - return new LocalDateKvDate(LocalDate.parse(value.getString())); - } -} diff --git a/engine/backend/common/src/main/java/com/torodb/backend/converters/array/DoubleToArrayConverter.java b/engine/backend/common/src/main/java/com/torodb/backend/converters/array/DoubleToArrayConverter.java deleted file mode 100644 index be198725..00000000 --- a/engine/backend/common/src/main/java/com/torodb/backend/converters/array/DoubleToArrayConverter.java +++ /dev/null @@ -1,42 +0,0 @@ -/* - * ToroDB - * Copyright © 2014 8Kdata Technology (www.8kdata.com) - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . - */ - -package com.torodb.backend.converters.array; - -import com.torodb.kvdocument.values.KvDouble; -import org.jooq.tools.json.JSONValue; - -import javax.json.JsonNumber; - -/** - * - */ -public class DoubleToArrayConverter implements ArrayConverter { - - private static final long serialVersionUID = 1L; - - @Override - public String toJsonLiteral(KvDouble value) { - return JSONValue.toJSONString(value.getValue()); - } - - @Override - public KvDouble fromJsonValue(JsonNumber value) { - return KvDouble.of(value.doubleValue()); - } -} diff --git a/engine/backend/common/src/main/java/com/torodb/backend/converters/array/InstantToArrayConverter.java b/engine/backend/common/src/main/java/com/torodb/backend/converters/array/InstantToArrayConverter.java deleted file mode 100644 index 3fd29dba..00000000 --- a/engine/backend/common/src/main/java/com/torodb/backend/converters/array/InstantToArrayConverter.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * ToroDB - * Copyright © 2014 8Kdata Technology (www.8kdata.com) - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. 
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-package com.torodb.backend.converters.array;
-
-import com.torodb.kvdocument.values.KvInstant;
-import com.torodb.kvdocument.values.heap.InstantKvInstant;
-import org.jooq.tools.json.JSONValue;
-
-import java.time.Instant;
-import java.time.format.DateTimeFormatter;
-
-import javax.json.JsonString;
-
-/**
- *
- */
-public class InstantToArrayConverter implements ArrayConverter<JsonString, KvInstant> {
-
-  private static final long serialVersionUID = 1L;
-
-  @Override
-  public String toJsonLiteral(KvInstant value) {
-    return JSONValue.toJSONString(value.toString());
-  }
-
-  @Override
-  public KvInstant fromJsonValue(JsonString value) {
-    return new InstantKvInstant(Instant.from(DateTimeFormatter.ISO_OFFSET_DATE_TIME.parse(value
-        .getString())));
-  }
-}
diff --git a/engine/backend/common/src/main/java/com/torodb/backend/converters/array/IntegerToArrayConverter.java b/engine/backend/common/src/main/java/com/torodb/backend/converters/array/IntegerToArrayConverter.java
deleted file mode 100644
index c49164c6..00000000
--- a/engine/backend/common/src/main/java/com/torodb/backend/converters/array/IntegerToArrayConverter.java
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * ToroDB
- * Copyright © 2014 8Kdata Technology (www.8kdata.com)
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-package com.torodb.backend.converters.array;
-
-import com.torodb.kvdocument.values.KvInteger;
-import org.jooq.tools.json.JSONValue;
-
-import javax.json.JsonNumber;
-
-/**
- *
- */
-public class IntegerToArrayConverter implements ArrayConverter<JsonNumber, KvInteger> {
-
-  private static final long serialVersionUID = 1L;
-
-  @Override
-  public String toJsonLiteral(KvInteger value) {
-    return JSONValue.toJSONString(value.getValue());
-  }
-
-  @Override
-  public KvInteger fromJsonValue(JsonNumber value) {
-    return KvInteger.of(value.intValue());
-  }
-}
diff --git a/engine/backend/common/src/main/java/com/torodb/backend/converters/array/LongToArrayConverter.java b/engine/backend/common/src/main/java/com/torodb/backend/converters/array/LongToArrayConverter.java
deleted file mode 100644
index 422e5d19..00000000
--- a/engine/backend/common/src/main/java/com/torodb/backend/converters/array/LongToArrayConverter.java
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * ToroDB
- * Copyright © 2014 8Kdata Technology (www.8kdata.com)
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . - */ - -package com.torodb.backend.converters.array; - -import com.torodb.kvdocument.values.KvLong; -import org.jooq.tools.json.JSONValue; - -import javax.json.JsonNumber; - -/** - * - */ -public class LongToArrayConverter implements ArrayConverter { - - private static final long serialVersionUID = 1L; - - @Override - public String toJsonLiteral(KvLong value) { - return JSONValue.toJSONString(value.getValue()); - } - - @Override - public KvLong fromJsonValue(JsonNumber value) { - return KvLong.of(value.longValue()); - } -} diff --git a/engine/backend/common/src/main/java/com/torodb/backend/converters/array/MongoObjectIdToArrayConverter.java b/engine/backend/common/src/main/java/com/torodb/backend/converters/array/MongoObjectIdToArrayConverter.java deleted file mode 100644 index 42b7a5e1..00000000 --- a/engine/backend/common/src/main/java/com/torodb/backend/converters/array/MongoObjectIdToArrayConverter.java +++ /dev/null @@ -1,45 +0,0 @@ -/* - * ToroDB - * Copyright © 2014 8Kdata Technology (www.8kdata.com) - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . - */ - -package com.torodb.backend.converters.array; - -import com.torodb.common.util.HexUtils; -import com.torodb.kvdocument.values.KvMongoObjectId; -import com.torodb.kvdocument.values.heap.ByteArrayKvMongoObjectId; -import org.jooq.tools.json.JSONValue; - -import javax.json.JsonString; - -/** - * - */ -public class MongoObjectIdToArrayConverter implements ArrayConverter { - - private static final long serialVersionUID = 1L; - - @Override - public String toJsonLiteral(KvMongoObjectId value) { - return JSONValue.toJSONString(value.toString()); - } - - @Override - public KvMongoObjectId fromJsonValue(JsonString value) { - byte[] bytes = HexUtils.hex2Bytes(value.toString()); - return new ByteArrayKvMongoObjectId(bytes); - } -} diff --git a/engine/backend/common/src/main/java/com/torodb/backend/converters/array/MongoTimestampToArrayConverter.java b/engine/backend/common/src/main/java/com/torodb/backend/converters/array/MongoTimestampToArrayConverter.java deleted file mode 100644 index 711086fc..00000000 --- a/engine/backend/common/src/main/java/com/torodb/backend/converters/array/MongoTimestampToArrayConverter.java +++ /dev/null @@ -1,63 +0,0 @@ -/* - * ToroDB - * Copyright © 2014 8Kdata Technology (www.8kdata.com) - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . - */ - -package com.torodb.backend.converters.array; - -import com.torodb.backend.udt.MongoTimestampUDT; -import com.torodb.kvdocument.values.KvMongoTimestamp; -import com.torodb.kvdocument.values.heap.DefaultKvMongoTimestamp; - -import javax.json.Json; -import javax.json.JsonObject; - -/** - * - */ -public class MongoTimestampToArrayConverter - implements ArrayConverter { - - private static final long serialVersionUID = 1L; - - private static final String SECS = MongoTimestampUDT.SECS.getName(); - private static final String COUNTER = MongoTimestampUDT.COUNTER.getName(); - - @Override - public String toJsonLiteral(KvMongoTimestamp value) { - return Json.createObjectBuilder() - .add(SECS, value.getSecondsSinceEpoch()) - .add(COUNTER, value.getOrdinal()) - .build().toString(); - } - - @Override - public KvMongoTimestamp fromJsonValue(JsonObject value) { - assert isValid(value); - return new DefaultKvMongoTimestamp(value.getInt(SECS), value.getInt(COUNTER)); - } - - public boolean isValid(JsonObject object) { - try { - object.getInt(SECS); - object.getInt(COUNTER); - return true; - } catch (NullPointerException | ClassCastException ex) { - return false; - } - } - -} diff --git a/engine/backend/common/src/main/java/com/torodb/backend/converters/array/NullToArrayConverter.java b/engine/backend/common/src/main/java/com/torodb/backend/converters/array/NullToArrayConverter.java deleted file mode 100644 index c9f05db7..00000000 --- a/engine/backend/common/src/main/java/com/torodb/backend/converters/array/NullToArrayConverter.java +++ /dev/null @@ -1,45 +0,0 @@ -/* - * ToroDB - * Copyright © 2014 8Kdata Technology (www.8kdata.com) - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . 
- */ - -package com.torodb.backend.converters.array; - -import com.torodb.kvdocument.values.KvNull; - -import javax.json.JsonValue; - -/** - * - */ -public class NullToArrayConverter implements ArrayConverter { - - private static final long serialVersionUID = 1L; - - @Override - public String toJsonLiteral(KvNull value) { - return "null"; - } - - @Override - public KvNull fromJsonValue(JsonValue value) { - if (value != JsonValue.NULL) { - throw new AssertionError(value + " is not null value"); - } - - return KvNull.getInstance(); - } -} diff --git a/engine/backend/common/src/main/java/com/torodb/backend/converters/array/StringToArrayConverter.java b/engine/backend/common/src/main/java/com/torodb/backend/converters/array/StringToArrayConverter.java deleted file mode 100644 index f22cf848..00000000 --- a/engine/backend/common/src/main/java/com/torodb/backend/converters/array/StringToArrayConverter.java +++ /dev/null @@ -1,43 +0,0 @@ -/* - * ToroDB - * Copyright © 2014 8Kdata Technology (www.8kdata.com) - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . - */ - -package com.torodb.backend.converters.array; - -import com.torodb.kvdocument.values.KvString; -import com.torodb.kvdocument.values.heap.StringKvString; -import org.jooq.tools.json.JSONValue; - -import javax.json.JsonString; - -/** - * - */ -public class StringToArrayConverter implements ArrayConverter { - - private static final long serialVersionUID = 1L; - - @Override - public String toJsonLiteral(KvString value) { - return JSONValue.toJSONString(value.getValue()); - } - - @Override - public KvString fromJsonValue(JsonString value) { - return new StringKvString(value.getString()); - } -} diff --git a/engine/backend/common/src/main/java/com/torodb/backend/converters/array/TimeToArrayConverter.java b/engine/backend/common/src/main/java/com/torodb/backend/converters/array/TimeToArrayConverter.java deleted file mode 100644 index 91d4ba54..00000000 --- a/engine/backend/common/src/main/java/com/torodb/backend/converters/array/TimeToArrayConverter.java +++ /dev/null @@ -1,45 +0,0 @@ -/* - * ToroDB - * Copyright © 2014 8Kdata Technology (www.8kdata.com) - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . 
- */ - -package com.torodb.backend.converters.array; - -import com.torodb.kvdocument.values.KvTime; -import com.torodb.kvdocument.values.heap.LocalTimeKvTime; -import org.jooq.tools.json.JSONValue; - -import java.time.LocalTime; - -import javax.json.JsonString; - -/** - * - */ -public class TimeToArrayConverter implements ArrayConverter { - - private static final long serialVersionUID = 1L; - - @Override - public String toJsonLiteral(KvTime value) { - return JSONValue.toJSONString(value.toString()); - } - - @Override - public KvTime fromJsonValue(JsonString value) { - return new LocalTimeKvTime(LocalTime.parse(value.toString())); - } -} diff --git a/engine/backend/common/src/main/java/com/torodb/backend/converters/array/ValueToArrayConverterProvider.java b/engine/backend/common/src/main/java/com/torodb/backend/converters/array/ValueToArrayConverterProvider.java deleted file mode 100644 index 2ae70190..00000000 --- a/engine/backend/common/src/main/java/com/torodb/backend/converters/array/ValueToArrayConverterProvider.java +++ /dev/null @@ -1,40 +0,0 @@ -/* - * ToroDB - * Copyright © 2014 8Kdata Technology (www.8kdata.com) - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . - */ - -package com.torodb.backend.converters.array; - - -import com.torodb.kvdocument.types.KvType; - -import java.io.Serializable; - -import javax.annotation.Nonnull; -import javax.json.JsonValue; - -/** - * - */ -public interface ValueToArrayConverterProvider extends Serializable { - - @Nonnull - public ArrayConverter getConverter(KvType valueType); - - @Nonnull - public ArrayConverter fromJsonValue(JsonValue jsonValue); - -} diff --git a/engine/backend/common/src/main/java/com/torodb/backend/converters/jooq/ArrayToJooqConverter.java b/engine/backend/common/src/main/java/com/torodb/backend/converters/jooq/ArrayToJooqConverter.java deleted file mode 100644 index 7ad38933..00000000 --- a/engine/backend/common/src/main/java/com/torodb/backend/converters/jooq/ArrayToJooqConverter.java +++ /dev/null @@ -1,68 +0,0 @@ -/* - * ToroDB - * Copyright © 2014 8Kdata Technology (www.8kdata.com) - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . 
- */ - -package com.torodb.backend.converters.jooq; - -import com.torodb.backend.converters.array.ArrayConverter; -import com.torodb.kvdocument.values.KvValue; -import org.jooq.Converter; -import org.jooq.DataType; -import org.jooq.impl.DefaultDataType; - -import javax.json.JsonValue; - -public class ArrayToJooqConverter> implements Converter { - - private static final long serialVersionUID = 1L; - - public static , V extends JsonValue> DataType fromScalarValue( - final Class type, final ArrayConverter arrayConverter, String typeName) { - Converter converter = new ArrayToJooqConverter<>(type, arrayConverter); - return new DefaultDataType<>(null, String.class, typeName).asConvertedDataType(converter); - } - - private final Class type; - private final ArrayConverter arrayConverter; - - public ArrayToJooqConverter(Class type, ArrayConverter arrayConverter) { - super(); - this.type = type; - this.arrayConverter = arrayConverter; - } - - @Override - public UT from(String databaseObject) { - throw new RuntimeException("This conversor should not be used to convert from a database " - + "object"); - } - - @Override - public String to(UT userObject) { - return arrayConverter.toJsonLiteral(userObject); - } - - @Override - public Class fromType() { - return String.class; - } - - @Override - public Class toType() { - return type; - } -} diff --git a/engine/backend/common/src/main/java/com/torodb/backend/converters/jooq/DataTypeForKv.java b/engine/backend/common/src/main/java/com/torodb/backend/converters/jooq/DataTypeForKv.java deleted file mode 100644 index bd05cb62..00000000 --- a/engine/backend/common/src/main/java/com/torodb/backend/converters/jooq/DataTypeForKv.java +++ /dev/null @@ -1,426 +0,0 @@ -/* - * ToroDB - * Copyright © 2014 8Kdata Technology (www.8kdata.com) - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . - */ - -package com.torodb.backend.converters.jooq; - -import com.torodb.kvdocument.values.KvValue; -import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; -import org.jooq.Binding; -import org.jooq.BindingGetResultSetContext; -import org.jooq.BindingGetSQLInputContext; -import org.jooq.BindingGetStatementContext; -import org.jooq.BindingRegisterContext; -import org.jooq.BindingSQLContext; -import org.jooq.BindingSetSQLOutputContext; -import org.jooq.BindingSetStatementContext; -import org.jooq.Configuration; -import org.jooq.Converter; -import org.jooq.DataType; -import org.jooq.EnumType; -import org.jooq.Field; -import org.jooq.SQLDialect; - -import java.sql.SQLException; -import java.util.Collection; -import java.util.List; - -public class DataTypeForKv> implements DataType { - - private static final long serialVersionUID = 1L; - - @SuppressWarnings({"unchecked", "rawtypes"}) - public static > DataTypeForKv from(DataType
dataType, - KvValueConverter converter) { - return new DataTypeForKv<>(dataType.asConvertedDataType(new KvChainConverter(dataType - .getConverter(), converter)), converter); - } - - @SuppressWarnings({"unchecked", "rawtypes"}) - public static > DataTypeForKv from(DataType
dataType, - KvValueConverter converter, int sqlType) { - return new DataTypeForKv<>(dataType.asConvertedDataType(new KvChainConverter(dataType - .getConverter(), converter)), converter, sqlType); - } - - @SuppressWarnings({"unchecked", "rawtypes"}) - public static > DataTypeForKv from(DataType
dataType, - KvValueConverter converter, Binding binding) { - return new DataTypeForKv<>(dataType.asConvertedDataType(new KvChainBinding(binding, dataType - .getConverter(), converter)), converter); - } - - @SuppressWarnings({"unchecked", "rawtypes"}) - public static > DataTypeForKv from(DataType
dataType, - KvValueConverter converter, Binding binding, int sqlType) { - return new DataTypeForKv<>(dataType.asConvertedDataType(new KvChainBinding(binding, dataType - .getConverter(), converter)), converter, sqlType); - } - - private final DataType dataType; - private final int sqlType; - private final KvValueConverter kvValueConverter; - - private DataTypeForKv(DataType dataType, KvValueConverter kvValueConverter) { - super(); - this.dataType = dataType; - this.sqlType = dataType.getSQLType(); - this.kvValueConverter = kvValueConverter; - } - - private DataTypeForKv(DataType dataType, KvValueConverter kvValueConverter, - int sqlType) { - super(); - this.dataType = dataType; - this.sqlType = sqlType; - this.kvValueConverter = kvValueConverter; - } - - public KvValueConverter getKvValueConverter() { - return kvValueConverter; - } - - @SuppressFBWarnings(value = "NM_CONFUSING", justification = "we cannot " - + "change the name of a jOOQ method. And it goes against the code" - + "style") - @Override - public int getSQLType() { - return sqlType; - } - - @Override - public DataType getSQLDataType() { - return dataType.getSQLDataType(); - } - - @Override - public DataType getDataType(Configuration configuration) { - return dataType.getDataType(configuration); - } - - @Override - public Binding getBinding() { - return dataType.getBinding(); - } - - @Override - public Converter getConverter() { - return dataType.getConverter(); - } - - @Override - public Class getType() { - return dataType.getType(); - } - - @Override - public Class getArrayType() { - return dataType.getArrayType(); - } - - @Override - public DataType getArrayDataType() { - return dataType.getArrayDataType(); - } - - @Override - @SuppressWarnings({"rawtypes", "unchecked"}) - public DataType asEnumDataType(Class enumDataType) { - DataType dataType = this.dataType.asEnumDataType(enumDataType); - return new DataTypeForKv(dataType, kvValueConverter); - } - - @Override - @SuppressWarnings({"rawtypes", "unchecked"}) - public DataType asConvertedDataType(Converter converter) { - DataType dataType = this.dataType.asConvertedDataType(converter); - return new DataTypeForKv(dataType, kvValueConverter); - } - - @Override - @SuppressWarnings({"rawtypes", "unchecked"}) - public DataType asConvertedDataType(Binding binding) { - DataType dataType = this.dataType.asConvertedDataType(binding); - return new DataTypeForKv(dataType, kvValueConverter); - } - - @Override - public String getTypeName() { - return dataType.getTypeName(); - } - - @Override - public String getTypeName(Configuration configuration) { - return dataType.getTypeName(configuration); - } - - @Override - public String getCastTypeName() { - return dataType.getCastTypeName(); - } - - @Override - public String getCastTypeName(Configuration configuration) { - return dataType.getCastTypeName(configuration); - } - - @Override - public SQLDialect getDialect() { - return dataType.getDialect(); - } - - @Override - public T convert(Object object) { - return dataType.convert(object); - } - - @Override - public T[] convert(Object... 
objects) { - return dataType.convert(objects); - } - - @Override - public List convert(Collection objects) { - return dataType.convert(objects); - } - - @Override - @SuppressWarnings({"rawtypes", "unchecked"}) - public DataType nullable(boolean nullable) { - DataType dataType = this.dataType.nullable(nullable); - return new DataTypeForKv(dataType, kvValueConverter); - } - - @Override - public boolean nullable() { - return dataType.nullable(); - } - - @Override - @Deprecated - public DataType defaulted(boolean defaulted) { - return dataType.defaulted(defaulted); - } - - @Override - public boolean defaulted() { - return dataType.defaulted(); - } - - @Override - @SuppressWarnings({"rawtypes", "unchecked"}) - public DataType precision(int precision) { - DataType dataType = this.dataType.precision(precision); - return new DataTypeForKv(dataType, kvValueConverter); - } - - @Override - @SuppressWarnings({"rawtypes", "unchecked"}) - public DataType precision(int precision, int scale) { - DataType dataType = this.dataType.precision(precision, scale); - return new DataTypeForKv(dataType, kvValueConverter); - } - - @Override - public int precision() { - return dataType.precision(); - } - - @Override - public boolean hasPrecision() { - return dataType.hasPrecision(); - } - - @Override - @SuppressWarnings({"rawtypes", "unchecked"}) - public DataType scale(int scale) { - DataType dataType = this.dataType.scale(scale); - return new DataTypeForKv(dataType, kvValueConverter); - } - - @Override - public int scale() { - return dataType.scale(); - } - - @Override - public boolean hasScale() { - return dataType.hasScale(); - } - - @Override - @SuppressWarnings({"rawtypes", "unchecked"}) - public DataType length(int length) { - DataType dataType = this.dataType.length(length); - return new DataTypeForKv(dataType, kvValueConverter); - } - - @Override - public int length() { - return dataType.length(); - } - - @Override - public boolean hasLength() { - return dataType.hasLength(); - } - - @Override - public boolean isNumeric() { - return dataType.isNumeric(); - } - - @Override - public boolean isString() { - return dataType.isString(); - } - - @Override - public boolean isDateTime() { - return dataType.isDateTime(); - } - - @Override - public boolean isTemporal() { - return dataType.isTemporal(); - } - - @Override - public boolean isInterval() { - return dataType.isInterval(); - } - - @Override - public boolean isBinary() { - return dataType.isBinary(); - } - - @Override - public boolean isLob() { - return dataType.isLob(); - } - - @Override - public boolean isArray() { - return dataType.isArray(); - } - - @Override - @SuppressWarnings({"rawtypes", "unchecked"}) - public DataType defaultValue(T defaultValue) { - DataType dataType = this.dataType.defaultValue(defaultValue); - return new DataTypeForKv(dataType, kvValueConverter); - } - - @Override - @SuppressWarnings({"rawtypes", "unchecked"}) - public DataType defaultValue(Field defaultValue) { - DataType dataType = this.dataType.defaultValue(defaultValue); - return new DataTypeForKv(dataType, kvValueConverter); - } - - @Override - public Field defaultValue() { - return dataType.defaultValue(); - } - - public static class KvChainConverter - implements Converter { - - private static final long serialVersionUID = 1L; - - private final Converter leftConverter; - private final Converter rightConverter; - - public KvChainConverter(Converter leftConverter, - Converter rightConverter) { - super(); - this.leftConverter = leftConverter; - this.rightConverter = 
rightConverter; - } - - @Override - public WrappedT from(NewT databaseObject) { - return rightConverter.from(leftConverter.from(databaseObject)); - } - - @Override - public NewT to(WrappedT userObject) { - if (userObject == null) { - return null; - } - return leftConverter.to(rightConverter.to(userObject)); - } - - @Override - public Class fromType() { - return leftConverter.fromType(); - } - - @Override - public Class toType() { - return rightConverter.toType(); - } - } - - public static class KvChainBinding implements Binding { - - private static final long serialVersionUID = 1L; - - private final Binding delegate; - private final KvChainConverter chainConverter; - - public KvChainBinding(Binding delegate, Converter leftConverter, - Converter rightConverter) { - super(); - this.delegate = delegate; - this.chainConverter = new KvChainConverter<>(leftConverter, rightConverter); - } - - public Converter converter() { - return chainConverter; - } - - public void sql(BindingSQLContext ctx) throws SQLException { - delegate.sql(ctx); - } - - public void register(BindingRegisterContext ctx) throws SQLException { - delegate.register(ctx); - } - - public void set(BindingSetStatementContext ctx) throws SQLException { - delegate.set(ctx); - } - - public void set(BindingSetSQLOutputContext ctx) throws SQLException { - delegate.set(ctx); - } - - public void get(BindingGetResultSetContext ctx) throws SQLException { - delegate.get(ctx); - } - - public void get(BindingGetStatementContext ctx) throws SQLException { - delegate.get(ctx); - } - - public void get(BindingGetSQLInputContext ctx) throws SQLException { - delegate.get(ctx); - } - } -} diff --git a/engine/backend/common/src/main/java/com/torodb/backend/converters/jooq/FieldTypeConverter.java b/engine/backend/common/src/main/java/com/torodb/backend/converters/jooq/FieldTypeConverter.java deleted file mode 100644 index 590bcc2e..00000000 --- a/engine/backend/common/src/main/java/com/torodb/backend/converters/jooq/FieldTypeConverter.java +++ /dev/null @@ -1,56 +0,0 @@ -/* - * ToroDB - * Copyright © 2014 8Kdata Technology (www.8kdata.com) - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . 
- */ - -package com.torodb.backend.converters.jooq; - -import com.torodb.core.transaction.metainf.FieldType; -import org.jooq.Converter; -import org.jooq.DataType; -import org.jooq.impl.SQLDataType; - -/** - * - */ -public class FieldTypeConverter implements Converter { - - private static final long serialVersionUID = 1L; - - public static final DataType TYPE = SQLDataType.VARCHAR.asConvertedDataType( - new FieldTypeConverter()); - - @Override - public FieldType from(String databaseObject) { - return FieldType.valueOf(databaseObject); - } - - @Override - public String to(FieldType userObject) { - return userObject.name(); - } - - @Override - public Class fromType() { - return String.class; - } - - @Override - public Class toType() { - return FieldType.class; - } - -} diff --git a/engine/backend/common/src/main/java/com/torodb/backend/converters/jooq/KvValueConverter.java b/engine/backend/common/src/main/java/com/torodb/backend/converters/jooq/KvValueConverter.java deleted file mode 100644 index 561e003b..00000000 --- a/engine/backend/common/src/main/java/com/torodb/backend/converters/jooq/KvValueConverter.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * ToroDB - * Copyright © 2014 8Kdata Technology (www.8kdata.com) - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . - */ - -package com.torodb.backend.converters.jooq; - -import com.torodb.backend.converters.sql.SqlBinding; -import com.torodb.kvdocument.types.KvType; -import com.torodb.kvdocument.values.KvValue; -import org.jooq.Converter; - -/** - * - * @param data base type - * @param an intermediate JDBC-friendly Java type - * @param a value of the given {@code KvValue} type - */ -public interface KvValueConverter> - extends Converter { - - public KvType getErasuredType(); - - public SqlBinding getSqlBinding(); -} diff --git a/engine/backend/common/src/main/java/com/torodb/backend/converters/jooq/OrderingConverter.java b/engine/backend/common/src/main/java/com/torodb/backend/converters/jooq/OrderingConverter.java deleted file mode 100644 index 4c8e465b..00000000 --- a/engine/backend/common/src/main/java/com/torodb/backend/converters/jooq/OrderingConverter.java +++ /dev/null @@ -1,56 +0,0 @@ -/* - * ToroDB - * Copyright © 2014 8Kdata Technology (www.8kdata.com) - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . 
- */ - -package com.torodb.backend.converters.jooq; - -import com.torodb.core.transaction.metainf.FieldIndexOrdering; -import org.jooq.Converter; -import org.jooq.DataType; -import org.jooq.impl.SQLDataType; - -/** - * - */ -public class OrderingConverter implements Converter { - - private static final long serialVersionUID = 1L; - - public static final DataType TYPE = SQLDataType.VARCHAR.asConvertedDataType( - new OrderingConverter()); - - @Override - public FieldIndexOrdering from(String databaseObject) { - return FieldIndexOrdering.valueOf(databaseObject); - } - - @Override - public String to(FieldIndexOrdering userObject) { - return userObject.name(); - } - - @Override - public Class fromType() { - return String.class; - } - - @Override - public Class toType() { - return FieldIndexOrdering.class; - } - -} diff --git a/engine/backend/common/src/main/java/com/torodb/backend/converters/jooq/ToroIndexConverter.java b/engine/backend/common/src/main/java/com/torodb/backend/converters/jooq/ToroIndexConverter.java deleted file mode 100644 index 0aba0877..00000000 --- a/engine/backend/common/src/main/java/com/torodb/backend/converters/jooq/ToroIndexConverter.java +++ /dev/null @@ -1,151 +0,0 @@ -/* - * ToroDB - * Copyright © 2014 8Kdata Technology (www.8kdata.com) - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . 
- */ - -package com.torodb.backend.converters.jooq; - -import com.google.common.collect.Sets; -import com.torodb.core.language.AttributeReference; -import com.torodb.core.model.DefaultNamedToroIndex; -import com.torodb.core.model.IndexedAttributes; -import com.torodb.core.model.IndexedAttributes.IndexType; -import com.torodb.core.model.NamedToroIndex; -import org.jooq.Converter; - -import java.io.StringReader; -import java.io.StringWriter; -import java.util.Collections; -import java.util.Map; -import java.util.Set; -import java.util.StringTokenizer; - -import javax.json.Json; -import javax.json.JsonArray; -import javax.json.JsonArrayBuilder; -import javax.json.JsonObject; -import javax.json.JsonObjectBuilder; -import javax.json.JsonReader; -import javax.json.JsonWriter; - -public class ToroIndexConverter implements Converter { - - private static final long serialVersionUID = 1L; - - private final String databaseName; - private final String collectionName; - - private static final String ATTS_KEY = "atts"; - private static final String UNIQUE_KEY = "unique"; - private static final String NAME_KEY = "key"; - private static final String DESCENDING = "desc"; - - public ToroIndexConverter(String databaseName, String collectionName) { - this.databaseName = databaseName; - this.collectionName = collectionName; - } - - @Override - public NamedToroIndex from(String databaseObject) { - JsonReader reader = Json.createReader(new StringReader(databaseObject)); - JsonObject object = reader.readObject(); - - IndexedAttributes.Builder builder = new IndexedAttributes.Builder(); - JsonArray attsArray = object.getJsonArray(ATTS_KEY); - Set descendingAttPos; - if (object.containsKey(DESCENDING)) { - JsonArray descArray = object.getJsonArray(DESCENDING); - descendingAttPos = Sets.newHashSetWithExpectedSize(descArray.size()); - for (int i = 0; i < descArray.size(); i++) { - descendingAttPos.add(descArray.getInt(i)); - } - } else { - descendingAttPos = Collections.emptySet(); - } - - for (int i = 0; i < attsArray.size(); i++) { - String att = attsArray.getString(i); - AttributeReference attRef = parseAttRef(att); - if (descendingAttPos.contains(i)) { - builder.addAttribute(attRef, IndexType.desc); - } else { - builder.addAttribute(attRef, IndexType.asc); - } - } - - return new DefaultNamedToroIndex( - object.getString(NAME_KEY), - builder.build(), - databaseName, - collectionName, - object.getBoolean(UNIQUE_KEY, false) - ); - } - - @Override - public String to(NamedToroIndex userObject) { - JsonObjectBuilder objectBuilder = Json.createObjectBuilder(); - objectBuilder.add(NAME_KEY, userObject.getName()); - if (userObject.isUnique()) { - objectBuilder.add(UNIQUE_KEY, true); - } - - JsonArrayBuilder attsBuilder = Json.createArrayBuilder(); - JsonArrayBuilder descBuilder = Json.createArrayBuilder(); - int attPosition = 0; - boolean hasDescending = false; - for (Map.Entry entry : userObject.getAttributes().entrySet()) { - attsBuilder.add(entry.getKey().toString()); - - if (IndexType.desc.equals(entry.getValue())) { - descBuilder.add(attPosition); - hasDescending = true; - } - attPosition++; - } - objectBuilder.add(ATTS_KEY, attsBuilder); - if (hasDescending) { - objectBuilder.add(DESCENDING, descBuilder); - } - - StringWriter stringWriter = new StringWriter(200); - - JsonWriter jsonWriter = Json.createWriter(stringWriter); - jsonWriter.writeObject(objectBuilder.build()); - return stringWriter.toString(); - } - - @Override - public Class fromType() { - return String.class; - } - - @Override - public Class toType() { - 
return NamedToroIndex.class; - } - - private AttributeReference parseAttRef(String key) { - //TODO: check attributes with '\.' characters - //TODO: Check attributes references with array keys - StringTokenizer tk = new StringTokenizer(key, "."); - AttributeReference.Builder attRefBuilder = new AttributeReference.Builder(); - while (tk.hasMoreTokens()) { - attRefBuilder.addObjectKey(tk.nextToken()); - } - return attRefBuilder.build(); - } -} diff --git a/engine/backend/common/src/main/java/com/torodb/backend/converters/jooq/ValueToJooqDataTypeProvider.java b/engine/backend/common/src/main/java/com/torodb/backend/converters/jooq/ValueToJooqDataTypeProvider.java deleted file mode 100644 index f8512d02..00000000 --- a/engine/backend/common/src/main/java/com/torodb/backend/converters/jooq/ValueToJooqDataTypeProvider.java +++ /dev/null @@ -1,34 +0,0 @@ -/* - * ToroDB - * Copyright © 2014 8Kdata Technology (www.8kdata.com) - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . - */ - -package com.torodb.backend.converters.jooq; - -import com.torodb.core.transaction.metainf.FieldType; - -import java.io.Serializable; - -import javax.annotation.Nonnull; - -/** - * - */ -public interface ValueToJooqDataTypeProvider extends Serializable { - - @Nonnull - public DataTypeForKv getDataType(@Nonnull FieldType type); -} diff --git a/engine/backend/common/src/main/java/com/torodb/backend/converters/json/BaseArrayValueToJsonConverter.java b/engine/backend/common/src/main/java/com/torodb/backend/converters/json/BaseArrayValueToJsonConverter.java deleted file mode 100644 index fca4c5e7..00000000 --- a/engine/backend/common/src/main/java/com/torodb/backend/converters/json/BaseArrayValueToJsonConverter.java +++ /dev/null @@ -1,69 +0,0 @@ -/* - * ToroDB - * Copyright © 2014 8Kdata Technology (www.8kdata.com) - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . 
- */ - -package com.torodb.backend.converters.json; - -import com.torodb.backend.converters.ValueConverter; -import com.torodb.backend.converters.array.ArrayConverter; -import com.torodb.backend.converters.array.ValueToArrayConverterProvider; -import com.torodb.kvdocument.values.KvArray; -import com.torodb.kvdocument.values.KvValue; -import com.torodb.kvdocument.values.heap.ListKvArray; - -import java.util.ArrayList; -import java.util.List; - -import javax.json.JsonArray; -import javax.json.JsonValue; - -/** - * - */ -public abstract class BaseArrayValueToJsonConverter implements - ValueConverter { - - private static final long serialVersionUID = 1L; - - private final ValueToArrayConverterProvider valueToArrayConverterProvider; - - public BaseArrayValueToJsonConverter( - ValueToArrayConverterProvider valueToArrayConverterProvider) { - this.valueToArrayConverterProvider = valueToArrayConverterProvider; - } - - @Override - public Class getJsonClass() { - return JsonArray.class; - } - - @Override - public Class getValueClass() { - return KvArray.class; - } - - @SuppressWarnings({"rawtypes", "unchecked"}) - @Override - public KvArray toValue(JsonArray value) { - List> list = new ArrayList<>(value.size()); - for (JsonValue child : value) { - ArrayConverter converter = valueToArrayConverterProvider.fromJsonValue(child); - list.add(converter.fromJsonValue(child)); - } - return new ListKvArray(list); - } -} diff --git a/engine/backend/common/src/main/java/com/torodb/backend/converters/json/BinaryValueToJsonConverter.java b/engine/backend/common/src/main/java/com/torodb/backend/converters/json/BinaryValueToJsonConverter.java deleted file mode 100644 index 4a3dec11..00000000 --- a/engine/backend/common/src/main/java/com/torodb/backend/converters/json/BinaryValueToJsonConverter.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * ToroDB - * Copyright © 2014 8Kdata Technology (www.8kdata.com) - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . 
- */ - -package com.torodb.backend.converters.json; - -import com.google.common.io.ByteSource; -import com.torodb.backend.converters.ValueConverter; -import com.torodb.common.util.HexUtils; -import com.torodb.kvdocument.values.KvBinary; -import com.torodb.kvdocument.values.KvBinary.KvBinarySubtype; -import com.torodb.kvdocument.values.heap.ByteSourceKvBinary; - -/** - * - */ -public class BinaryValueToJsonConverter implements - ValueConverter { - - private static final long serialVersionUID = 1L; - - @Override - public Class getJsonClass() { - return String.class; - } - - @Override - public Class getValueClass() { - return KvBinary.class; - } - - @Override - public KvBinary toValue(String value) { - if (!value.startsWith("\\x")) { - throw new RuntimeException( - "A bytea in escape format was expected, but " + value - + " was found" - ); - } - return new ByteSourceKvBinary( - KvBinarySubtype.MONGO_GENERIC, - (byte) 0, - ByteSource.wrap(HexUtils.hex2Bytes(value.substring(2))) - ); - } -} diff --git a/engine/backend/common/src/main/java/com/torodb/backend/converters/json/BooleanValueToJsonConverter.java b/engine/backend/common/src/main/java/com/torodb/backend/converters/json/BooleanValueToJsonConverter.java deleted file mode 100644 index 00b98a73..00000000 --- a/engine/backend/common/src/main/java/com/torodb/backend/converters/json/BooleanValueToJsonConverter.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * ToroDB - * Copyright © 2014 8Kdata Technology (www.8kdata.com) - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . - */ - -package com.torodb.backend.converters.json; - -import com.torodb.backend.converters.ValueConverter; -import com.torodb.kvdocument.values.KvBoolean; - -/** - * - */ -public class BooleanValueToJsonConverter implements - ValueConverter { - - private static final long serialVersionUID = 1L; - - @Override - public Class getJsonClass() { - return Boolean.class; - } - - @Override - public Class getValueClass() { - return KvBoolean.class; - } - - @Override - public KvBoolean toValue(Boolean value) { - return KvBoolean.from(value); - } - -} diff --git a/engine/backend/common/src/main/java/com/torodb/backend/converters/json/DateValueToJsonConverter.java b/engine/backend/common/src/main/java/com/torodb/backend/converters/json/DateValueToJsonConverter.java deleted file mode 100644 index f9331f9b..00000000 --- a/engine/backend/common/src/main/java/com/torodb/backend/converters/json/DateValueToJsonConverter.java +++ /dev/null @@ -1,49 +0,0 @@ -/* - * ToroDB - * Copyright © 2014 8Kdata Technology (www.8kdata.com) - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . - */ - -package com.torodb.backend.converters.json; - -import com.torodb.backend.converters.ValueConverter; -import com.torodb.kvdocument.values.KvDate; -import com.torodb.kvdocument.values.heap.LocalDateKvDate; - -import java.time.LocalDate; - -/** - * - */ -public class DateValueToJsonConverter implements ValueConverter { - - private static final long serialVersionUID = 1L; - - @Override - public Class getJsonClass() { - return String.class; - } - - @Override - public Class getValueClass() { - return KvDate.class; - } - - @Override - public KvDate toValue(String value) { - return new LocalDateKvDate(LocalDate.parse(value)); - } - -} diff --git a/engine/backend/common/src/main/java/com/torodb/backend/converters/json/DoubleValueToJsonConverter.java b/engine/backend/common/src/main/java/com/torodb/backend/converters/json/DoubleValueToJsonConverter.java deleted file mode 100644 index 7bcceab5..00000000 --- a/engine/backend/common/src/main/java/com/torodb/backend/converters/json/DoubleValueToJsonConverter.java +++ /dev/null @@ -1,65 +0,0 @@ -/* - * ToroDB - * Copyright © 2014 8Kdata Technology (www.8kdata.com) - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . 
- */ - -package com.torodb.backend.converters.json; - -import com.torodb.backend.converters.ValueConverter; -import com.torodb.kvdocument.values.KvDouble; - -/** - * - */ -public class DoubleValueToJsonConverter implements - ValueConverter { - - private static final long serialVersionUID = 1L; - - @Override - public Class getJsonClass() { - return Double.class; - } - - @Override - public Class getValueClass() { - return KvDouble.class; - } - - @Override - public KvDouble toValue(Object value) { - if (value instanceof Number) { - Number number = (Number) value; - return KvDouble.of(number.doubleValue()); - } - if (value instanceof String) { - String string = (String) value; - if (string.equals("Infinity")) { - return KvDouble.of(Double.POSITIVE_INFINITY); - } - if (string.equals("-Infinity")) { - return KvDouble.of(Double.NEGATIVE_INFINITY); - } - if (string.equals("NaN")) { - return KvDouble.of(Double.NaN); - } - } - throw new IllegalArgumentException( - "KVValue " + value + " has not been recognized as double value" - ); - } - -} diff --git a/engine/backend/common/src/main/java/com/torodb/backend/converters/json/InstantValueToJsonConverter.java b/engine/backend/common/src/main/java/com/torodb/backend/converters/json/InstantValueToJsonConverter.java deleted file mode 100644 index 255a31b7..00000000 --- a/engine/backend/common/src/main/java/com/torodb/backend/converters/json/InstantValueToJsonConverter.java +++ /dev/null @@ -1,50 +0,0 @@ -/* - * ToroDB - * Copyright © 2014 8Kdata Technology (www.8kdata.com) - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . 
- */ - -package com.torodb.backend.converters.json; - -import com.torodb.backend.converters.ValueConverter; -import com.torodb.kvdocument.values.KvInstant; -import com.torodb.kvdocument.values.heap.InstantKvInstant; - -import java.time.Instant; -import java.time.format.DateTimeFormatter; - -/** - * - */ -public class InstantValueToJsonConverter implements - ValueConverter { - - private static final long serialVersionUID = 1L; - - @Override - public Class getJsonClass() { - return String.class; - } - - @Override - public Class getValueClass() { - return KvInstant.class; - } - - @Override - public KvInstant toValue(String value) { - return new InstantKvInstant(Instant.from(DateTimeFormatter.ISO_OFFSET_DATE_TIME.parse(value))); - } -} diff --git a/engine/backend/common/src/main/java/com/torodb/backend/converters/json/IntegerValueToJsonConverter.java b/engine/backend/common/src/main/java/com/torodb/backend/converters/json/IntegerValueToJsonConverter.java deleted file mode 100644 index ec9f0af8..00000000 --- a/engine/backend/common/src/main/java/com/torodb/backend/converters/json/IntegerValueToJsonConverter.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * ToroDB - * Copyright © 2014 8Kdata Technology (www.8kdata.com) - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . - */ - -package com.torodb.backend.converters.json; - -import com.torodb.backend.converters.ValueConverter; -import com.torodb.kvdocument.values.KvInteger; - -/** - * - */ -public class IntegerValueToJsonConverter implements - ValueConverter { - - private static final long serialVersionUID = 1L; - - @Override - public Class getJsonClass() { - return Integer.class; - } - - @Override - public Class getValueClass() { - return KvInteger.class; - } - - @Override - public KvInteger toValue(Number value) { - return KvInteger.of(value.intValue()); - } - -} diff --git a/engine/backend/common/src/main/java/com/torodb/backend/converters/json/LongValueToJsonConverter.java b/engine/backend/common/src/main/java/com/torodb/backend/converters/json/LongValueToJsonConverter.java deleted file mode 100644 index a9f45cd6..00000000 --- a/engine/backend/common/src/main/java/com/torodb/backend/converters/json/LongValueToJsonConverter.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * ToroDB - * Copyright © 2014 8Kdata Technology (www.8kdata.com) - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. 
If not, see . - */ - -package com.torodb.backend.converters.json; - -import com.torodb.backend.converters.ValueConverter; -import com.torodb.kvdocument.values.KvLong; - -/** - * - */ -public class LongValueToJsonConverter implements - ValueConverter { - - private static final long serialVersionUID = 1L; - - @Override - public Class getJsonClass() { - return Long.class; - } - - @Override - public Class getValueClass() { - return KvLong.class; - } - - @Override - public KvLong toValue(Number value) { - return KvLong.of(value.longValue()); - } - -} diff --git a/engine/backend/common/src/main/java/com/torodb/backend/converters/json/MongoObjectIdValueToJsonConverter.java b/engine/backend/common/src/main/java/com/torodb/backend/converters/json/MongoObjectIdValueToJsonConverter.java deleted file mode 100644 index 9f736b5a..00000000 --- a/engine/backend/common/src/main/java/com/torodb/backend/converters/json/MongoObjectIdValueToJsonConverter.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * ToroDB - * Copyright © 2014 8Kdata Technology (www.8kdata.com) - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . - */ - -package com.torodb.backend.converters.json; - -import com.torodb.backend.converters.ValueConverter; -import com.torodb.common.util.HexUtils; -import com.torodb.kvdocument.values.KvMongoObjectId; -import com.torodb.kvdocument.values.heap.ByteArrayKvMongoObjectId; - -/** - * - */ -public class MongoObjectIdValueToJsonConverter implements - ValueConverter { - - private static final long serialVersionUID = 1L; - - @Override - public Class getJsonClass() { - return String.class; - } - - @Override - public Class getValueClass() { - return KvMongoObjectId.class; - } - - @Override - public KvMongoObjectId toValue(String value) { - if (!value.startsWith("\\x")) { - throw new RuntimeException( - "A bytea in escape format was expected, but " + value - + " was found" - ); - } - return new ByteArrayKvMongoObjectId(HexUtils.hex2Bytes(value.substring(2))); - } -} diff --git a/engine/backend/common/src/main/java/com/torodb/backend/converters/json/MongoTimestampValueToJsonConverter.java b/engine/backend/common/src/main/java/com/torodb/backend/converters/json/MongoTimestampValueToJsonConverter.java deleted file mode 100644 index 7cb8a5b6..00000000 --- a/engine/backend/common/src/main/java/com/torodb/backend/converters/json/MongoTimestampValueToJsonConverter.java +++ /dev/null @@ -1,65 +0,0 @@ -/* - * ToroDB - * Copyright © 2014 8Kdata Technology (www.8kdata.com) - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-package com.torodb.backend.converters.json;
-
-import com.torodb.backend.converters.ValueConverter;
-import com.torodb.backend.udt.MongoTimestampUDT;
-import com.torodb.kvdocument.values.KvMongoTimestamp;
-import com.torodb.kvdocument.values.heap.DefaultKvMongoTimestamp;
-
-import javax.json.JsonObject;
-
-/**
- *
- */
-public class MongoTimestampValueToJsonConverter implements
-    ValueConverter<JsonObject, KvMongoTimestamp> {
-
-  private static final long serialVersionUID = 1L;
-
-  private static final String SECS = MongoTimestampUDT.SECS.getName();
-  private static final String COUNTER = MongoTimestampUDT.COUNTER.getName();
-
-  @Override
-  public Class<? extends JsonObject> getJsonClass() {
-    return JsonObject.class;
-  }
-
-  @Override
-  public Class<? extends KvMongoTimestamp> getValueClass() {
-    return KvMongoTimestamp.class;
-  }
-
-  @Override
-  public KvMongoTimestamp toValue(JsonObject value) {
-    assert isValid(value);
-    return new DefaultKvMongoTimestamp(value.getInt(SECS), value.getInt(COUNTER));
-  }
-
-  public boolean isValid(JsonObject object) {
-    try {
-      object.getInt(SECS);
-      object.getInt(COUNTER);
-      return true;
-    } catch (NullPointerException | ClassCastException ex) {
-      return false;
-    }
-  }
-
-}
diff --git a/engine/backend/common/src/main/java/com/torodb/backend/converters/json/NullValueToJsonConverter.java b/engine/backend/common/src/main/java/com/torodb/backend/converters/json/NullValueToJsonConverter.java
deleted file mode 100644
index cfcf6d41..00000000
--- a/engine/backend/common/src/main/java/com/torodb/backend/converters/json/NullValueToJsonConverter.java
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * ToroDB
- * Copyright © 2014 8Kdata Technology (www.8kdata.com)
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-package com.torodb.backend.converters.json;
-
-import com.torodb.backend.converters.ValueConverter;
-import com.torodb.kvdocument.values.KvNull;
-
-/**
- *
- */
-public class NullValueToJsonConverter implements
-    ValueConverter<Void, KvNull> {
-
-  private static final long serialVersionUID = 1L;
-
-  @Override
-  public Class<? extends Void> getJsonClass() {
-    return Void.class;
-  }
-
-  @Override
-  public Class<? extends KvNull> getValueClass() {
-    return KvNull.class;
-  }
-
-  @Override
-  public KvNull toValue(Void value) {
-    return KvNull.getInstance();
-  }
-
-}
diff --git a/engine/backend/common/src/main/java/com/torodb/backend/converters/json/StringValueToJsonConverter.java b/engine/backend/common/src/main/java/com/torodb/backend/converters/json/StringValueToJsonConverter.java
deleted file mode 100644
index 9b134f08..00000000
--- a/engine/backend/common/src/main/java/com/torodb/backend/converters/json/StringValueToJsonConverter.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * ToroDB
- * Copyright © 2014 8Kdata Technology (www.8kdata.com)
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-package com.torodb.backend.converters.json;
-
-import com.torodb.backend.converters.ValueConverter;
-import com.torodb.kvdocument.values.KvString;
-import com.torodb.kvdocument.values.heap.StringKvString;
-
-/**
- *
- */
-public class StringValueToJsonConverter implements
-    ValueConverter<String, KvString> {
-
-  private static final long serialVersionUID = 1L;
-
-  @Override
-  public Class<? extends String> getJsonClass() {
-    return String.class;
-  }
-
-  @Override
-  public Class<? extends KvString> getValueClass() {
-    return KvString.class;
-  }
-
-  @Override
-  public KvString toValue(String value) {
-    return new StringKvString(value);
-  }
-
-}
diff --git a/engine/backend/common/src/main/java/com/torodb/backend/converters/json/TimeValueToJsonConverter.java b/engine/backend/common/src/main/java/com/torodb/backend/converters/json/TimeValueToJsonConverter.java
deleted file mode 100644
index fe5674bd..00000000
--- a/engine/backend/common/src/main/java/com/torodb/backend/converters/json/TimeValueToJsonConverter.java
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * ToroDB
- * Copyright © 2014 8Kdata Technology (www.8kdata.com)
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-package com.torodb.backend.converters.json;
-
-import com.torodb.backend.converters.ValueConverter;
-import com.torodb.kvdocument.values.KvTime;
-import com.torodb.kvdocument.values.heap.LocalTimeKvTime;
-
-import java.time.LocalTime;
-
-/**
- *
- */
-public class TimeValueToJsonConverter implements
-    ValueConverter<String, KvTime> {
-
-  private static final long serialVersionUID = 1L;
-
-  @Override
-  public Class<? extends String> getJsonClass() {
-    return String.class;
-  }
-
-  @Override
-  public Class<? extends KvTime> getValueClass() {
-    return KvTime.class;
-  }
-
-  @Override
-  public KvTime toValue(String value) {
-    return new LocalTimeKvTime(LocalTime.parse(value));
-  }
-
-}
diff --git a/engine/backend/common/src/main/java/com/torodb/backend/converters/json/ValueToJsonConverterProvider.java b/engine/backend/common/src/main/java/com/torodb/backend/converters/json/ValueToJsonConverterProvider.java
deleted file mode 100644
index 12ed23a8..00000000
--- a/engine/backend/common/src/main/java/com/torodb/backend/converters/json/ValueToJsonConverterProvider.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * ToroDB
- * Copyright © 2014 8Kdata Technology (www.8kdata.com)
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-package com.torodb.backend.converters.json;
-
-import com.torodb.backend.converters.ValueConverter;
-import com.torodb.kvdocument.types.KvType;
-
-import java.io.Serializable;
-
-import javax.annotation.Nonnull;
-
-/**
- *
- */
-public interface ValueToJsonConverterProvider extends Serializable {
-
-  @Nonnull
-  ValueConverter<?, ?> getConverter(KvType valueType);
-}
diff --git a/engine/backend/common/src/main/java/com/torodb/backend/converters/sql/BinarySqlBinding.java b/engine/backend/common/src/main/java/com/torodb/backend/converters/sql/BinarySqlBinding.java
deleted file mode 100644
index c4b0661a..00000000
--- a/engine/backend/common/src/main/java/com/torodb/backend/converters/sql/BinarySqlBinding.java
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * ToroDB
- * Copyright © 2014 8Kdata Technology (www.8kdata.com)
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-package com.torodb.backend.converters.sql;
-
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
-
-import java.sql.PreparedStatement;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-
-public class BinarySqlBinding implements SqlBinding<byte[]> {
-
-  public static final BinarySqlBinding INSTANCE = new BinarySqlBinding();
-
-  @Override
-  @SuppressFBWarnings(value = "PZLA_PREFER_ZERO_LENGTH_ARRAYS",
-      justification = "Null value has different meaning from empty array value")
-  public byte[] get(ResultSet resultSet, int columnIndex) throws SQLException {
-    byte[] value = resultSet.getBytes(columnIndex);
-    if (resultSet.wasNull()) {
-      return null;
-    }
-    return value;
-  }
-
-  @Override
-  public void set(PreparedStatement preparedStatement, int parameterIndex, byte[] value) throws
-      SQLException {
-    preparedStatement.setBytes(parameterIndex, value);
-  }
-}
diff --git a/engine/backend/common/src/main/java/com/torodb/backend/converters/sql/BooleanSqlBinding.java b/engine/backend/common/src/main/java/com/torodb/backend/converters/sql/BooleanSqlBinding.java
deleted file mode 100644
index 5ddcd5f4..00000000
--- a/engine/backend/common/src/main/java/com/torodb/backend/converters/sql/BooleanSqlBinding.java
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * ToroDB
- * Copyright © 2014 8Kdata Technology (www.8kdata.com)
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-package com.torodb.backend.converters.sql;
-
-import java.sql.PreparedStatement;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-
-public class BooleanSqlBinding implements SqlBinding<Boolean> {
-
-  public static final BooleanSqlBinding INSTANCE = new BooleanSqlBinding();
-
-  @Override
-  public Boolean get(ResultSet resultSet, int columnIndex) throws SQLException {
-    boolean value = resultSet.getBoolean(columnIndex);
-    if (resultSet.wasNull()) {
-      return null;
-    }
-    return value;
-  }
-
-  @Override
-  public void set(PreparedStatement preparedStatement, int parameterIndex, Boolean value) throws
-      SQLException {
-    preparedStatement.setBoolean(parameterIndex, value);
-  }
-}
diff --git a/engine/backend/common/src/main/java/com/torodb/backend/converters/sql/DateSqlBinding.java b/engine/backend/common/src/main/java/com/torodb/backend/converters/sql/DateSqlBinding.java
deleted file mode 100644
index 8ea83180..00000000
--- a/engine/backend/common/src/main/java/com/torodb/backend/converters/sql/DateSqlBinding.java
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * ToroDB
- * Copyright © 2014 8Kdata Technology (www.8kdata.com)
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-package com.torodb.backend.converters.sql;
-
-import java.sql.Date;
-import java.sql.PreparedStatement;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-
-public class DateSqlBinding implements SqlBinding<Date> {
-
-  public static final DateSqlBinding INSTANCE = new DateSqlBinding();
-
-  @Override
-  public Date get(ResultSet resultSet, int columnIndex) throws SQLException {
-    Date value = resultSet.getDate(columnIndex);
-    if (resultSet.wasNull()) {
-      return null;
-    }
-    return value;
-  }
-
-  @Override
-  public void set(PreparedStatement preparedStatement, int parameterIndex, Date value) throws
-      SQLException {
-    preparedStatement.setDate(parameterIndex, value);
-  }
-}
diff --git a/engine/backend/common/src/main/java/com/torodb/backend/converters/sql/DoubleSqlBinding.java b/engine/backend/common/src/main/java/com/torodb/backend/converters/sql/DoubleSqlBinding.java
deleted file mode 100644
index b1a68ba1..00000000
--- a/engine/backend/common/src/main/java/com/torodb/backend/converters/sql/DoubleSqlBinding.java
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * ToroDB
- * Copyright © 2014 8Kdata Technology (www.8kdata.com)
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-package com.torodb.backend.converters.sql;
-
-import java.sql.PreparedStatement;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-
-public class DoubleSqlBinding implements SqlBinding<Double> {
-
-  public static final DoubleSqlBinding INSTANCE = new DoubleSqlBinding();
-
-  @Override
-  public Double get(ResultSet resultSet, int columnIndex) throws SQLException {
-    double value = resultSet.getDouble(columnIndex);
-    if (resultSet.wasNull()) {
-      return null;
-    }
-    return value;
-  }
-
-  @Override
-  public void set(PreparedStatement preparedStatement, int parameterIndex, Double value) throws
-      SQLException {
-    preparedStatement.setDouble(parameterIndex, value);
-  }
-}
diff --git a/engine/backend/common/src/main/java/com/torodb/backend/converters/sql/IntegerSqlBinding.java b/engine/backend/common/src/main/java/com/torodb/backend/converters/sql/IntegerSqlBinding.java
deleted file mode 100644
index baf07a21..00000000
--- a/engine/backend/common/src/main/java/com/torodb/backend/converters/sql/IntegerSqlBinding.java
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * ToroDB
- * Copyright © 2014 8Kdata Technology (www.8kdata.com)
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-package com.torodb.backend.converters.sql;
-
-import java.sql.PreparedStatement;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-
-public class IntegerSqlBinding implements SqlBinding<Integer> {
-
-  public static final IntegerSqlBinding INSTANCE = new IntegerSqlBinding();
-
-  @Override
-  public Integer get(ResultSet resultSet, int columnIndex) throws SQLException {
-    int value = resultSet.getInt(columnIndex);
-    if (resultSet.wasNull()) {
-      return null;
-    }
-    return value;
-  }
-
-  @Override
-  public void set(PreparedStatement preparedStatement, int parameterIndex, Integer value) throws
-      SQLException {
-    preparedStatement.setInt(parameterIndex, value);
-  }
-}
diff --git a/engine/backend/common/src/main/java/com/torodb/backend/converters/sql/LongSqlBinding.java b/engine/backend/common/src/main/java/com/torodb/backend/converters/sql/LongSqlBinding.java
deleted file mode 100644
index fe2f3bdb..00000000
--- a/engine/backend/common/src/main/java/com/torodb/backend/converters/sql/LongSqlBinding.java
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * ToroDB
- * Copyright © 2014 8Kdata Technology (www.8kdata.com)
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-package com.torodb.backend.converters.sql;
-
-import java.sql.PreparedStatement;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-
-public class LongSqlBinding implements SqlBinding<Long> {
-
-  public static final LongSqlBinding INSTANCE = new LongSqlBinding();
-
-  @Override
-  public Long get(ResultSet resultSet, int columnIndex) throws SQLException {
-    long value = resultSet.getLong(columnIndex);
-    if (resultSet.wasNull()) {
-      return null;
-    }
-    return value;
-  }
-
-  @Override
-  public void set(PreparedStatement preparedStatement, int parameterIndex, Long value) throws
-      SQLException {
-    preparedStatement.setLong(parameterIndex, value);
-  }
-}
diff --git a/engine/backend/common/src/main/java/com/torodb/backend/converters/sql/SqlBinding.java b/engine/backend/common/src/main/java/com/torodb/backend/converters/sql/SqlBinding.java
deleted file mode 100644
index d5e53ddc..00000000
--- a/engine/backend/common/src/main/java/com/torodb/backend/converters/sql/SqlBinding.java
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * ToroDB
- * Copyright © 2014 8Kdata Technology (www.8kdata.com)
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-package com.torodb.backend.converters.sql;
-
-import java.sql.PreparedStatement;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-
-import javax.annotation.Nonnull;
-import javax.annotation.Nullable;
-
-public interface SqlBinding<T> {
-
-  /**
-   * Return the value from the {@code ResultSet} at the specified {@code columnIndex}. If the value
-   * in the {@code ResultSet} is null it must return null too.
-   *
-   * @param resultSet
-   * @param columnIndex
-   * @return
-   * @throws SQLException
-   */
-  @Nullable
-  public T get(@Nonnull ResultSet resultSet, int columnIndex) throws SQLException;
-
-  /**
-   * Set the parameter of {@code PreparedStatement} at specified {@code parameterIndex}.
-   *
-   * @param preparedStatement
-   * @param parameterIndex
-   * @param value
-   * @throws SQLException
-   */
-  public void set(@Nonnull PreparedStatement preparedStatement, int parameterIndex,
-      @Nonnull T value) throws SQLException;
-
-  /**
-   * Return the placeholder for a value to use in SQL statement.
-   *
-   * @return
-   */
-  public default String getPlaceholder() {
-    return "?";
-  }
-}
diff --git a/engine/backend/common/src/main/java/com/torodb/backend/converters/sql/StringSqlBinding.java b/engine/backend/common/src/main/java/com/torodb/backend/converters/sql/StringSqlBinding.java
deleted file mode 100644
index 28bd12d0..00000000
--- a/engine/backend/common/src/main/java/com/torodb/backend/converters/sql/StringSqlBinding.java
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * ToroDB
- * Copyright © 2014 8Kdata Technology (www.8kdata.com)
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-package com.torodb.backend.converters.sql;
-
-import java.sql.PreparedStatement;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-
-public class StringSqlBinding implements SqlBinding<String> {
-
-  public static final StringSqlBinding INSTANCE = new StringSqlBinding();
-
-  @Override
-  public String get(ResultSet resultSet, int columnIndex) throws SQLException {
-    String value = resultSet.getString(columnIndex);
-    if (resultSet.wasNull()) {
-      return null;
-    }
-    return value;
-  }
-
-  @Override
-  public void set(PreparedStatement preparedStatement, int parameterIndex, String value) throws
-      SQLException {
-    preparedStatement.setString(parameterIndex, value);
-  }
-}
diff --git a/engine/backend/common/src/main/java/com/torodb/backend/converters/sql/TimeSqlBinding.java b/engine/backend/common/src/main/java/com/torodb/backend/converters/sql/TimeSqlBinding.java
deleted file mode 100644
index fe8d3824..00000000
--- a/engine/backend/common/src/main/java/com/torodb/backend/converters/sql/TimeSqlBinding.java
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * ToroDB
- * Copyright © 2014 8Kdata Technology (www.8kdata.com)
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-package com.torodb.backend.converters.sql;
-
-import java.sql.PreparedStatement;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.sql.Time;
-
-public class TimeSqlBinding implements SqlBinding
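
Reviewer note: every SqlBinding implementation removed in this diff follows the same contract spelled out in SqlBinding's Javadoc: get() must translate JDBC's wasNull() back into a Java null (the primitive getters return a default value for SQL NULL), set() binds a non-null value, and getPlaceholder() defaults to the plain JDBC "?" placeholder. The following is a minimal sketch of that contract; ShortSqlBinding is a hypothetical name used only for illustration and is not one of the deleted classes.

// Hypothetical sketch only: ShortSqlBinding is not part of the files removed in this
// diff; it restates the SqlBinding<T> contract the deleted classes follow.
package com.torodb.backend.converters.sql;

import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;

public class ShortSqlBinding implements SqlBinding<Short> {

  public static final ShortSqlBinding INSTANCE = new ShortSqlBinding();

  @Override
  public Short get(ResultSet resultSet, int columnIndex) throws SQLException {
    short value = resultSet.getShort(columnIndex);
    // JDBC getters return a primitive default (0) for SQL NULL, so the contract
    // requires checking wasNull() and mapping it back to a Java null.
    if (resultSet.wasNull()) {
      return null;
    }
    return value;
  }

  @Override
  public void set(PreparedStatement preparedStatement, int parameterIndex, Short value) throws
      SQLException {
    preparedStatement.setShort(parameterIndex, value);
  }

  // getPlaceholder() is inherited: the default "?" suits plain JDBC parameters, while a
  // binding for a vendor-specific type could override it to append a cast.
}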