From bd019e347733c5a2b4c6bbf5c3911ac8515d8a23 Mon Sep 17 00:00:00 2001
From: Frank Rosner
Date: Tue, 31 Jan 2017 13:10:21 +0100
Subject: [PATCH] #124 bump version

---
 README.md | 6 +++---
 build.sbt | 2 +-
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/README.md b/README.md
index 3b444ae..83c14e3 100644
--- a/README.md
+++ b/README.md
@@ -13,7 +13,7 @@ DDQ is a small library for checking constraints on Spark data structures. It can
 DDQ is available as a [spark package](http://spark-packages.org/package/FRosner/drunken-data-quality). You can add it to your spark-shell, spark-submit or pyspark using the `--packages` command line option:
 
 ```sh
-spark-shell --packages FRosner:drunken-data-quality:4.0.0-s_2.10
+spark-shell --packages FRosner:drunken-data-quality:4.1.0-s_2.11
 ```
 
 ### Python API
@@ -21,7 +21,7 @@ spark-shell --packages FRosner:drunken-data-quality:4.0.0-s_2.10
 DDQ also comes with a Python API. It is available via the Python Package Index, so you have to install it once using `pip`:
 
 ```
-pip install pyddq==4.0.0
+pip install pyddq==4.1.0
 ```
 
 ### Project Dependency [![Latest Release](https://img.shields.io/github/tag/FRosner/drunken-data-quality.svg?label=JitPack)](https://jitpack.io/#FRosner/drunken-data-quality)
@@ -150,7 +150,7 @@ assert(constraintResults(uniqueKeyConstraint).isInstanceOf[ConstraintSuccess])
 In order to use the Python API, you have to start PySpark with the DDQ jar added. Unfortunately, using the `--packages` way is [not working in Spark < 2.0](https://issues.apache.org/jira/browse/SPARK-5185).
 
 ```
-pyspark --driver-class-path drunken-data-quality_2.10-x.y.z.jar
+pyspark --driver-class-path drunken-data-quality_2.11-x.y.z.jar
 ```
 
 Then you can create a dummy dataframe and run a few checks.
diff --git a/build.sbt b/build.sbt
index b2a7b1c..b5fc329 100644
--- a/build.sbt
+++ b/build.sbt
@@ -1,6 +1,6 @@
 organization := "de.frosner"
 
-version := "4.1.0-SNAPSHOT"
+version := "4.1.0"
 
 name := "drunken-data-quality"
 