diff --git a/.gitignore b/.gitignore
new file mode 100755
index 0000000..d825d8a
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,2 @@
+.DS_Store
+.project
diff --git a/README.md b/README.md
new file mode 100755
index 0000000..a7096c4
--- /dev/null
+++ b/README.md
@@ -0,0 +1,9 @@
+#### An Ambari Service for Hue
+Ambari service for easily installing and managing Hue on an HDP cluster.
+
+#### Version
+- Hue 3.11.0
+- Ambari 2.2.2+
+
+#### How to deploy?
+Please go to the [documentation](http://wiki.ttxit.com) for deployment instructions.
\ No newline at end of file
diff --git a/alerts.json b/alerts.json
new file mode 100755
index 0000000..1a079ff
--- /dev/null
+++ b/alerts.json
@@ -0,0 +1,32 @@
+{
+ "HUE": {
+ "service": [ ],
+ "HUE_SERVER": [
+ {
+ "name": "hue_webui",
+ "label": "Hue Web UI",
+ "description": "This host-level alert is triggered if the Hue Web UI is unreachable.",
+ "interval": 1,
+ "scope": "HOST",
+ "source": {
+ "type": "PORT",
+ "uri": "{{hue-env/hue.port}}",
+ "default_port": 8888,
+ "reporting": {
+ "ok": {
+ "text": "TCP OK - {0:.3f}s response on port {1}"
+ },
+ "warning": {
+ "text": "TCP OK - {0:.3f}s response on port {1}",
+ "value": 1.5
+ },
+ "critical": {
+ "text": "Connection failed: {0} to {1}:{2}",
+ "value": 5
+ }
+ }
+ }
+ }
+ ]
+ }
+}
\ No newline at end of file
diff --git a/configuration/hue-Desktop.xml b/configuration/hue-Desktop.xml
new file mode 100755
index 0000000..d412571
--- /dev/null
+++ b/configuration/hue-Desktop.xml
@@ -0,0 +1,604 @@
+
+
+
+
+
+
+ secret.key
+ jFE93j;2[290-eiw.KEiwN2s3['d;/.q[eIW^y#e=+Iei*@Mn(qW5o
+ Secret Key
+ Set this to a random string, the longer the better. This is used for secure hashing in the session store.
+
+
+
+ app.blacklist
+
+ App blacklist
+ Comma separated list of apps to not load at server startup. List of apps : 'pig','jobbrowser','zookeeper','search','rdbms','metastore','proxy','spark','beeswax','jobsub','hbase','filebrowser'
+
+
+
+ metastore.database.engine
+ sqlite3
+ Metastore Type
+ Hue metastore engine. Database engine is typically one of: 1.postgresql_psycopg2, 2.mysql, 3.sqlite3, 4.oracle.
+
+
+
+
+ metastore.database.host
+
+ Metastore Host
+
+
+
+ metastore.database.port
+
+ Metastore Port
+
+
+
+ metastore.database.name
+ desktop/desktop.db
+ Metastore Name
+
+
+
+ metastore.ConnectionUserName
+ hue
+ Metastore Username
+ username to use against metastore database
+
+
+
+ metastore.ConnectionPassword
+ hue
+ Metastore Password
+ PASSWORD
+ password to use against metastore database
+
+
+
+ metastore.database.options
+ {}
+ Metastore Options
+
+
+
+ hue.kerberos.keytab
+ /etc/security/keytabs/hue.service.keytab
+ Hue Kerberos Keytab
+ Path to Hue's Kerberos keytab file.
+
+
+
+ hue.kerberos.principal
+ hue/_HOST@EXAMPLE.COM
+ Hue Kerberos Principal
+ Kerberos principal name for Hue
+
+
+
+ kinit.path
+ /usr/bin/kinit
+ Kerberos Kinit Path
+ Path to kinit
+
+
+
+ content
+ hue-desktop template
+ Configurations of Hue: Desktop Model
+
+#####################################
+# DEVELOPMENT EDITION
+#####################################
+
+# Hue configuration file
+# ===================================
+#
+# For complete documentation about the contents of this file, run
+# $ (hue_root)/build/env/bin/hue config_help
+#
+# All .ini files under the current directory are treated equally. Their
+# contents are merged to form the Hue configuration, which
+# can be viewed on the Hue at
+# http://(hue_host):(port)/dump_config
+
+
+###########################################################################
+# General configuration for core Desktop features (authentication, etc)
+###########################################################################
+
+[desktop]
+
+ send_dbug_messages=1
+ # Set this to a random string, the longer the better.
+ # This is used for secure hashing in the session store.
+ secret_key={{secret_key}}
+
+ # Execute this script to produce the Django secret key. This will be used when
+ # `secret_key` is not set.
+ ## secret_key_script=
+
+ # Webserver listens on this address and port
+ http_host={{hostname}}
+ http_port={{hue_port}}
+
+ # Time zone name
+ time_zone=Asia/Shanghai
+
+ # Enable or disable Django debug mode.
+ ## django_debug_mode=true
+
+ # Enable or disable database debug mode.
+ ## database_logging=false
+
+ # Enable or disable backtrace for server error
+ ## http_500_debug_mode=true
+
+ # Enable or disable memory profiling.
+ ## memory_profiler=false
+
+ # Server email for internal error messages
+ ## django_server_email='hue@localhost.localdomain'
+
+ # Email backend
+ ## django_email_backend=django.core.mail.backends.smtp.EmailBackend
+
+ # Webserver runs as this user
+ server_user={{hue_user}}
+ server_group={{hue_user}}
+
+ # This should be the Hue admin and proxy user
+ default_user={{hue_user}}
+
+ # This should be the hadoop cluster admin
+ default_hdfs_superuser=hdfs
+
+ # If set to false, runcpserver will not actually start the web server.
+ # Used if Apache is being used as a WSGI container.
+ ## enable_server=yes
+
+ # Number of threads used by the CherryPy web server
+ ## cherrypy_server_threads=40
+
+ # Filename of SSL Certificate
+ ## ssl_certificate=
+
+ # Filename of SSL RSA Private Key
+ ## ssl_private_key=
+
+ # SSL certificate password
+ ## ssl_password=
+
+ # Execute this script to produce the SSL password. This will be used when `ssl_password` is not set.
+ ## ssl_password_script=
+
+ # List of allowed and disallowed ciphers in cipher list format.
+ # See http://www.openssl.org/docs/apps/ciphers.html for more information on
+ # cipher list format. This list is from
+ # https://wiki.mozilla.org/Security/Server_Side_TLS v3.7 intermediate
+ # recommendation, which should be compatible with Firefox 1, Chrome 1, IE 7,
+ # Opera 5 and Safari 1.
+ ## ssl_cipher_list=ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:kEDH+AESGCM:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA256:DHE-RSA-AES256-SHA256:DHE-DSS-AES256-SHA:DHE-RSA-AES256-SHA:AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA:AES:CAMELLIA:DES-CBC3-SHA:!aNULL:!eNULL:!EXPORT:!DES:!RC4:!MD5:!PSK:!aECDH:!EDH-DSS-DES-CBC3-SHA:!EDH-RSA-DES-CBC3-SHA:!KRB5-DES-CBC3-SHA
+
+ # Path to default Certificate Authority certificates.
+ ## ssl_cacerts=/etc/hue/cacerts.pem
+
+ # Choose whether Hue should validate certificates received from the server.
+ ## validate=true
+
+ # LDAP username and password of the hue user used for LDAP authentications.
+
+ # Set it to use LDAP Authentication with HiveServer2 and Impala.
+ ## ldap_username=hue
+ ## ldap_password=
+
+ # Default encoding for site data
+ ## default_site_encoding=utf-8
+
+ # Help improve Hue with anonymous usage analytics.
+ # Use Google Analytics to see how many times an application or specific section of an application is used, nothing more.
+ ## collect_usage=true
+
+ # Support for HTTPS termination at the load-balancer level with SECURE_PROXY_SSL_HEADER.
+ ## secure_proxy_ssl_header=false
+
+ # Comma-separated list of Django middleware classes to use.
+ # See https://docs.djangoproject.com/en/1.4/ref/middleware/ for more details on middlewares in Django.
+ ## middleware=desktop.auth.backend.LdapSynchronizationBackend
+
+ # Comma-separated list of regular expressions, which match the redirect URL.
+ # For example, to restrict to your local domain and FQDN, the following value can be used:
+ # ^\/.*$,^http:\/\/www.mydomain.com\/.*$
+ ## redirect_whitelist=^\/.*$
+
+ # Comma separated list of apps to not load at server startup.
+ # e.g.: pig,zookeeper
+ app_blacklist={{app_blacklist}}
+
+ # The directory in which to store the auditing logs. Auditing is disabled if the value is empty.
+ # e.g. /var/log/hue/audit.log
+ audit_event_log_dir=/var/log/hue/audit.log
+
+ # Size in KB/MB/GB for audit log to rollover.
+ audit_log_max_file_size=100MB
+
+ # A json file containing a list of log redaction rules for cleaning sensitive data
+ # from log files. It is defined as:
+ #
+ # {
+ # "version": 1,
+ # "rules": [
+ # {
+ # "description": "This is the first rule",
+ # "trigger": "triggerstring 1",
+ # "search": "regex 1",
+ # "replace": "replace 1"
+ # },
+ # {
+ # "description": "This is the second rule",
+ # "trigger": "triggerstring 2",
+ # "search": "regex 2",
+ # "replace": "replace 2"
+ # }
+ # ]
+ # }
+ #
+ # Redaction works by searching a string for the [TRIGGER] string. If found,
+ # the [REGEX] is used to replace sensitive information with the
+ # [REDACTION_MASK]. If specified with `log_redaction_string`, the
+ # `log_redaction_string` rules will be executed after the
+ # `log_redaction_file` rules.
+ #
+ # For example, here is a file that would redact passwords and social security numbers:
+
+ # {
+ # "version": 1,
+ # "rules": [
+ # {
+ # "description": "Redact passwords",
+ # "trigger": "password",
+ # "search": "password=\".*\"",
+ # "replace": "password=\"???\""
+ # },
+ # {
+ # "description": "Redact social security numbers",
+ # "trigger": "",
+ # "search": "\d{3}-\d{2}-\d{4}",
+ # "replace": "XXX-XX-XXXX"
+ # }
+ # ]
+ # }
+ ## log_redaction_file=
+
+ # Comma separated list of strings representing the host/domain names that the Hue server can serve.
+ # e.g.: localhost,domain1,*
+ ## allowed_hosts=*
+
+ # Administrators
+ # ----------------
+ [[django_admins]]
+ ## [[[admin1]]]
+ ## name=john
+ ## email=john@doe.com
+
+ # UI customizations
+ # -------------------
+ [[custom]]
+
+ # Top banner HTML code
+ # e.g. (H4)Test Lab A2 Hue Services(/H4)
+ ## banner_top_html=
+
+ # Configuration options for user authentication into the web application
+ # ------------------------------------------------------------------------
+ [[auth]]
+
+ # Authentication backend. Common settings are:
+ # - django.contrib.auth.backends.ModelBackend (entirely Django backend)
+ # - desktop.auth.backend.AllowAllBackend (allows everyone)
+ # - desktop.auth.backend.AllowFirstUserDjangoBackend
+ # (Default. Relies on Django and user manager, after the first login)
+ # - desktop.auth.backend.LdapBackend
+ # - desktop.auth.backend.PamBackend
+ # - desktop.auth.backend.SpnegoDjangoBackend
+ # - desktop.auth.backend.RemoteUserDjangoBackend
+ # - libsaml.backend.SAML2Backend
+ # - libopenid.backend.OpenIDBackend
+ # - liboauth.backend.OAuthBackend
+ # (New oauth, support Twitter, Facebook, Google+ and Linkedin)
+ # Multiple Authentication backends are supported by specifying a comma-separated list in order of priority.
+ # However, in order to enable OAuthBackend, it must be the ONLY backend configured.
+ ## backend=desktop.auth.backend.AllowFirstUserDjangoBackend
+
+ # The service to use when querying PAM.
+ ## pam_service=login
+
+ # When using the desktop.auth.backend.RemoteUserDjangoBackend, this sets
+ # the normalized name of the header that contains the remote user.
+ # The HTTP header in the request is converted to a key by converting
+ # all characters to uppercase, replacing any hyphens with underscores
+ # and adding an HTTP_ prefix to the name. So, for example, if the header
+ # is called Remote-User that would be configured as HTTP_REMOTE_USER
+ #
+ # Defaults to HTTP_REMOTE_USER
+ ## remote_user_header=HTTP_REMOTE_USER
+
+ # Ignore the case of usernames when searching for existing users.
+ # Only supported in remoteUserDjangoBackend.
+ ## ignore_username_case=true
+
+ # Ignore the case of usernames when searching for existing users to authenticate with.
+ # Only supported in remoteUserDjangoBackend.
+ ## force_username_lowercase=true
+
+ # Users will expire after they have not logged in for 'n' amount of seconds.
+ # A negative number means that users will never expire.
+ ## expires_after=-1
+
+ # Apply 'expires_after' to superusers.
+ ## expire_superusers=true
+
+ # Force users to change password on first login with desktop.auth.backend.AllowFirstUserDjangoBackend
+ ## change_default_password=false
+
+ # Configuration options for connecting to LDAP and Active Directory
+ # -------------------------------------------------------------------
+ [[ldap]]
+
+ # The search base for finding users and groups
+ ## base_dn="DC=mycompany,DC=com"
+
+ # URL of the LDAP server
+ ## ldap_url=ldap://auth.mycompany.com
+
+ # A PEM-format file containing certificates for the CA's that
+ # Hue will trust for authentication over TLS.
+ # The certificate for the CA that signed the
+ # LDAP server certificate must be included among these certificates.
+ # See more here http://www.openldap.org/doc/admin24/tls.html.
+ ## ldap_cert=
+ ## use_start_tls=true
+
+ # Distinguished name of the user to bind as -- not necessary if the LDAP server
+ # supports anonymous searches
+ ## bind_dn="CN=ServiceAccount,DC=mycompany,DC=com"
+
+ # Password of the bind user -- not necessary if the LDAP server supports
+ # anonymous searches
+ ## bind_password=
+
+ # Execute this script to produce the bind user password. This will be used
+ # when `bind_password` is not set.
+ ## bind_password_script=
+
+ # Pattern for searching for usernames -- Use (username) for the parameter
+ # For use when using LdapBackend for Hue authentication
+ ## ldap_username_pattern="uid=(username),ou=People,dc=mycompany,dc=com"
+
+ # Create users in Hue when they try to login with their LDAP credentials
+ # For use when using LdapBackend for Hue authentication
+ ## create_users_on_login = true
+
+ # Synchronize a users groups when they login
+ ## sync_groups_on_login=false
+
+ # Ignore the case of usernames when searching for existing users in Hue.
+ ## ignore_username_case=true
+
+ # Force usernames to lowercase when creating new users from LDAP.
+ ## force_username_lowercase=true
+
+ # Use search bind authentication.
+ ## search_bind_authentication=true
+
+ # Choose which kind of subgrouping to use: nested or suboordinate (deprecated).
+ ## subgroups=suboordinate
+
+ # Define the number of levels to search for nested members.
+ ## nested_members_search_depth=10
+
+ # Whether or not to follow referrals
+ ## follow_referrals=false
+
+ # Enable python-ldap debugging.
+ ## debug=false
+
+ # Sets the debug level within the underlying LDAP C lib.
+ ## debug_level=255
+
+ # Possible values for trace_level are 0 for no logging, 1 for only logging the method calls with arguments,
+ # 2 for logging the method calls with arguments and the complete results and 9 for also logging the traceback of method calls.
+ ## trace_level=0
+
+ [[[users]]]
+
+ # Base filter for searching for users
+ ## user_filter="objectclass=*"
+
+ # The username attribute in the LDAP schema
+ ## user_name_attr=sAMAccountName
+
+ [[[groups]]]
+
+ # Base filter for searching for groups
+ ## group_filter="objectclass=*"
+
+ # The group name attribute in the LDAP schema
+ ## group_name_attr=cn
+
+ # The attribute of the group object which identifies the members of the group
+ ## group_member_attr=members
+
+ [[[ldap_servers]]]
+
+ ## [[[[mycompany]]]]
+
+ # The search base for finding users and groups
+ ## base_dn="DC=mycompany,DC=com"
+
+ # URL of the LDAP server
+ ## ldap_url=ldap://auth.mycompany.com
+
+ # A PEM-format file containing certificates for the CA's that
+ # Hue will trust for authentication over TLS.
+ # The certificate for the CA that signed the
+ # LDAP server certificate must be included among these certificates.
+ # See more here http://www.openldap.org/doc/admin24/tls.html.
+ ## ldap_cert=
+ ## use_start_tls=true
+
+ # Distinguished name of the user to bind as -- not necessary if the LDAP server
+ # supports anonymous searches
+ ## bind_dn="CN=ServiceAccount,DC=mycompany,DC=com"
+
+ # Password of the bind user -- not necessary if the LDAP server supports
+ # anonymous searches
+ ## bind_password=
+
+ # Execute this script to produce the bind user password. This will be used
+ # when `bind_password` is not set.
+ ## bind_password_script=
+
+ # Pattern for searching for usernames -- Use username for the parameter
+ # For use when using LdapBackend for Hue authentication
+ ## ldap_username_pattern="uid= username,ou=People,dc=mycompany,dc=com"
+
+ ## Use search bind authentication.
+ ## search_bind_authentication=true
+
+ # Whether or not to follow referrals
+ ## follow_referrals=false
+
+ # Enable python-ldap debugging.
+ ## debug=false
+
+ # Sets the debug level within the underlying LDAP C lib.
+ ## debug_level=255
+
+ # Possible values for trace_level are 0 for no logging, 1 for only logging the method calls with arguments,
+ # 2 for logging the method calls with arguments and the complete results and 9 for also logging the traceback of method calls.
+ ## trace_level=0
+
+ ## [[[[[users]]]]]
+
+ # Base filter for searching for users
+ ## user_filter="objectclass=Person"
+
+ # The username attribute in the LDAP schema
+ ## user_name_attr=sAMAccountName
+
+ ## [[[[[groups]]]]]
+
+ # Base filter for searching for groups
+ ## group_filter="objectclass=groupOfNames"
+
+ # The username attribute in the LDAP schema
+ ## group_name_attr=cn
+
+ # Configuration options for specifying the Desktop Database. For more info,
+ # see http://docs.djangoproject.com/en/1.4/ref/settings/#database-engine
+ # ------------------------------------------------------------------------
+ [[database]]
+ # Database engine is typically one of:
+ # postgresql_psycopg2, mysql, sqlite3 or oracle.
+ #
+ # Note that for sqlite3, 'name', below is a path to the filename. For other backends, it is the database name.
+ # Note for Oracle, options={"threaded":true} must be set in order to avoid crashes.
+ # Note for Oracle, you can use the Oracle Service Name by setting "port=0" and then "name=host:port/service_name".
+ # Note for MariaDB use the 'mysql' engine.
+ {% if metastore_database_engine == 'sqlite3' %}
+ ## engine=sqlite3
+ ## host=
+ ## port=
+ ## user=
+ ## password=
+ ## name=desktop/desktop.db
+ ## options={}
+ {% else %}
+ engine={{metastore_database_engine}}
+ host={{metastore_database_host}}
+ port={{metastore_database_port}}
+ user={{metastore_database_user}}
+ password={{metastore_database_password}}
+ name={{metastore_database_name}}
+ options={{metastore_database_options}}
+ {% endif %}
+ # Configuration options for specifying the Desktop session.
+ # For more info, see https://docs.djangoproject.com/en/1.4/topics/http/sessions/
+ # ------------------------------------------------------------------------
+ [[session]]
+ # The cookie containing the users' session ID will expire after this amount of time in seconds.
+ # Default is 2 weeks.
+ ## ttl=1209600
+
+ # The cookie containing the users' session ID will be secure.
+ # Should only be enabled with HTTPS.
+ ## secure=false
+
+ # The cookie containing the users' session ID will use the HTTP only flag.
+ ## http_only=true
+
+ # Use session-length cookies. Logs out the user when she closes the browser window.
+ ## expire_at_browser_close=false
+
+
+ # Configuration options for connecting to an external SMTP server
+ # ------------------------------------------------------------------------
+ [[smtp]]
+
+ # The SMTP server information for email notification delivery
+ host=localhost
+ port=25
+ user=
+ password=
+
+ # Whether to use a TLS (secure) connection when talking to the SMTP server
+ tls=no
+
+ # Default email address to use for various automated notification from Hue
+ ## default_from_email=hue@localhost
+
+
+ # Configuration options for Kerberos integration for secured Hadoop clusters
+ # ------------------------------------------------------------------------
+ [[kerberos]]
+
+ {% if security_enabled %}
+ # Path to Hue's Kerberos keytab file
+ hue_keytab={{hue_keytab}}
+ # Kerberos principal name for Hue
+ hue_principal={{hue_principal}}
+ # Path to kinit
+ kinit_path={{kinit_path}}
+ {% else %}
+ # Path to Hue's Kerberos keytab file
+ ## hue_keytab=/etc/security/keytabs/hue.service.keytab
+ # Kerberos principal name for Hue
+ ## hue_principal=hue/_HOST@EXAMPLE.COM
+ # Path to kinit
+ ## kinit_path=/usr/bin/kinit
+ {% endif %}
+
+ # Configuration options for using OAuthBackend (Core) login
+ # ------------------------------------------------------------------------
+ [[oauth]]
+ # The Consumer key of the application
+ ## consumer_key=XXXXXXXXXXXXXXXXXXXXX
+
+ # The Consumer secret of the application
+ ## consumer_secret=XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+
+ # The Request token URL
+ ## request_token_url=https://api.twitter.com/oauth/request_token
+
+ # The Access token URL
+ ## access_token_url=https://api.twitter.com/oauth/access_token
+
+ # The Authorize URL
+ ## authenticate_url=https://api.twitter.com/oauth/authorize
+
+
+
diff --git a/configuration/hue-Hadoop.xml b/configuration/hue-Hadoop.xml
new file mode 100755
index 0000000..ca4a7e8
--- /dev/null
+++ b/configuration/hue-Hadoop.xml
@@ -0,0 +1,165 @@
+
+
+
+
+
+
+ HDFS.HttpFS.host
+
+ HttpFS Server host
+ Hadoop HttpFS service node
+
+
+
+ content
+ hue-hadoop template
+ Configurations of Hue: Hadoop Model
+
+###########################################################################
+# Settings to configure your Hadoop cluster.
+###########################################################################
+
+[hadoop]
+
+ # Configuration for HDFS NameNode
+ # ------------------------------------------------------------------------
+ [[hdfs_clusters]]
+
+ [[[default]]]
+ # Enter the filesystem uri
+ fs_defaultfs={{namenode_address}}
+
+ # NameNode logical name.
+ {% if dfs_ha_enabled %}
+ logical_name=hdfs://{{logical_name}}
+ {% else %}
+ ## logical_name=
+ {% endif %}
+ # Use WebHdfs/HttpFs as the communication mechanism.
+ # Domain should be the NameNode or HttpFs host.
+ # Default port is 14000 for HttpFs.
+ webhdfs_url={{webhdfs_url}}
+
+ # Change this if your HDFS cluster is Kerberos-secured
+ {% if security_enabled %}
+ security_enabled={{security_enabled}}
+ {% else %}
+ ## security_enabled=false
+ {% endif %}
+ # In secure mode (HTTPS), if SSL certificates from YARN Rest APIs
+ # have to be verified against certificate authority
+ ## ssl_cert_ca_verify=True
+
+ # Directory of the Hadoop configuration
+ hadoop_conf_dir=/etc/hadoop/conf
+
+ # Configuration for YARN (MR2)
+ # ------------------------------------------------------------------------
+ [[yarn_clusters]]
+
+ [[[default]]]
+ # Enter the host on which you are running the ResourceManager
+ resourcemanager_host={{resourcemanager_host1}}
+
+ # The port where the ResourceManager IPC listens on
+ resourcemanager_port={{resourcemanager_port}}
+
+ # Whether to submit jobs to this cluster
+ submit_to=True
+
+ # Resource Manager logical name (required for HA)
+ {% if resourcemanager_ha_enabled %}
+ logical_name={{logical_name}}
+ {% else %}
+ ## logical_name=
+ {% endif %}
+
+ # Change this if your YARN cluster is Kerberos-secured
+ {% if security_enabled %}
+ security_enabled={{security_enabled}}
+ {% else %}
+ ## security_enabled=false
+ {% endif %}
+
+ # URL of the ResourceManager API
+ resourcemanager_api_url={{resourcemanager_api_url1}}
+
+ # URL of the ProxyServer API
+ proxy_api_url={{proxy_api_url1}}
+
+ # URL of the HistoryServer API
+ history_server_api_url={{history_server_api_url}}
+
+ # URL of the Spark History Server
+ spark_history_server_url={{spark_history_server_url}}
+
+ # In secure mode (HTTPS), if SSL certificates from YARN Rest APIs
+ # have to be verified against certificate authority
+ ## ssl_cert_ca_verify=True
+
+ # HA support by specifying multiple clusters.
+ # Redefine different properties there.
+ # e.g.
+
+ {% if resourcemanager_ha_enabled %}
+ [[[ha]]]
+ # Resource Manager logical name (required for HA)
+ logical_name={{logical_name}}
+
+ # Un-comment to enable
+ submit_to=True
+
+ # URL of the ResourceManager API
+ resourcemanager_api_url={{resourcemanager_api_url2}}
+ resourcemanager_host={{resourcemanager_host2}}
+ resourcemanager_port={{resourcemanager_port}}
+ proxy_api_url={{proxy_api_url2}}
+ history_server_api_url={{history_server_api_url}}
+ spark_history_server_url={{spark_history_server_url}}
+ {% else %}
+ # [[[ha]]]
+ # Resource Manager logical name (required for HA)
+ ## logical_name=my-rm-name
+
+ # Un-comment to enable
+ ## submit_to=True
+
+ # URL of the ResourceManager API
+ ## resourcemanager_api_url=http://localhost:8088
+
+ # ...
+ {% endif %}
+
+ # Configuration for MapReduce (MR1)
+ # ------------------------------------------------------------------------
+ [[mapred_clusters]]
+
+ [[[default]]]
+ # Enter the host on which you are running the Hadoop JobTracker
+ ## jobtracker_host=localhost
+
+ # The port where the JobTracker IPC listens on
+ ## jobtracker_port=8021
+
+ # JobTracker logical name for HA
+ ## logical_name=
+
+ # Thrift plug-in port for the JobTracker
+ ## thrift_port=9290
+
+ # Whether to submit jobs to this cluster
+ submit_to=False
+
+ # Change this if your MapReduce cluster is Kerberos-secured
+ ## security_enabled=false
+
+ # HA support by specifying multiple clusters
+ # e.g.
+
+ # [[[ha]]]
+ # Enter the logical name of the JobTrackers
+ ## logical_name=my-jt-name
+
+
+
+
diff --git a/configuration/hue-Hbase.xml b/configuration/hue-Hbase.xml
new file mode 100755
index 0000000..0f7e229
--- /dev/null
+++ b/configuration/hue-Hbase.xml
@@ -0,0 +1,39 @@
+
+
+
+
+
+
+ content
+ hue-hbase template
+ Configurations of Hue: Hbase Model
+
+###########################################################################
+# Settings to configure HBase Browser
+###########################################################################
+
+[hbase]
+ # Comma-separated list of HBase Thrift servers for clusters in the format of '(name|host:port)'.
+ # Use full hostname with security.
+ # If using Kerberos we assume GSSAPI SASL, not PLAIN.
+ {% if if_hbase_exist %}
+ hbase_clusters={{hbase_cluster}}
+ {% else %}
+ ## hbase_clusters=(cluster1|localhost1:9090),(cluster2|localhost2:9090)
+ {% endif %}
+ # HBase configuration directory, where hbase-site.xml is located.
+ hbase_conf_dir=/etc/hbase/conf
+
+ # Hard limit of rows or columns per row fetched before truncating.
+ truncate_limit = 500
+
+ # 'buffered' is the default of the HBase Thrift Server and supports security.
+ # 'framed' can be used to chunk up responses,
+ # which is useful when used in conjunction with the nonblocking server in Thrift.
+ thrift_transport=buffered
+
+
+
+
+
+
diff --git a/configuration/hue-Hive.xml b/configuration/hue-Hive.xml
new file mode 100755
index 0000000..e490d80
--- /dev/null
+++ b/configuration/hue-Hive.xml
@@ -0,0 +1,64 @@
+
+
+
+
+
+
+ content
+ hue-hive template
+ Configurations of Hue: Hive(beeswax) Model
+
+###########################################################################
+# Settings to configure Beeswax with Hive
+###########################################################################
+
+[beeswax]
+ # Host where HiveServer2 is running.
+ # If Kerberos security is enabled, use fully-qualified domain name (FQDN).
+ {% if if_hive_exist %}
+ hive_server_host={{hive_server_host}}
+ # Port where HiveServer2 Thrift server runs on.
+ hive_server_port={{hive_server_port}}
+ {% else %}
+ ## hive_server_host=localhost
+ ## hive_server_port=10000
+ {% endif %}
+ # Hive configuration directory, where hive-site.xml is located
+ hive_conf_dir=/etc/hive/conf
+
+ # Timeout in seconds for thrift calls to Hive service
+ server_conn_timeout=120
+
+ # Choose whether to use the old GetLog() thrift call from before Hive 0.14 to retrieve the logs.
+ # If false, use the FetchResults() thrift call from Hive 1.0 or more instead.
+ ## use_get_log_api=false
+
+ # Set a LIMIT clause when browsing a partitioned table.
+ # A positive value will be set as the LIMIT. If 0 or negative, do not set any limit.
+ browse_partitioned_table_limit=250
+
+ # A limit to the number of rows that can be downloaded from a query.
+ # A value of -1 means there will be no limit.
+ # A maximum of 30,000 is applied to XLS downloads.
+ download_row_limit=1000000
+
+ # Hue will try to close the Hive query when the user leaves the editor page.
+ # This will free all the query resources in HiveServer2, but also make its results inaccessible.
+ ## close_queries=false
+
+ # Thrift version to use when communicating with HiveServer2.
+ # New column format is from version 7.
+ ## thrift_version=7
+
+ [[ssl]]
+ # Path to Certificate Authority certificates.
+ ## cacerts=/etc/hue/cacerts.pem
+
+ # Choose whether Hue should validate certificates received from the server.
+ ## validate=true
+
+
+
+
+
+
diff --git a/configuration/hue-Oozie.xml b/configuration/hue-Oozie.xml
new file mode 100755
index 0000000..754a40b
--- /dev/null
+++ b/configuration/hue-Oozie.xml
@@ -0,0 +1,68 @@
+
+
+
+
+
+
+ content
+ hue-oozie template
+ Configurations of Hue: Oozie Model
+
+###########################################################################
+# Settings to configure the Oozie app
+###########################################################################
+
+[oozie]
+ {% if if_oozie_exist %}
+ # Location on local FS where the examples are stored.
+ local_data_dir=/usr/local/hue/apps/oozie/examples
+
+ # Location on local FS where the data for the examples is stored.
+ sample_data_dir=/usr/local/hue/apps/oozie/examples/input_data
+
+ # Location on HDFS where the oozie examples and workflows are stored.
+ remote_data_dir=/user/hue/oozie/workspaces
+
+ # Maximum of Oozie workflows or coordinators to retrieve in one API call.
+ oozie_jobs_count=50
+
+ # Use Cron format for defining the frequency of a Coordinator instead of the old frequency number/unit.
+ enable_cron_scheduling=true
+ {% else %}
+ ## local_data_dir=/usr/local/hue/apps/oozie/examples
+ ## sample_data_dir=/usr/local/hue/apps/oozie/examples/input_data
+ ## remote_data_dir=/user/hue/oozie/workspaces
+ ## oozie_jobs_count=50
+ ## enable_cron_scheduling=true
+ {% endif %}
+###########################################################################
+# Settings to configure liboozie
+###########################################################################
+
+[liboozie]
+ # The URL where the Oozie service runs on. This is required in order for
+ # users to submit jobs. Empty value disables the config check.
+ {% if if_oozie_exist %}
+ oozie_url={{oozie_url}}
+ {% else %}
+ ## oozie_url=http://localhost:11000/oozie
+ {% endif %}
+
+ # Requires FQDN in oozie_url if enabled
+ {% if security_enabled %}
+ security_enabled={{security_enabled}}
+ {% else %}
+ ## security_enabled=false
+ {% endif %}
+
+ {% if if_oozie_exist %}
+ # Location on HDFS where the workflows/coordinator are deployed when submitted.
+ remote_deployement_dir=/user/hue/oozie/deployments
+ {% else %}
+ ## remote_deployement_dir=/user/hue/oozie/deployments
+ {% endif %}
+
+
+
+
+
diff --git a/configuration/hue-Pig.xml b/configuration/hue-Pig.xml
new file mode 100755
index 0000000..eeecbbd
--- /dev/null
+++ b/configuration/hue-Pig.xml
@@ -0,0 +1,29 @@
+
+
+
+
+
+
+ content
+ hue-pig template
+ Configurations of Hue: Pig Model
+
+###########################################################################
+# Settings to configure Pig
+###########################################################################
+
+[pig]
+ {% if if_pig_exist %}
+ # Location of piggybank.jar on local filesystem.
+ local_sample_dir=/usr/hdp/current/pig-client
+ # Location piggybank.jar will be copied to in HDFS.
+ remote_data_dir=/user/hue/pig/examples
+ {% else %}
+ ## local_sample_dir=/usr/hdp/current/pig-client
+ ## remote_data_dir=/user/hue/pig/examples
+ {% endif %}
+
+
+
+
+
diff --git a/configuration/hue-RDBMS.xml b/configuration/hue-RDBMS.xml
new file mode 100755
index 0000000..e21cdf5
--- /dev/null
+++ b/configuration/hue-RDBMS.xml
@@ -0,0 +1,136 @@
+
+
+
+
+
+
+ Database.engine
+ sqlite
+ Database Type
+ Database backend to use. This can be: 1.mysql、2.postgresql、3.oracle、4.sqlite
+
+
+
+ Nice.name
+ "SQLITE DB"
+ Show Name
+ Name to show in the UI.
+
+
+
+ Database.host
+
+ Database Host
+ IP or hostname of the database to connect to.
+
+
+
+ Database.port
+
+ Database Port
+ Port the database server is listening to. Defaults are: 1.MySQL: 3306、 2.PostgreSQL: 5432、 3.Oracle Express Edition: 1521
+
+
+
+ Database.name
+ /usr/local/hue/desktop/desktop.db
+ Database Name
+
+ For SQLite, name defines the path to the database.
+ For MySQL and PostgreSQL, name is the name of the database.
+ For Oracle, Name is instance of the Oracle server. For express edition this is 'xe' by default.
+
+
+
+
+ Database.user
+
+ Database Username
+ Username to authenticate with when connecting to the database.
+
+
+
+ Database.password
+
+ Database Password
+ PASSWORD
+ Password matching the username to authenticate with when connecting to the database.
+
+
+
+ options
+ {}
+ Database Options
+ Database options to send to the server when connecting.
+
+
+
+ content
+ hue-rdbms template
+ Configurations of Hue: RDBMS Model
+
+###########################################################################
+# Settings for the RDBMS application
+###########################################################################
+[librdbms]
+ # The RDBMS app can have any number of databases configured in the databases
+ # section. A database is known by its section name
+ # (IE sqlite, mysql, psql, and oracle in the list below).
+ [[databases]]
+ {% if RDBMS_database_engine == 'sqlite' %}
+ # sqlite configuration.
+ [[[sqlite]]]
+ # Name to show in the UI.
+ nice_name={{RDBMS_nice_name}}
+
+ # For SQLite, name defines the path to the database.
+ name={{RDBMS_database_name}}
+
+ # Database backend to use.
+ engine={{RDBMS_database_engine}}
+
+ # Database options to send to the server when connecting.
+ # https://docs.djangoproject.com/en/1.4/ref/databases/
+ options={{RDBMS_options}}
+ {% else %}
+ # mysql, oracle, or postgresql configuration.
+ [[[{{RDBMS_database_engine}}]]]
+ # Name to show in the UI.
+ nice_name={{RDBMS_nice_name}}
+
+ # For MySQL and PostgreSQL, name is the name of the database.
+ # For Oracle, Name is instance of the Oracle server. For express edition
+ # this is 'xe' by default.
+ name={{RDBMS_database_name}}
+
+ # Database backend to use. This can be:
+ # 1. mysql
+ # 2. postgresql
+ # 3. oracle
+ engine={{RDBMS_database_engine}}
+
+ # IP or hostname of the database to connect to.
+ host={{RDBMS_database_host}}
+
+ # Port the database server is listening to. Defaults are:
+ # 1. MySQL: 3306
+ # 2. PostgreSQL: 5432
+ # 3. Oracle Express Edition: 1521
+ port={{RDBMS_database_port}}
+
+ # Username to authenticate with when connecting to the database.
+ user={{RDBMS_database_user}}
+
+ # Password matching the username to authenticate with when
+ # connecting to the database.
+ password={{RDBMS_database_password}}
+
+ # Database options to send to the server when connecting.
+ # https://docs.djangoproject.com/en/1.4/ref/databases/
+ options={{RDBMS_options}}
+ {% endif %}
+
+
+
+
+
diff --git a/configuration/hue-Solr.xml b/configuration/hue-Solr.xml
new file mode 100755
index 0000000..7f52380
--- /dev/null
+++ b/configuration/hue-Solr.xml
@@ -0,0 +1,49 @@
+
+
+
+
+
+
+ content
+ hue-solr template
+ Configurations of Hue: Solr Model
+
+###########################################################################
+# Settings to configure Solr Search
+###########################################################################
+
+[search]
+
+ # URL of the Solr Server
+ {% if if_solr_exist %}
+ solr_url={{solr_url}}
+ {% else %}
+ ## solr_url=http://localhost:8983/solr/
+ {% endif %}
+
+ # Requires FQDN in solr_url if enabled
+ {% if security_enabled %}
+ security_enabled={{security_enabled}}
+ {% else %}
+ ## security_enabled=false
+ {% endif %}
+ ## Query sent when no term is entered
+ empty_query=*:*
+
+ # Use latest Solr 5.2+ features.
+ latest=true
+
+###########################################################################
+# Settings to configure Solr Indexer
+###########################################################################
+
+[indexer]
+
+ # Location of the solrctl binary.
+ solrctl_path=/usr/bin/solrctl
+
+
+
+
+
+
diff --git a/configuration/hue-Spark.xml b/configuration/hue-Spark.xml
new file mode 100755
index 0000000..31ca72e
--- /dev/null
+++ b/configuration/hue-Spark.xml
@@ -0,0 +1,35 @@
+
+
+
+
+
+
+ content
+ hue-spark template
+ Configurations of Hue: Spark Model
+
+###########################################################################
+# Settings to configure the Spark application.
+###########################################################################
+
+[spark]
+ # Host address of the Livy Server.
+ livy_server_host={{hostname}}
+
+ # Port of the Livy Server.
+ livy_server_port=8998
+
+ # Configure livy to start in local 'process' mode, or 'yarn' workers.
+ livy_server_session_kind=yarn
+
+ # If livy should use proxy users when submitting a job.
+ livy_impersonation_enabled=true
+
+ # List of available types of snippets
+ languages='[{"name": "Scala Shell", "type": "scala"},{"name": "PySpark Shell", "type": "python"},{"name": "R Shell", "type": "r"},{"name": "Jar", "type": "Jar"},{"name": "Python", "type": "py"},{"name": "Impala SQL", "type": "impala"},{"name": "Hive SQL", "type": "hive"},{"name": "Text", "type": "text"}]'
+
+
+
+
+
+
diff --git a/configuration/hue-Zookeeper.xml b/configuration/hue-Zookeeper.xml
new file mode 100755
index 0000000..7a47d4f
--- /dev/null
+++ b/configuration/hue-Zookeeper.xml
@@ -0,0 +1,37 @@
+
+
+
+
+
+
+ content
+ hue-zookeeper template
+ Configurations of Hue: Zookeeper Model
+
+###########################################################################
+# Settings to configure the Zookeeper application.
+###########################################################################
+
+[zookeeper]
+
+ [[clusters]]
+
+ [[[default]]]
+ # Zookeeper ensemble. Comma separated list of Host/Port.
+ # e.g. localhost:2181,localhost:2182,localhost:2183
+ host_ports={{zookeeper_host_port}}
+
+ # The URL of the REST contrib service (required for znode browsing).
+ rest_url={{rest_url}}
+
+ # Name of Kerberos principal when using security.
+ {% if security_enabled %}
+ principal_name={{zk_principal}}
+ {% else %}
+ ## principal_name=zookeeper/_HOST@EXAMPLE.COM
+ {% endif %}
+
+
+
+
+
diff --git a/configuration/hue-env.xml b/configuration/hue-env.xml
new file mode 100755
index 0000000..150f180
--- /dev/null
+++ b/configuration/hue-env.xml
@@ -0,0 +1,57 @@
+
+
+
+
+
+
+
+ hue.pid.dir
+ /var/run/hue
+ Hue Pid Dir
+ Hue Process ID Directory
+
+
+
+ hue.log.dir
+ /var/log/hue
+ Hue Log Dir
+ Dir for Hue log
+
+
+
+ hue.user
+ hue
+ Hue User
+ USER
+ Hue user
+
+
+
+ hue.group
+ hue
+ Hue Group
+ GROUP
+ Hue group
+
+
+
+ hue.port
+ 8888
+ Hue Web Port
+ Webserver listens on this port
+
+
+
+ hue.package.name
+ hue-3.11.0.tgz
+ Hue Package Name
+ The installation package for Hue
+
+
+
+ hue.version
+ 3.11.0
+ Hue Version
+
+
+
diff --git a/configuration/hue-log4j-env.xml b/configuration/hue-log4j-env.xml
new file mode 100755
index 0000000..0ffa250
--- /dev/null
+++ b/configuration/hue-log4j-env.xml
@@ -0,0 +1,101 @@
+
+
+
+
+
+
+ content
+ log4j-env template
+ Configurations of hue log
+
+########################################
+# Definition for the different objects
+# - FOR DEVELOPMENT ONLY -
+#
+# Directories where log files are kept must already exist.
+# That's why we pick /tmp.
+#
+# The loggers are configured to write to the log files ONLY.
+# Developers may set the DESKTOP_DEBUG environment variable to
+# enable stderr logging output.
+########################################
+
+[logger_root]
+handlers=logfile,errorlog
+
+[logger_access]
+handlers=accesslog
+qualname=access
+
+[logger_django_auth_ldap]
+handlers=accesslog
+qualname=django_auth_ldap
+
+[logger_kazoo_client]
+level=INFO
+handlers=errorlog
+qualname=kazoo.client
+
+[logger_djangosaml2]
+level=INFO
+handlers=errorlog
+qualname=djangosaml2
+
+[logger_requests_packages_urllib3_connectionpool]
+level=DEBUG
+handlers=errorlog
+qualname=requests.packages.urllib3.connectionpool
+
+[handler_stderr]
+class=StreamHandler
+formatter=default
+level=DEBUG
+args=(sys.stderr,)
+
+[handler_accesslog]
+class=handlers.RotatingFileHandler
+level=INFO
+propagate=True
+formatter=access
+args=('/var/log/hue/access.log', 'a', 1000000, 3)
+
+[handler_errorlog]
+class=handlers.RotatingFileHandler
+level=ERROR
+formatter=default
+args=('/var/log/hue/error.log', 'a', 1000000, 3)
+
+[handler_logfile]
+class=handlers.RotatingFileHandler
+# Choices are DEBUG, INFO, WARNING, ERROR, CRITICAL
+level=DEBUG
+formatter=default
+args=('/var/log/hue/%PROC_NAME%.log', 'a', 1000000, 3)
+
+[formatter_default]
+class=desktop.log.formatter.Formatter
+format=[%(asctime)s] %(module)-12s %(levelname)-8s %(message)s
+datefmt=%d/%b/%Y %H:%M:%S %z
+
+[formatter_access]
+class=desktop.log.formatter.Formatter
+format=[%(asctime)s] %(levelname)-8s %(message)s
+datefmt=%d/%b/%Y %H:%M:%S %z
+
+########################################
+# A summary of loggers, handlers and formatters
+########################################
+
+[loggers]
+keys=root,access,django_auth_ldap,kazoo_client,requests_packages_urllib3_connectionpool,djangosaml2
+
+[handlers]
+keys=stderr,logfile,accesslog,errorlog
+
+[formatters]
+keys=default,access
+
+
+
+
+
\ No newline at end of file
diff --git a/metainfo.xml b/metainfo.xml
new file mode 100755
index 0000000..d113673
--- /dev/null
+++ b/metainfo.xml
@@ -0,0 +1,172 @@
+
+
+
+ 2.0
+
+
+ HUE
+ Hue
+ Hue is an open source Web interface for analyzing data with the Apache Hadoop ecosystem
+ 3.11.0
+
+
+ HUE_SERVER
+ Hue Server
+ MASTER
+ 1
+
+
+ PYTHON
+ 1200
+
+
+
+ UserSync
+ true
+
+
+ PYTHON
+
+
+
+ DatabaseSync
+ true
+
+
+ PYTHON
+
+
+
+ AddConfigurations
+
+
+ PYTHON
+
+
+
+
+
+ LIVY_SERVER
+ Livy Server
+ SLAVE
+ 0+
+ true
+
+
+ PYTHON
+ 1200
+
+
+
+ HDFS/HDFS_CLIENT
+ host
+
+ true
+
+
+
+ SPARK/SPARK_CLIENT
+ host
+
+ true
+
+
+
+ SPARK/SPARK_THRIFTSERVER
+ cluster
+
+
+
+
+
+
+ any
+
+
+ gcc-c++
+
+
+ openssl-devel
+
+
+ mysql-devel
+
+
+ wget
+
+
+ tar
+
+
+ asciidoc
+
+
+ krb5-devel
+
+
+ libxml2-devel
+
+
+ libxslt-devel
+
+
+ openldap-devel
+
+
+ python-devel
+
+
+ python-simplejson
+
+
+ python-setuptools
+
+
+ sqlite-devel
+
+
+ rsync
+
+
+ saslwrapper-devel
+
+
+ pycrypto
+
+
+ gmp-devel
+
+
+ libyaml-devel
+
+
+ cyrus-sasl-plain
+
+
+ cyrus-sasl-devel
+
+
+ cyrus-sasl-gssapi
+
+
+ libffi-devel
+
+
+
+
+
+ hue-Desktop
+ hue-Hadoop
+ hue-Hive
+ hue-Hbase
+ hue-Zookeeper
+ hue-Oozie
+ hue-Pig
+ hue-Spark
+ hue-RDBMS
+ hue-Solr
+
+ true
+
+
+
\ No newline at end of file
diff --git a/package/scripts/configs.sh b/package/scripts/configs.sh
new file mode 100755
index 0000000..a32ff42
--- /dev/null
+++ b/package/scripts/configs.sh
@@ -0,0 +1,278 @@
+#!/bin/bash
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
usage () {
  # Print the command-line help text and abort with a non-zero status.
  # Invoked whenever the argument list does not match any known action shape.
  cat <<'USAGE'
Usage: configs.sh [-u userId] [-p password] [-port port] [-s] [CONFIG_FILENAME | CONFIG_KEY [CONFIG_VALUE]]

 [-u userId]: Optional user ID to use for authentication. Default is 'admin'.
 [-p password]: Optional password to use for authentication. Default is 'admin'.
 [-port port]: Optional port number for Ambari server. Default is '8080'. Provide empty string to not use port.
 [-s]: Optional support of SSL. Default is 'false'. Provide empty string to not use SSL.
 : One of 'get', 'set', 'delete'. 'Set' adds/updates as necessary.
 : Server external host name
 : Name given to cluster. Ex: 'c1'
 : One of the various configuration types in Ambari. Ex:global, core-site, hdfs-site, mapred-queue-acls, etc.
 [CONFIG_FILENAME]: File where entire configurations are saved to, or read from. Only applicable to 'get' and 'set' actions
 [CONFIG_KEY]: Key that has to be set or deleted. Not necessary for 'get' action.
 [CONFIG_VALUE]: Optional value to be set. Not necessary for 'get' or 'delete' actions.
USAGE
  exit 1;
}
+
+# Defaults used when the optional -u/-p/-port/-s flags are omitted.
+USERID="admin"
+PASSWD="admin"
+PORT=":8080"
+SSL_URL_PREFIX=""
+
+# Optional flags must appear in this fixed order: -u, -p, -port, -s.
+# Each recognized flag is shifted off so the positional args line up below.
+if [ "$1" == "-u" ] ; then
+ USERID=$2;
+ shift 2;
+ echo "USERID=$USERID";
+fi
+
+if [ "$1" == "-p" ] ; then
+ PASSWD=$2;
+ shift 2;
+ echo "PASSWORD=$PASSWD";
+fi
+
+if [ "$1" == "-port" ] ; then
+ # An explicitly empty port argument means "no :port suffix in the URL".
+ if [ -z $2 ]; then
+ PORT="";
+ else
+ PORT=":$2";
+ fi
+ shift 2;
+ echo "PORT=$PORT";
+fi
+
+if [ "$1" == "-s" ] ; then
+ # Switches the Ambari base URL scheme from http to https.
+ SSL_URL_PREFIX="s"
+ shift;
+ echo "SSL is enabled";
+fi
+
+# Positional arguments (after flags are shifted away):
+# $1=action, $2=ambari host, $3=cluster name, $4=config type,
+# $5=config key or filename, $6=config value.
+AMBARIURL="http$SSL_URL_PREFIX://$2$PORT"
+CLUSTER=$3
+SITE=$4
+SITETAG=''
+CONFIGKEY=$5
+CONFIGVALUE=$6
+
+###################
+## currentSiteTag()
+###################
+currentSiteTag () {
+ # Look up the currently active ("desired") version tag for config type
+ # $SITE by scraping the cluster's desired_configs JSON line by line.
+ # On success, sets the global SITETAG; on failure, dumps the raw server
+ # response and exits.
+ currentSiteTag=''
+ found=''
+
+ #currentSite=`cat ds.json | grep -E "$SITE|tag"`;
+ # Keep only the lines mentioning the site name or a "tag" field; word
+ # splitting of $currentSite then feeds the token loop below.
+ currentSite=`curl -k -s -u $USERID:$PASSWD "$AMBARIURL/api/v1/clusters/$CLUSTER?fields=Clusters/desired_configs" | grep -E "$SITE|tag"`;
+ for line in $currentSite; do
+ if [ $line != "{" -a $line != ":" -a $line != '"tag"' ] ; then
+ # Once the site name has been seen, the next non-structural token is
+ # its tag value.
+ if [ -n "$found" -a -z "$currentSiteTag" ]; then
+ currentSiteTag=$line;
+ fi
+ if [ $line == "\"$SITE\"" ]; then
+ found=$SITE;
+ fi
+ fi
+ done;
+ if [ -z $currentSiteTag ]; then
+ errOutput=`curl -k -s -u $USERID:$PASSWD "$AMBARIURL/api/v1/clusters/$CLUSTER?fields=Clusters/desired_configs"`;
+ echo "[ERROR] \"$SITE\" not found in server response.";
+ echo "[ERROR] Output of \`curl -k -s -u $USERID:$PASSWD \"$AMBARIURL/api/v1/clusters/$CLUSTER?fields=Clusters/desired_configs\"\` is:";
+ echo $errOutput | while read -r line; do
+ echo "[ERROR] $line";
+ done;
+ exit 1;
+ fi
+ # Strip the surrounding JSON quotes from the tag token.
+ currentSiteTag=`echo $currentSiteTag|cut -d \" -f 2`
+ SITETAG=$currentSiteTag;
+}
+
+#############################################
+## doConfigUpdate()
+## @param MODE of update. Either 'set' or 'delete'
+#############################################
+doConfigUpdate () {
+ # Apply a single-key 'set' or 'delete' against config type $SITE.
+ # Streams the current config JSON from Ambari, rebuilds the
+ # "properties" block (replacing/omitting $CONFIGKEY), then PUTs the
+ # result back as a new config version tagged with the current time.
+ MODE=$1
+ currentSiteTag
+ echo "########## Performing '$MODE' $CONFIGKEY:$CONFIGVALUE on (Site:$SITE, Tag:$SITETAG)";
+ # Parser state: which JSON section we are inside, and brace depth.
+ # NOTE(review): the while loop runs in a pipeline subshell, so these
+ # variables are rebuilt per invocation — intentional here.
+ propertiesStarted=0
+ attributesStarted=0
+ currentLevel=0
+ curl -k -s -u $USERID:$PASSWD "$AMBARIURL/api/v1/clusters/$CLUSTER/configurations?type=$SITE&tag=$SITETAG" | while read -r line; do
+ if [ "$propertiesStarted" -eq 0 -a "$attributesStarted" -eq 0 ]; then
+ if [ "$line" = "\"properties_attributes\" : {" ]; then
+ attributesStarted=$currentLevel
+ elif [ "$line" = "\"properties\" : {" ]; then
+ propertiesStarted=$currentLevel
+ fi
+ fi
+ if [ "$propertiesStarted" -gt 0 ]; then
+ if [ "`echo $line | grep -E "},?$"`" ]; then
+ ## Properties ended
+ ## Add property
+ # Remove the last ,
+ propLen=${#newProperties}
+ lastChar=${newProperties:$propLen-1:1}
+ if [ "$lastChar" == "," ]; then
+ newProperties=${newProperties:0:$propLen-1}
+ fi
+ # For 'set' append the new key/value; for 'delete' the key was
+ # simply skipped above, so nothing is appended.
+ if [ "$MODE" == "set" ]; then
+ newProperties="$newProperties, \"$CONFIGKEY\" : \"$CONFIGVALUE\" "
+ fi
+ newProperties=$newProperties$line
+ propertiesStarted=0
+ elif [ "`echo $line | grep "\\\"$CONFIGKEY\\\""`" ]; then
+ echo "########## Config found. Skipping origin value"
+ else
+ newProperties=$newProperties$line
+ fi
+ elif [ "$attributesStarted" -gt 0 ]; then
+ newProperties=$newProperties$line
+ fi
+ # Track JSON brace depth so we know when the top-level object closes.
+ if [ "`echo $line | grep -E "{$"`" ]; then
+ currentLevel=$((currentLevel+1))
+ elif [ "`echo $line | grep -E "},?$"`" ]; then
+ currentLevel=$((currentLevel-1))
+ if [ "$currentLevel" == 1 ]; then
+ # Whole config object consumed: write the rebuilt JSON to a temp
+ # file and PUT it as a new desired_config version.
+ newTag=`date "+%s%N"`
+ newTag="version${newTag}"
+ finalJson="{ \"Clusters\": { \"desired_config\": {\"type\": \"$SITE\", \"tag\":\"$newTag\", $newProperties}}}"
+ newFile="doSet_$newTag.json"
+ echo "########## PUTting json into: $newFile"
+ echo $finalJson > $newFile
+ curl -k -u $USERID:$PASSWD -X PUT -H "X-Requested-By: ambari" "$AMBARIURL/api/v1/clusters/$CLUSTER" --data @$newFile
+ currentSiteTag
+ echo "########## NEW Site:$SITE, Tag:$SITETAG";
+ fi
+ fi
+ if [ "$attributesStarted" -eq "$currentLevel" ]; then
+ attributesStarted=0
+ fi
+ done
+}
+
+#############################################
+## doConfigFileUpdate()
+## @param File name to PUT on server
+#############################################
doConfigFileUpdate () {
  # PUT the JSON "properties" body contained in file $1 to the cluster as a
  # brand-new version of config type $SITE, tagged with the current time.
  # Exits non-zero if the file is missing or not in the expected format.
  FILENAME="$1"
  if [ -f "$FILENAME" ]; then
    # The payload must begin with "properties" on its first line.
    if [ "1" == "$(grep -En '^"properties"' "$FILENAME" | cut -d : -f 1)" ]; then
      newTag=`date "+%s%N"`
      newTag="version${newTag}"
      newProperties=`cat "$FILENAME"`;
      finalJson="{ \"Clusters\": { \"desired_config\": {\"type\": \"$SITE\", \"tag\":\"$newTag\", $newProperties}}}"
      newFile="doSet_$newTag.json"
      echo "$finalJson" > "$newFile"
      echo "########## PUTting file:\"$FILENAME\" into config(type:\"$SITE\", tag:$newTag) via $newFile"
      curl -k -u $USERID:$PASSWD -X PUT -H "X-Requested-By: ambari" "$AMBARIURL/api/v1/clusters/$CLUSTER" --data @$newFile
      currentSiteTag
      echo "########## NEW Site:$SITE, Tag:$SITETAG";
    else
      echo "[ERROR] File \"$FILENAME\" should be in the following JSON format (\"properties_attributes\" is optional):";
      echo "[ERROR]   \"properties\": {";
      echo "[ERROR]     \"key1\": \"value1\",";
      echo "[ERROR]     \"key2\": \"value2\",";
      echo "[ERROR]   },";
      echo "[ERROR]   \"properties_attributes\": {";
      echo "[ERROR]     \"final\": {";
      echo "[ERROR]       \"key1\": \"value1\",";
      echo "[ERROR]       \"key2\": \"value2\",";
      echo "[ERROR]     }";
      echo "[ERROR]   }";
      exit 1;
    fi
  else
    # Fix: original message was missing the space before "to PUT".
    echo "[ERROR] Cannot find file \"$1\" to PUT";
    exit 1;
  fi
}
+
+
+#############################################
+## doGet()
+## @param Optional filename to save to
+#############################################
doGet () {
  # Fetch the current config (type $SITE, tag $SITETAG) from Ambari and
  # print its "properties"/"properties_attributes" sections to stdout, or
  # append them to the optional file named by $1 (truncated first).
  FILENAME="$1"
  # Fix: quote the operands — the original unquoted
  # `[ -n $FILENAME -a -f $FILENAME ]` collapsed to `[ -n -a -f ]` when no
  # filename was supplied and could mis-evaluate.
  if [ -n "$FILENAME" ] && [ -f "$FILENAME" ]; then
    rm -f "$FILENAME"
  fi
  currentSiteTag
  echo "########## Performing 'GET' on (Site:$SITE, Tag:$SITETAG)";
  # Parser state, mirroring doConfigUpdate.
  # Fix: currentLevel was previously used uninitialized (it only worked
  # because shell arithmetic treats an unset variable as 0).
  propertiesStarted=0
  currentLevel=0
  curl -k -s -u $USERID:$PASSWD "$AMBARIURL/api/v1/clusters/$CLUSTER/configurations?type=$SITE&tag=$SITETAG" | while read -r line; do
    # echo ">>> $line";
    if [ "$propertiesStarted" -eq 0 ]; then
      if [ "`echo $line | grep "\"properties\""`" -o "`echo $line | grep "\"properties_attributes\""`" ]; then
        propertiesStarted=$currentLevel
      fi
    fi
    if [ "$propertiesStarted" -gt "0" ]; then
      if [ -z "$FILENAME" ]; then
        echo $line
      else
        echo $line >> "$FILENAME"
      fi
    fi
    # Track JSON brace depth; leaving the section resets the flag below.
    if [ "`echo $line | grep -E "{$"`" ]; then
      currentLevel=$((currentLevel+1))
    elif [ "`echo $line | grep -E "},?$"`" ]; then
      currentLevel=$((currentLevel-1))
    fi
    if [ "$propertiesStarted" -eq "$currentLevel" ]; then
      propertiesStarted=0
    fi
  done
}
+
+case "$1" in
+ set)
+ if (($# == 6)); then
+ doConfigUpdate "set" # Individual key
+ elif (($# == 5)); then
+ doConfigFileUpdate $5 # File based
+ else
+ usage
+ fi
+ ;;
+ get)
+ if (($# == 4)); then
+ doGet
+ elif (($# == 5)); then
+ doGet $5
+ else
+ usage
+ fi
+ ;;
+ delete)
+ if (($# != 5)); then
+ usage
+ fi
+ doConfigUpdate "delete"
+ ;;
+ *)
+ usage
+ ;;
+esac
diff --git a/package/scripts/configure_others_service.py b/package/scripts/configure_others_service.py
new file mode 100755
index 0000000..ee78ba5
--- /dev/null
+++ b/package/scripts/configure_others_service.py
@@ -0,0 +1,79 @@
+#!/usr/bin/env python
+import sys, os, pwd, grp, signal, time
+from resource_management import *
+
def configureOtherService(if_ranger=False, security=False):
  """Push the configuration changes other services need for Hue to work.

  Builds a nested dict {config-site: {property: value}} covering the Hue
  proxy-user settings (core-site, webhcat-site, oozie-site, httpfs-site)
  plus the HBase/Hive tweaks Hue relies on, then applies each property
  through the bundled configs.sh helper (Ambari REST API).

  Args:
    if_ranger: True when a Ranger admin is present; adds the extra
      HBase thrift / Hive impersonation properties.
    security: True when the cluster is Kerberized; adds the HTTP
      proxy-user and HBase thrift Kerberos properties.
  """
  import params
  import status_params
  # Shape: {'configuration file1': {'key1': 'value1', 'key2': 'value2', ...},
  #         'configuration file2': {...},
  #         ...}
  services_configurations = {}
  services_configurations['core-site'] = {}
  services_configurations['core-site']['hadoop.proxyuser.hue.groups'] = '*'
  services_configurations['core-site']['hadoop.proxyuser.hue.hosts'] = '*'
  services_configurations['hdfs-site'] = {}
  services_configurations['hdfs-site']['dfs.namenode.acls.enabled'] = 'true'
  services_configurations['hbase-site'] = {}
  # Per-service additions, only for the services actually installed.
  if params.if_hbase_exist:
    services_configurations['core-site']['hadoop.proxyuser.hbase.groups'] = '*'
    services_configurations['core-site']['hadoop.proxyuser.hbase.hosts'] = '*'
    if if_ranger:
      services_configurations['hbase-site']['hbase.regionserver.thrift.http'] = 'true'
      services_configurations['hbase-site']['hbase.thrift.support.proxyuser'] = 'true'
  if params.if_hive_exist:
    services_configurations['core-site']['hadoop.proxyuser.hive.groups'] = '*'
    services_configurations['core-site']['hadoop.proxyuser.hive.hosts'] = '*'
    services_configurations['hive-site'] = {}
    services_configurations['hive-site']['hive.security.authorization.sqlstd.confwhitelist.append'] = 'hive.server2.logging.operation.verbose'
    services_configurations['webhcat-site'] = {}
    services_configurations['webhcat-site']['webhcat.proxyuser.hue.groups'] = '*'
    services_configurations['webhcat-site']['webhcat.proxyuser.hue.hosts'] = '*'
    if if_ranger:
      services_configurations['hive-site']['hive.server2.enable.impersonation'] = 'true'
  if params.if_oozie_exist:
    services_configurations['core-site']['hadoop.proxyuser.oozie.groups'] = '*'
    services_configurations['core-site']['hadoop.proxyuser.oozie.hosts'] = '*'
    services_configurations['oozie-site'] = {}
    services_configurations['oozie-site']['oozie.service.ProxyUserService.proxyuser.hue.groups'] = '*'
    services_configurations['oozie-site']['oozie.service.ProxyUserService.proxyuser.hue.hosts'] = '*'
  if params.if_spark_exist:
    services_configurations['core-site']['hadoop.proxyuser.spark.groups'] = '*'
    services_configurations['core-site']['hadoop.proxyuser.spark.hosts'] = '*'
  if params.dfs_ha_enabled:
    # With NameNode HA, Hue must go through HttpFS instead of WebHDFS.
    services_configurations['core-site']['hadoop.proxyuser.httpfs.groups'] = '*'
    services_configurations['core-site']['hadoop.proxyuser.httpfs.hosts'] = '*'
    services_configurations['httpfs-site'] = {}
    services_configurations['httpfs-site']['httpfs.proxyuser.hue.groups'] = '*'
    services_configurations['httpfs-site']['httpfs.proxyuser.hue.hosts'] = '*'

  if security:
    services_configurations['core-site']['hadoop.proxyuser.HTTP.groups'] = '*'
    services_configurations['core-site']['hadoop.proxyuser.HTTP.hosts'] = '*'
    services_configurations['core-site']['hue.kerberos.principal.shortname'] = 'hue'
    if params.if_hbase_exist:
      services_configurations['hbase-site']['hbase.thrift.security.qop'] = 'auth'
      services_configurations['hbase-site']['hbase.thrift.support.proxyuser'] = 'true'
      services_configurations['hbase-site']['hbase.regionserver.thrift.http'] = 'true'
      services_configurations['hbase-site']['hbase.thrift.kerberos.principal'] = params.HTTP_principal
      services_configurations['hbase-site']['hbase.thrift.keytab.file'] = params.HTTP_keytab
      services_configurations['hbase-site']['hbase.rpc.engine'] = 'org.apache.hadoop.hbase.ipc.SecureRpcEngine'

  # Apply every property via configs.sh against the Ambari server.
  # Fix: the original indexed dict.keys()/.values() positionally
  # (services_configurations.keys()[i]), which raises TypeError on
  # Python 3 because dict views are not subscriptable; iterate items().
  if isinstance(services_configurations, dict):
    for site_name, site_properties in services_configurations.items():
      if isinstance(site_properties, dict):
        for prop_key, prop_value in site_properties.items():
          #/var/lib/ambari-server/resources/scripts/configs.sh -u admin -p admin set ambari-server-host cluster_name core-site "hadoop_aaa" "123456"
          #cmd = format(params.service_packagedir + "/scripts/configs.sh -u " + params.ambari_user + " -p " + params.ambari_user_password + " set " + params.ambari_server_host + " " + params.cluster_name + " " + key1 + " " + key2 + " "+ value2)
          cmd = format(params.service_packagedir + "/scripts/configs.sh set " + params.ambari_server_host + " " + params.cluster_name + " " + site_name + " '" + prop_key + "' '" + prop_value + "'")
          Execute(cmd)
+
+
+
+
diff --git a/package/scripts/hue_server.py b/package/scripts/hue_server.py
new file mode 100755
index 0000000..3a80b4b
--- /dev/null
+++ b/package/scripts/hue_server.py
@@ -0,0 +1,186 @@
+#!/usr/bin/env python
+import sys, os, pwd, grp, signal, time
+from resource_management import *
+from subprocess import call
+from hue_service import hue_service
+from configure_others_service import configureOtherService
+
+class HueServer(Script):
+ """
+ Contains the interface definitions for methods like install,
+ start, stop, status, etc. for the Hue Server
+ """
+
+ def install(self, env):
+ # Download the Hue tarball from the cluster's HDP repo mirror, unpack it
+ # under hue_install_dir, link the Hue plugin jar into the Hadoop client
+ # lib dir, then run setup_hue.sh. Creates the hue user/group and the
+ # log/pid directories first.
+ # Import properties defined in -config.xml file from params class
+ import params
+ env.set_params(params)
+ self.install_packages(env)
+
+ # Create the hue group/user only if they do not already exist.
+ try: grp.getgrnam(params.hue_group)
+ except KeyError: Group(group_name=params.hue_group)
+ try: pwd.getpwnam(params.hue_user)
+ except KeyError: User(username=params.hue_user,
+ gid=params.hue_group,
+ groups=[params.hue_group],
+ ignore_failures=True)
+
+ Directory([params.hue_log_dir, params.hue_pid_dir],
+ mode=0755,
+ cd_access='a',
+ owner=params.hue_user,
+ group=params.hue_group,
+ )
+ File(params.hue_log,
+ mode=0644,
+ owner=params.hue_user,
+ group=params.hue_group,
+ content=''
+ )
+ # Wipe any previous Hue install before downloading a fresh tarball.
+ Execute('cd {0}; rm -rf hue*'.format(params.hue_install_dir))
+ # Execute('yum -y install hue')
+ # Derive the tarball URL from the HDP repo baseurl and fetch it,
+ # logging wget output to the Hue log file.
+ Execute('cd ' + params.hue_install_dir + '; cat /etc/yum.repos.d/HDP.repo | grep "baseurl" | awk -F \'=\' \'{print $2"hue/' + params.hue_package_name + '"}\' | xargs wget -O hue.tgz -a ' + params.hue_log)
+ Execute('cd {0}; tar -zxvf hue.tgz; rm -rf hue.tgz'.format(params.hue_install_dir))
+ Execute('mv {0}/hue-{1} {2}'.format(params.hue_install_dir, params.hue_version, params.hue_dir))
+ # Expose the Hue Hadoop plugin jar to the Hadoop client classpath.
+ Execute('ln -s {0}/desktop/libs/hadoop/java-lib/hue-plugins-{1}-SNAPSHOT.jar /usr/hdp/current/hadoop-client/lib'.format(params.hue_dir, params.hue_version))
+ Execute('ln -s {0} /usr/hdp/current/hue-server'.format(params.hue_dir))
+ Execute('cd {0}'.format(params.hue_dir))
+ if params.hue_bin_dir == 'UNDEFINED':
+ Logger.info("Error: Hue_bin is undefined")
+ # Ensure all Hue files owned by hue
+ Execute('chown -R {0}:{1} {2}'.format(params.hue_user, params.hue_group, params.hue_dir))
+ # chown log dir
+ Execute('chown -R {0}:{1} {2}'.format(params.hue_user, params.hue_group, params.hue_log_dir))
+ Execute('find {0} -iname "*.sh" | xargs chmod +x'.format(params.service_packagedir))
+ # Setup hue
+ # Form command to invoke setup_hue.sh with its arguments and execute it
+ Execute('{0}/scripts/setup_hue.sh {1} {2} >> {3}'.format(params.service_packagedir, params.hue_dir,params.hue_user, params.hue_log))
+ Logger.info("Hue_installation is completed")
+
+ def configure(self, env):
+ # Regenerate hue.ini: render each per-model template (desktop, hadoop,
+ # hive, spark, oozie, pig, hbase, solr, zookeeper, rdbms) to a temp
+ # fragment, then concatenate the fragments into the final config file.
+ import params
+ env.set_params(params)
+
+ Directory(params.hue_tmp_conf,
+ mode=0755,
+ cd_access='a',
+ owner=params.hue_user,
+ group=params.hue_group,
+ )
+ # Remove the previous hue.ini; it is rebuilt from the fragments below.
+ File(params.hue_conf_file,
+ action = "delete",
+ owner=params.hue_user
+ )
+ log_content=InlineTemplate(params.hue_log_content)
+ File(format("{hue_conf}/log.conf"),
+ content=log_content,
+ owner=params.hue_user
+ )
+ # Write content field to hue configuration file
+ desktop_content=InlineTemplate(params.hue_desktop_content)
+ File(format("{hue_tmp_conf}/hue.desktop.tmp.ini"),
+ content=desktop_content,
+ owner=params.hue_user
+ )
+ hadoop_content=InlineTemplate(params.hue_hadoop_content)
+ File(format("{hue_tmp_conf}/hue.hadoop.tmp.ini"),
+ content=hadoop_content,
+ owner=params.hue_user
+ )
+ hive_content=InlineTemplate(params.hue_hive_content)
+ File(format("{hue_tmp_conf}/hue.hive.tmp.ini"),
+ content=hive_content,
+ owner=params.hue_user
+ )
+ spark_content=InlineTemplate(params.hue_spark_content)
+ File(format("{hue_tmp_conf}/hue.spark.tmp.ini"),
+ content=spark_content,
+ owner=params.hue_user
+ )
+ oozie_content=InlineTemplate(params.hue_oozie_content)
+ File(format("{hue_tmp_conf}/hue.oozie.tmp.ini"),
+ content=oozie_content,
+ owner=params.hue_user
+ )
+ pig_content=InlineTemplate(params.hue_pig_content)
+ File(format("{hue_tmp_conf}/hue.pig.tmp.ini"),
+ content=pig_content,
+ owner=params.hue_user
+ )
+ hbase_content=InlineTemplate(params.hue_hbase_content)
+ File(format("{hue_tmp_conf}/hue.hbase.tmp.ini"),
+ content=hbase_content,
+ owner=params.hue_user
+ )
+ solr_content=InlineTemplate(params.hue_solr_content)
+ File(format("{hue_tmp_conf}/hue.solr.tmp.ini"),
+ content=solr_content,
+ owner=params.hue_user
+ )
+ zookeeper_content=InlineTemplate(params.hue_zookeeper_content)
+ File(format("{hue_tmp_conf}/hue.zookeeper.tmp.ini"),
+ content=zookeeper_content,
+ owner=params.hue_user
+ )
+ rdbms_content=InlineTemplate(params.hue_rdbms_content)
+ File(format("{hue_tmp_conf}/hue.rdbms.tmp.ini"),
+ content=rdbms_content,
+ owner=params.hue_user
+ )
+ # Shell glob concatenation: fragment order follows the glob sort order
+ # of the hue.*.tmp.ini names.
+ Execute (format("cat {hue_tmp_conf}/hue.*.tmp.ini >> {hue_conf_file}"), user=params.hue_user)
+
+ # Call start.sh to start the service
+ def start(self, env):
+ # Re-renders the config, stops any running instance, then launches the
+ # supervisor in the background and records its pid for status checks.
+ import params
+ import status_params
+ env.set_params(params)
+ self.configure(env)
+ self.stop(env)
+ File(status_params.hue_server_pidfile,
+ mode=0644,
+ owner=params.hue_user,
+ group=params.hue_group,
+ content=''
+ )
+ Execute(format("{hue_bin_dir}/supervisor >> {hue_log} 2>&1 &"), user=params.hue_user)
+ # Scrape the supervisor pid from ps; written to the pidfile used by status().
+ Execute('ps -ef | grep hue | grep supervisor | grep -v grep | awk \'{print $2}\' > ' + status_params.hue_server_pidfile, user=params.hue_user)
+
+ def stop(self, env):
+ import params
+ import status_params
+ env.set_params(params)
+ # Kill the process of Hue
+ # NOTE(review): pattern-based kill; excludes livy so the Livy server
+ # managed by livy_server.py is not taken down with Hue.
+ Execute ('ps -ef | grep hue | grep -v grep | grep -v livy | awk \'{print $2}\' | xargs kill -9', user=params.hue_user, ignore_failures=True)
+ File(status_params.hue_server_pidfile,
+ action = "delete",
+ owner = params.hue_user
+ )
+
+ #Called to get status of the Hue service using the pidfile
+ def status(self, env):
+ import status_params
+ env.set_params(status_params)
+ #use built-in method to check status using pidfile
+ check_process_status(status_params.hue_server_pidfile)
+
+ def usersync(self, env):
+ # Custom command: import the host's Unix accounts into Hue's user admin.
+ import params
+ env.set_params(params)
+ Execute (format("{hue_bin_dir}/hue useradmin_sync_with_unix"), user=params.hue_user)
+
+ def databasesync(self, env):
+ # Custom command: create/migrate Hue's database schema (Django syncdb
+ # + migrate, non-interactive).
+ import params
+ env.set_params(params)
+ Execute (format("{hue_bin_dir}/hue syncdb --noinput"), user=params.hue_user)
+ Execute (format("{hue_bin_dir}/hue migrate"), user=params.hue_user)
+
+ #Called to add configurations to other service
+ def addconfigurations(self, env):
+ import params
+ env.set_params(params)
+ if_ranger = params.has_ranger_admin
+ security = params.security_enabled
+ configureOtherService(if_ranger, security)
+
+if __name__ == "__main__":
+ HueServer().execute()
diff --git a/package/scripts/hue_service.py b/package/scripts/hue_service.py
new file mode 100755
index 0000000..49fea51
--- /dev/null
+++ b/package/scripts/hue_service.py
@@ -0,0 +1,27 @@
+#!/usr/bin/env python
+from resource_management import *
+
+def hue_service(name=None, searchingName=None, action=None):
+ """Start or stop an auxiliary Hue daemon (e.g. the Livy server).
+
+ name -- role suffix; used both for the pid file name and as the
+ `hue <role>` subcommand to launch.
+ searchingName -- substring used to find the daemon's pid in `ps` output.
+ action -- 'start' or 'stop'; any other value is a no-op.
+
+ NOTE(review): `format()` here is resource_management's interpolating
+ format — it appears to resolve {role}/{pid_file} from this function's
+ locals and {hue_bin_dir}/{hue_pid_dir} from params, so the local names
+ must not be changed. Confirm against resource_management docs.
+ """
+
+ import params
+ role = name
+ processName = searchingName
+ cmd = format("{hue_bin_dir}/hue")
+ pid_file = format("{hue_pid_dir}/hue-{role}.pid")
+ # (Re)create an empty pid file owned by the hue user.
+ File(pid_file,
+ mode=0644,
+ owner=params.hue_user,
+ group=params.hue_group,
+ content=''
+ )
+ if action == 'start':
+ # Launch in the background, then scrape the pid from ps into pid_file.
+ daemon_cmd = format("{cmd} {role} &")
+ Execute(daemon_cmd, user = params.hue_user)
+ Execute('ps -ef | grep hue | grep -v grep | grep ' + processName + ' | awk \'{print $2}\' > ' + pid_file, user = params.hue_user)
+
+ elif action == 'stop':
+ # Hard-kill via the recorded pid; ignore failures if already dead.
+ Execute (format("cat {pid_file} | xargs kill -9"), user=params.hue_user, ignore_failures=True)
+ File(pid_file,
+ action = "delete",
+ owner=params.hue_user
+ )
diff --git a/package/scripts/livy_server.py b/package/scripts/livy_server.py
new file mode 100755
index 0000000..6cb69b2
--- /dev/null
+++ b/package/scripts/livy_server.py
@@ -0,0 +1,47 @@
+#!/usr/bin/env python
+import sys, os
+from resource_management import *
+from hue_service import hue_service
+from subprocess import call
+
+SERVICE_NAME = "livy_server"
+
+class LivyServer(Script):
+
+  """
+  Contains the interface definitions for methods like install,
+  start, stop, status, etc. for the Livy Server
+  """
+
+  def install(self, env):
+    # NOTE(review): install only runs configure(); the Hue/Livy binaries are
+    # presumably laid down by the Hue server component -- confirm.
+    import params
+    env.set_params(params)
+    self.configure(env)
+    Logger.info("Livy Server installation is completed")
+
+  def configure(self, env):
+    # No Livy-specific files are rendered here; only params are loaded.
+    import params
+    env.set_params(params)
+    Logger.info("Livy Server configuration is completed")
+
+  def start(self, env):
+    import params
+    env.set_params(params)
+    self.configure(env)
+    # Best-effort kill of any stray livy process so the pid recorded by
+    # hue_service() is unambiguous; ignored when nothing is running.
+    Execute('ps -ef | grep hue | grep livy | grep -v grep | awk \'{print $2}\' | xargs kill -9', user=params.hue_user, ignore_failures=True)
+    hue_service(SERVICE_NAME, 'livy', action = 'start')
+
+  def stop(self, env):
+    import params
+    env.set_params(params)
+    hue_service(SERVICE_NAME, 'livy', action = 'stop')
+
+  #Called to get status of the Livy server using the pidfile
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    #use built-in method to check status using pidfile
+    check_process_status(status_params.hue_livyserver_pidfile)
+
+# Entry point: dispatch the Ambari-requested command to LivyServer's methods.
+if __name__ == "__main__":
+  LivyServer().execute()
\ No newline at end of file
diff --git a/package/scripts/params.py b/package/scripts/params.py
new file mode 100755
index 0000000..07105c1
--- /dev/null
+++ b/package/scripts/params.py
@@ -0,0 +1,252 @@
+#!/usr/bin/env python
+import status_params
+import os
+import re
+from resource_management import *
+from ambari_commons.os_check import OSCheck
+from ambari_commons.str_utils import cbool, cint
+
+# config object that holds the configurations declared in the -config.xml file
+config = Script.get_config()
+# Ambari-agent scratch directory for temporary files generated by this service
+tmp_dir = Script.get_tmp_dir()
+#e.g. /var/lib/ambari-agent/cache/stacks/HDP/2.3/services/HUE/package
+service_packagedir = os.path.realpath(__file__).split('/scripts')[0]
+hostname = config['hostname']
+cluster_name = str(config['clusterName'])
+# First (and assumed only) Ambari server host
+ambari_server_host = default("/clusterHostInfo/ambari_server_host", [])[0]
+
+# All Hue applications this service knows how to enable
+hue_apps = ['security','pig','filebrowser','jobbrowser','zookeeper','search','rdbms','metastore','spark','beeswax','jobsub','hbase','oozie','indexer']
+# Comma separated list of apps to not load at server startup.
+# More apps are appended below when their backing cluster service is absent.
+app_blacklists = ['security','sqoop','impala']
+
+# Configurations of security and kerberos
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+if security_enabled:
+ HTTP_principal = config['configurations']['hdfs-site']['dfs.web.authentication.kerberos.principal']
+ HTTP_keytab = config['configurations']['hdfs-site']['dfs.web.authentication.kerberos.keytab']
+ hue_principal = config['configurations']['hue-Desktop']['hue.kerberos.principal'].replace('_HOST',hostname.lower())
+ hue_keytab = config['configurations']['hue-Desktop']['hue.kerberos.keytab']
+ kinit_path = config['configurations']['hue-Desktop']['kinit.path']
+ zk_principal = config['configurations']['zookeeper-env']['zookeeper_principal_name'].replace('_HOST',hostname.lower())
+ zk_keytab = config['configurations']['zookeeper-env']['zookeeper_principal_name']
+
+# Configurations of HDFS
+namenode_host = default("/clusterHostInfo/namenode_host", [])
+namenode_host.sort()
+namenode_address = None
+# Prefer the explicit RPC address; fall back to fs.defaultFS.
+if 'dfs.namenode.rpc-address' in config['configurations']['hdfs-site']:
+  namenode_rpcaddress = config['configurations']['hdfs-site']['dfs.namenode.rpc-address']
+  namenode_address = format("hdfs://{namenode_rpcaddress}")
+else:
+  namenode_address = config['configurations']['core-site']['fs.defaultFS']
+# To judge whether the namenode HA mode
+logical_name = ''
+dfs_ha_enabled = False
+dfs_ha_nameservices = default("/configurations/hdfs-site/dfs.nameservices", None)
+dfs_ha_namenode_ids = default(format("/configurations/hdfs-site/dfs.ha.namenodes.{dfs_ha_nameservices}"), None)
+dfs_ha_namemodes_ids_list = []
+if dfs_ha_namenode_ids:
+  dfs_ha_namemodes_ids_list = dfs_ha_namenode_ids.split(",")
+  dfs_ha_namenode_ids_array_len = len(dfs_ha_namemodes_ids_list)
+  # HA requires at least two configured namenode ids.
+  if dfs_ha_namenode_ids_array_len > 1:
+    dfs_ha_enabled = True
+if dfs_ha_enabled:
+  # Hostname of the active and standby HDFS Namenode (only used when HA is enabled)
+  dfs_ha_namenode_active = default("/configurations/hadoop-env/dfs_ha_initial_namenode_active", None)
+  dfs_ha_namenode_standby = default("/configurations/hadoop-env/dfs_ha_initial_namenode_standby", None)
+  namenode_address = format('hdfs://{dfs_ha_nameservices}')
+  logical_name = dfs_ha_nameservices
+  hdfs_httpfs_host = config['configurations']['hue-Hadoop']['HDFS.HttpFS.host']
+  # if kerberos is disabled, using HttpFS . Otherwise using WebHDFS.
+  # NOTE(review): both branches point at hdfs_httpfs_host; the else branch
+  # uses the WebHDFS port 50070 on that host -- confirm this is intended.
+  if hdfs_httpfs_host in namenode_host and not security_enabled:
+    webhdfs_url = format('http://' + hdfs_httpfs_host + ':14000/webhdfs/v1')
+  else:
+    webhdfs_url = format('http://' + hdfs_httpfs_host + ':50070/webhdfs/v1')
+else:
+  # Non-HA: talk straight to the namenode's HTTP address.
+  dfs_namenode_http_address = config['configurations']['hdfs-site']['dfs.namenode.http-address']
+  webhdfs_url = format('http://' + dfs_namenode_http_address + '/webhdfs/v1')
+
+# Configurations of Yarn
+resourcemanager_hosts = default("/clusterHostInfo/rm_host", [])
+resourcemanager_port = config['configurations']['yarn-site']['yarn.resourcemanager.address'].split(':')[-1]
+resourcemanager_ha_enabled = False
+# More than one RM host implies ResourceManager HA.
+if len(resourcemanager_hosts) > 1:
+  resourcemanager_ha_enabled = True
+if resourcemanager_ha_enabled:
+  # RM HA: expose both rm1/rm2 endpoints so Hue can fail over.
+  resourcemanager_host1 = config['configurations']['yarn-site']['yarn.resourcemanager.hostname.rm1']
+  resourcemanager_host2 = config['configurations']['yarn-site']['yarn.resourcemanager.hostname.rm2']
+  resourcemanager_webapp_address1 = config['configurations']['yarn-site']['yarn.resourcemanager.webapp.address.rm1']
+  resourcemanager_webapp_address2 = config['configurations']['yarn-site']['yarn.resourcemanager.webapp.address.rm2']
+  resourcemanager_api_url1 = format('http://{resourcemanager_webapp_address1}')
+  resourcemanager_api_url2 = format('http://{resourcemanager_webapp_address2}')
+  proxy_api_url1 = resourcemanager_api_url1
+  proxy_api_url2 = resourcemanager_api_url2
+else:
+  resourcemanager_host1 = resourcemanager_hosts[0]
+  resourcemanager_webapp_address1 = config['configurations']['yarn-site']['yarn.resourcemanager.webapp.address']
+  resourcemanager_api_url1 = format('http://{resourcemanager_webapp_address1}')
+  proxy_api_url1 = resourcemanager_api_url1
+# NOTE(review): 'histrory' is a typo, but this module's top-level names are
+# its public surface (templates/scripts may reference them), so it is kept.
+# 19888 is the MapReduce JobHistory web UI port.
+histroryserver_host = default("/clusterHostInfo/hs_host", [])
+history_server_api_url = format('http://{histroryserver_host[0]}:19888')
+slave_hosts = default("/clusterHostInfo/slave_hosts", [])
+
+# Configurations of Oozie
+# Pig and Jobsub service are depended on oozie in Hue
+oozie_servers_hosts = default("/clusterHostInfo/oozie_server", [])
+if_oozie_exist = False
+# Without an Oozie server, blacklist every app that submits through it.
+if len(oozie_servers_hosts) == 0:
+  app_blacklists.append('pig')
+  app_blacklists.append('jobsub')
+  app_blacklists.append('oozie')
+else:
+  if_oozie_exist = True
+  oozie_url = config['configurations']['oozie-site']['oozie.base.url']
+
+# Configurations of Solr
+solr_master_hosts = default("/clusterHostInfo/solr_master_hosts", [])
+solr_master_hosts.sort()
+if_solr_exist = False
+if len(solr_master_hosts) == 0:
+  # No Solr in the cluster: disable Hue's search app.
+  app_blacklists.append('search')
+else:
+  if_solr_exist = True
+  solr_port = config['configurations']['solr-env']['solr.port']
+  solr_znode = config['configurations']['solr-config']['solr.znode']
+  # Hue only talks to the first (sorted) Solr master.
+  solr_master_host = solr_master_hosts[0]
+  solr_url = format('http://' + solr_master_host + ':' + str(solr_port) + solr_znode + '/')
+
+# Configurations of Hive and Pig
+# Hive service is depended on Pig in ambari
+hive_server_hosts = default("/clusterHostInfo/hive_server_host", [])
+if_hive_exist = False
+if_pig_exist = False
+if len(hive_server_hosts) == 0:
+  # No HiveServer2: disable the SQL editors and (per the note above) Pig.
+  app_blacklists.append('beeswax')
+  app_blacklists.append('metastore')
+  app_blacklists.append('pig')
+else:
+  if_hive_exist = True
+  if_pig_exist = True
+  hive_server_host = config['clusterHostInfo']['hive_server_host'][0]
+  hive_transport_mode = config['configurations']['hive-site']['hive.server2.transport.mode']
+  # HiveServer2 listens on a different port for http vs binary transport.
+  if hive_transport_mode.lower() == "http":
+    hive_server_port = config['configurations']['hive-site']['hive.server2.thrift.http.port']
+  else:
+    hive_server_port = default('/configurations/hive-site/hive.server2.thrift.port',"10000")
+
+# Configurations of Hbase
+hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
+hbase_clusters = []
+if_hbase_exist = False
+if len(hbase_master_hosts) == 0:
+  app_blacklists.append('hbase')
+else:
+  if_hbase_exist = True
+  # Build "(ClusterN|host:9090)" entries for Hue's hbase_clusters setting;
+  # 9090 is assumed to be the HBase Thrift server port -- TODO confirm.
+  for i in range(len(hbase_master_hosts)):
+    hbase_clusters.append(format("(Cluster" + str(i+1) + "|" + hbase_master_hosts[i] + ":9090)"))
+  hbase_cluster = ",".join(hbase_clusters)
+
+# Configurations of Zookeeper
+zookeeper_hosts = default("/clusterHostInfo/zookeeper_hosts", [])
+zookeeper_hosts.sort()
+zookeeper_client_port = default('/configurations/zoo.cfg/clientPort', None)
+zookeeper_host_ports = []
+if len(zookeeper_hosts) == 0:
+  app_blacklists.append('zookeeper')
+else:
+  # Build host:port pairs; 2181 is the ZooKeeper default client port.
+  if zookeeper_client_port is not None:
+    for i in range(len(zookeeper_hosts)):
+      zookeeper_host_ports.append(format(zookeeper_hosts[i] + ":{zookeeper_client_port}"))
+  else:
+    for i in range(len(zookeeper_hosts)):
+      zookeeper_host_ports.append(format(zookeeper_hosts[i] + ":2181"))
+  zookeeper_host_port = ",".join(zookeeper_host_ports)
+  # NOTE(review): 9998 appears to be the ZooKeeper REST interface port
+  # expected by Hue's zookeeper app on the first host -- confirm.
+  rest_url = format("http://" + zookeeper_hosts[0] + ":9998")
+
+# Configurations of Spark
+# Livy service is depended on Spark thriftserver
+spark_thriftserver_hosts = default("/clusterHostInfo/spark_thriftserver_hosts", [])
+# Defaults used when no Spark thrift server is present in the cluster.
+spark_thriftserver_host = "localhost"
+spark_hiveserver2_thrift_port = "10002"
+if_spark_exist = False
+if len(spark_thriftserver_hosts) == 0:
+  app_blacklists.append('spark')
+else:
+  if_spark_exist = True
+  spark_thriftserver_host = spark_thriftserver_hosts[0]
+  spark_hiveserver2_thrift_port = str(config['configurations']['spark-hive-site-override']['hive.server2.thrift.port']).strip()
+  spark_jobhistoryserver_hosts = default("/clusterHostInfo/spark_jobhistoryserver_hosts", [])
+  # History server is optional; fall back to localhost when absent.
+  if len(spark_jobhistoryserver_hosts) > 0:
+    spark_history_server_host = spark_jobhistoryserver_hosts[0]
+  else:
+    spark_history_server_host = "localhost"
+  spark_history_ui_port = config['configurations']['spark-defaults']['spark.history.ui.port']
+  spark_history_server_url = format("http://{spark_history_server_host}:{spark_history_ui_port}")
+
+# Configurations of Hue metastore database
+# Valid Django database engines for Hue's own metadata store.
+metastore_database_engines = ['sqlite3','mysql','postgresql_psycopg2','oracle']
+metastore_database_engine = config['configurations']['hue-Desktop']['metastore.database.engine'].strip().lower()
+metastore_database_host = config['configurations']['hue-Desktop']['metastore.database.host']
+metastore_database_port = str(config['configurations']['hue-Desktop']['metastore.database.port']).strip()
+metastore_database_name = config['configurations']['hue-Desktop']['metastore.database.name'].strip()
+metastore_database_user = config['configurations']['hue-Desktop']['metastore.ConnectionUserName'].strip()
+metastore_database_password = str(config['configurations']['hue-Desktop']['metastore.ConnectionPassword']).strip()
+metastore_databass_options = config['configurations']['hue-Desktop']['metastore.database.options'].strip()
+# Fall back to the embedded sqlite3 engine on unknown/empty values.
+if metastore_database_engine not in metastore_database_engines or not metastore_database_engine:
+  metastore_database_engine = 'sqlite3'
+
+# Configurations of RDBMS
+# Valid engines for Hue's rdbms (external database query) app.
+RDBMS_database_engines = ['sqlite','mysql','postgresql','oracle']
+RDBMS_database_engine = config['configurations']['hue-RDBMS']['Database.engine'].strip().lower()
+RDBMS_nice_name = config['configurations']['hue-RDBMS']['Nice.name'].strip()
+RDBMS_database_host = config['configurations']['hue-RDBMS']['Database.host']
+RDBMS_database_port = str(config['configurations']['hue-RDBMS']['Database.port']).strip()
+RDBMS_database_name = config['configurations']['hue-RDBMS']['Database.name'].strip()
+RDBMS_database_user = config['configurations']['hue-RDBMS']['Database.user'].strip()
+RDBMS_database_password = str(config['configurations']['hue-RDBMS']['Database.password']).strip()
+RDBMS_options = config['configurations']['hue-RDBMS']['options'].strip()
+# Unknown/empty engine: fall back to sqlite pointed at Hue's own db file.
+if RDBMS_database_engine not in RDBMS_database_engines or not RDBMS_database_engine:
+  RDBMS_database_engine = 'sqlite'
+  RDBMS_database_name = '/usr/local/hue/desktop/desktop.db'
+
+# Merge the operator-supplied blacklist (comma separated) with the
+# service-derived one; only known app names are accepted, duplicates skipped.
+user_app_blacklists = config['configurations']['hue-Desktop']['app.blacklist'].split(',')
+if len(user_app_blacklists) > 0:
+  for user_app_blacklist in user_app_blacklists:
+    if user_app_blacklist in hue_apps and user_app_blacklist not in app_blacklists:
+      app_blacklists.append(user_app_blacklist)
+# Final comma-separated value written to Hue's ini.
+app_blacklist = ','.join(app_blacklists)
+
+# Ranger hosts
+ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", [])
+# True when at least one Ranger Admin is deployed in the cluster.
+has_ranger_admin = not len(ranger_admin_hosts) == 0
+
+# Configurations of Hue
+# Install layout: Hue lives under /usr/local/hue.
+hue_install_dir = '/usr/local'
+hue_dir = hue_install_dir + '/hue'
+hue_conf = hue_dir + '/desktop/conf'
+# Main Hue ini rendered by this service.
+hue_conf_file = format("{hue_conf}/pseudo-distributed.ini")
+# Virtualenv bin dir containing the 'hue' CLI.
+hue_bin_dir = hue_dir + '/build/env/bin'
+hue_tmp_conf= tmp_dir + '/hue_tmp_conf'
+hue_user = config['configurations']['hue-env']['hue.user']
+hue_group = config['configurations']['hue-env']['hue.group']
+hue_log_dir = config['configurations']['hue-env']['hue.log.dir']
+hue_pid_dir = config['configurations']['hue-env']['hue.pid.dir']
+hue_port = config['configurations']['hue-env']['hue.port']
+hue_package_name = config['configurations']['hue-env']['hue.package.name']
+hue_version = config['configurations']['hue-env']['hue.version']
+hue_log = format("{hue_log_dir}/hue-install.log")
+secret_key = config['configurations']['hue-Desktop']['secret.key']
+
+# Raw template bodies ('content' of each hue-* config type) used to render
+# the corresponding sections of Hue's ini and log config.
+hue_desktop_content = config['configurations']['hue-Desktop']['content']
+hue_hadoop_content = config['configurations']['hue-Hadoop']['content']
+hue_hive_content = config['configurations']['hue-Hive']['content']
+hue_spark_content = config['configurations']['hue-Spark']['content']
+hue_oozie_content = config['configurations']['hue-Oozie']['content']
+hue_pig_content = config['configurations']['hue-Pig']['content']
+hue_hbase_content = config['configurations']['hue-Hbase']['content']
+hue_solr_content = config['configurations']['hue-Solr']['content']
+hue_zookeeper_content = config['configurations']['hue-Zookeeper']['content']
+hue_rdbms_content = config['configurations']['hue-RDBMS']['content']
+hue_log_content = config['configurations']['hue-log4j-env']['content']
+
+
diff --git a/package/scripts/setup_hue.sh b/package/scripts/setup_hue.sh
new file mode 100755
index 0000000..c395ddc
--- /dev/null
+++ b/package/scripts/setup_hue.sh
@@ -0,0 +1,32 @@
+#!/bin/bash
+# Bootstrap helper for the HUE Ambari service.
+#   $1 HUE_PATH - install prefix, e.g. /usr/local/hue
+#   $2 HUE_USER - local account that will own and run Hue
+HUE_PATH=$1
+# Add Hue user
+HUE_USER=$2
+echo "Starting Hue install"
+# Create the local hue user only if it does not already exist.
+getent passwd $HUE_USER
+if [ $? -eq 0 ]; then
+  echo "the user exists, no need to create"
+else
+  echo "creating hue user"
+  adduser $HUE_USER
+fi
+# Create the user's HDFS home directory if missing.
+hadoop fs -test -d /user/$HUE_USER
+# BUG FIX: 'hadoop fs -test' signals "absent" with any non-zero exit code,
+# so test with -ne 0 instead of -eq 1.
+if [ $? -ne 0 ]; then
+  echo "Creating user dir in HDFS"
+  sudo -u hdfs hdfs dfs -mkdir -p /user/$HUE_USER
+  # BUG FIX: chown the directory we just created; this was hardcoded to
+  # /user/hue and broke for any other HUE_USER value.
+  sudo -u hdfs hdfs dfs -chown $HUE_USER /user/$HUE_USER
+fi
+
+# compile source code
+# cd $HUE_PATH
+# /usr/bin/make ./apps
+# chown -R hue:hue $HUE_PATH
+
+#add the environment variable to /etc/profile
+sed -i '$a## ------------SPARK_HOME and HADOOP_CONF_DIR--------------------- ##' /etc/profile
+sed -i '$aexport SPARK_HOME=/usr/hdp/current/spark-client' /etc/profile
+sed -i '$aexport PATH=$PATH:$SPARK_HOME/bin:$SPARK_HOME/sbin' /etc/profile
+sed -i '$aexport HADOOP_CONF_DIR=/usr/hdp/current/hadoop-client/conf' /etc/profile
+source /etc/profile
diff --git a/package/scripts/status_params.py b/package/scripts/status_params.py
new file mode 100755
index 0000000..cd0297c
--- /dev/null
+++ b/package/scripts/status_params.py
@@ -0,0 +1,8 @@
+#!/usr/bin/env python
+# Minimal params evaluated for STATUS commands only; Ambari loads this
+# instead of the full params.py so status checks stay cheap.
+from resource_management import *
+
+config = Script.get_config()
+
+hue_pid_dir = config['configurations']['hue-env']['hue.pid.dir']
+# Pid files written by hue_service.py; consumed by check_process_status().
+hue_server_pidfile = format("{hue_pid_dir}/hue-server.pid")
+hue_livyserver_pidfile = format("{hue_pid_dir}/hue-livy_server.pid")
\ No newline at end of file