from pyhive import hive
import re, os, time
import boto3
import json
import sys, getopt


def main(argv):
    """Connect to a Hive server over LDAP using credentials held in AWS
    Secrets Manager, (re)create the ``nyc_taxi_parquet`` external table,
    and print a five-row sample.

    argv -- command-line arguments (sys.argv[1:]):
            -r/--region_name  AWS region of the secret
            -s/--secret_id    Secrets Manager secret id (JSON with
                              userName/password/domain keys)
            -h/--host_name    Hive server hostname

    Exits with status 2 on bad/missing arguments.
    """
    usage = "Usage: python3 demo-hive-beeline.py -r regionName -s secretId -h hostName"
    regionName = ''
    secretId = ''
    hostName = ''

    try:
        if len(argv) < 6:
            print(usage)
            sys.exit(2)
        opts, args = getopt.getopt(argv, "r:s:h:", ["region_name=", "secret_id=", "host_name="])
    except getopt.GetoptError:
        print(usage)
        sys.exit(2)
    for opt, arg in opts:
        if opt in ("-r", "--region_name"):
            regionName = arg
        elif opt in ("-s", "--secret_id"):
            secretId = arg
        elif opt in ("-h", "--host_name"):
            hostName = arg
    if not regionName or not secretId or not hostName:
        print(usage)
        sys.exit(2)

    # Fetch the LDAP credentials; parse the JSON payload once rather than
    # once per field.
    secrets_client = boto3.client('secretsmanager', region_name=regionName)
    secrets_response = secrets_client.get_secret_value(
        SecretId=secretId,
        VersionStage='AWSCURRENT'
    )
    secret = json.loads(secrets_response['SecretString'])
    # LDAP login name is user@domain.
    user = "{}@{}".format(secret["userName"], secret["domain"])
    password = str(secret["password"])

    port = 10000  # Check port forwarding if using localhost to execute this script.
    conn = hive.Connection(host=hostName, port=port, username=user,
                           password=password, database="default", auth='LDAP')
    try:
        cur = conn.cursor()
        cur.execute('use default')
        # IF EXISTS: the first run (table absent) must not fail the script.
        cur.execute('drop table if exists nyc_taxi_parquet')
        cur.execute("CREATE EXTERNAL TABLE IF NOT EXISTS `nyc_taxi_parquet`(`vendorid` bigint,`tip_amount` double) ROW FORMAT SERDE 'org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe' STORED AS INPUTFORMAT 'org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat' OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat' LOCATION 's3://aws-bigdata-blog/artifacts/aws-blog-emr-ad/data/' TBLPROPERTIES ('CrawlerSchemaDeserializerVersion'='1.0','CrawlerSchemaSerializerVersion'='1.0','UPDATED_BY_CRAWLER'='nyctaxi_yellow_parquet','averageRecordSize'='52','classification'='parquet','compressionType'='none','objectCount'='28','recordCount'='17251952','sizeKey'='341952085','typeOfData'='file')")
        cur.execute('show tables')
        cur.execute('select * from nyc_taxi_parquet limit 5')
        result = cur.fetchall()
    finally:
        # Always release the Hive connection, even if a query fails.
        conn.close()

    print('select * from nyc_taxi_parquet limit 5;\n\n')
    print("Vendor ID\t|\tTip Amount")
    print("-----------------------------------------")
    for eachItem in result:
        print("{}\t\t|\t{}".format(eachItem[0], eachItem[1]))

# Script entry point: pass only the user-supplied arguments to main().
if __name__ == "__main__":
    main(sys.argv[1:])
