Ad Widget

Collapse

host uptime by fetching clock,value from history_uint table in python3

Collapse
X
 
  • Time
  • Show
Clear All
new posts
  • Anna Patil
    Junior Member
    • May 2023
    • 1

    #1

    host uptime by fetching clock,value from history_uint table in python3

    Hi All,

    This is my first try at getting daily uptime and average latency from the history_uint table for each host through Python. There are two databases: one is Zabbix 6.2.0's own database, while the second database is for collecting and storing data for a daily report. Is my calculation for uptime correct?

    Code:
    import requests
    import json
    import pandas as pd
    import datetime
    import pymysql
    from datetime import datetime, timedelta
    import os
    
    # MySQL database connection credentials (source Zabbix database, read-only use).
    # NOTE(review): credentials are hard-coded in the script; consider loading
    # them from environment variables or a config file kept out of version control.
    host = 'localhost'
    port = 3306
    username = 'xxxx'
    password = 'xxxx'
    database = 'zabbix'
    
    
    # Local MySQL database connection credentials (destination database that
    # stores the computed daily report rows, e.g. table `dailyavailability`).
    lhost = 'localhost'
    lport = 3306
    lusername = 'xxxx'
    lpassword = 'xxxx@123'
    ldatabase = 'zabbixdb'
    
    # Log in to the Zabbix API and obtain a session token.
    def generate_token(url, username, password):
        """Call the Zabbix `user.login` method and return the auth token.

        Returns whatever api_request() yields for the login call (the token
        string on success, None on failure).
        """
        login_payload = {
            "jsonrpc": "2.0",
            "method": "user.login",
            "params": {"user": username, "password": password},
            "id": 1,
        }
        return api_request(url, login_payload)
    
    
    def api_request(url, payload):
        """POST a JSON-RPC payload to the Zabbix API and return its 'result'.

        Returns None when the payload is empty, when the HTTP request does not
        return 200, or when the API reply contains an error instead of a result.
        """
        if not payload:
            return None
        headers = {'Content-Type': 'application/json-rpc'}
        # make the API request
        response = requests.post(url, headers=headers, json=payload)
        print('response code: {}'.format(response.status_code))
        if response.status_code != 200:
            print("unsuccessful")  # fixed typo in original message
            return None  # Unsuccessful HTTP exchange

        # Parse the body exactly once (the original called json.loads twice).
        response_data = response.json()
        if 'result' not in response_data:
            print("No results found: {}".format(response_data['error']['data']))
            return None  # API-level error

        return response_data['result']
    
    
    def item_get(url, token):
        """Fetch every item whose key matches 'icmpping' via `item.get`.

        Returns the API result list (one dict per item) or None on failure.
        """
        request_body = {
            "jsonrpc": "2.0",
            "method": "item.get",
            "params": {
                "output": ["hostid", "host", "itemid", "name", "key_", "units"],
                "search": {"key_": "icmpping"},
            },
            "id": 1,
            "auth": token,
        }
        return api_request(url, request_body)
    
    def host_get(url, token):
        """Fetch all monitored hosts with their interfaces and host groups.

        Returns the `host.get` result list (one dict per host) or None on failure.
        """
        host_params = {
            "monitored_hosts": 1,
            "output": ["hostid", "host", "proxy_hostid"],
            "selectInterfaces": ["interfaceid", "ip"],
            "selectGroups": ["groupid", "name"],
        }
        request_body = {
            "jsonrpc": "2.0",
            "method": "host.get",
            "params": host_params,
            "id": 2,
            "auth": token,
        }
        return api_request(url, request_body)
    
    
    #### Script entry: log in, pull items and hosts, build per-metric DataFrames.
    url = "http://xxxx/zabbix/api_jsonrpc.php"
    zusername = "xxxx"
    zpassword = "xxxx"
    #start = datetime.datetime.now()
    token = generate_token(url, zusername, zpassword)
    
    # DataFrame of icmpping* items (itemid, key_, hostid, ...)
    df1 = pd.DataFrame(item_get(url, token))
    # DataFrame of hosts (hostid, interfaces, groups, ...)
    df2 = pd.DataFrame(host_get(url, token))
    # Merge items with their host details on hostid.
    # NOTE(review): both frames contain a 'host' column, so the merge will
    # produce host_x/host_y suffixed columns — confirm downstream code does
    # not rely on a plain 'host' column.
    df = pd.merge(df1, df2, on="hostid", how="inner")
    
    # Flatten the nested group/interface lists: take the FIRST group and the
    # FIRST interface of each host (empty string when the host has none).
    df['groupid'] = [x[0]['groupid'] if len(x) > 0 else '' for x in df['groups']]
    df['groupname'] = [x[0]['name'] if len(x) > 0 else '' for x in df['groups']]
    df['ipaddress'] = [x[0]['ip'] if len(x) > 0 else '' for x in df['interfaces']]
    # Drop the nested columns now that they are flattened.
    df = df.drop(['groups','interfaces'], axis=1)
    # One DataFrame per metric, split on the item key.
    icmp_df = df[df['key_'] == 'icmpping']
    # Packet-loss items (currently unused below).
    icmppingloss_df = df[df['key_'] == 'icmppingloss']
    # Response-time items, used for the latency update pass.
    latency_df = df[df['key_'] == 'icmppingsec']
    
    
    # Time range covering the whole of YESTERDAY (00:00:00 .. 23:59:59) as
    # epoch timestamps, matching the `clock` column of the Zabbix history tables.
    # NOTE(review): these are naive local-time timestamps — confirm the script
    # host's timezone matches how Zabbix recorded `clock` values.
    yesterday = datetime.now() - timedelta(days=1)
    time_from = datetime(yesterday.year, yesterday.month, yesterday.day, 0, 0, 0).timestamp()
    time_till = datetime(yesterday.year, yesterday.month, yesterday.day, 23, 59, 59).timestamp()
    
    # Connection to the Zabbix database (read: history / history_uint).
    conn = pymysql.connect(host=host, port=port, user=username, password=password, database=database)
    cursor = conn.cursor()
    
    # Connection to the local reporting database (write: dailyavailability).
    conn1 = pymysql.connect(host=lhost, port=lport, user=lusername, password=lpassword, database=ldatabase)
    cursor1 = conn1.cursor()
    
    
    # Iterate over each icmpping item and compute yesterday's availability.
    for index, row in icmp_df.iterrows():
        # Access by column label (consistent with the latency loop below);
        # positional row[0]/row[1]/row[9] breaks as soon as columns move.
        itemid = row['itemid']
        hostid = row['hostid']
        ipaddress = row['ipaddress']
        # Parameterized query — never interpolate values into SQL strings.
        query = "SELECT clock, value FROM history_uint WHERE itemid = %s AND clock BETWEEN %s AND %s"
        cursor.execute(query, (itemid, time_from, time_till))
        history_data = cursor.fetchall()
    
        # Availability = mean of the stored values * 100.
        # (icmpping appears to record 1 = up, 0 = down — verify on your setup.)
        value_count = len(history_data)
        if value_count == 0:
            # Guard: the original divided by zero when an item had no samples.
            print(f"HostID: {hostid} ItemID: {itemid} No availability data available")
            continue
        availability_count = sum(int(sample[1]) for sample in history_data)
    
        availability = (availability_count / value_count) * 100
        formatted_availability = "{:.2f}".format(availability)
        # Parameterized INSERT into the reporting table.
        insert_query = (
            "INSERT INTO dailyavailability (hostid, ipaddr, itemid, uptime, date) "
            "VALUES (%s, %s, %s, %s, CURDATE())"
        )
        try:
            cursor1.execute(insert_query, (hostid, ipaddress, itemid, formatted_availability))
            conn1.commit()
            print(f"Data inserted for HostID: {hostid}, ItemID: {itemid}")
        except Exception as e:
            print(f"Error occurred while inserting data for HostID: {hostid}, ItemID: {itemid}")
            print(f"Error message: {str(e)}")
            conn1.rollback()
    
    # Iterate over each icmppingsec item and fill in yesterday's average latency.
    for index, row in latency_df.iterrows():
        itemid = row['itemid']
        hostid = row['hostid']
    
        # icmppingsec values live in `history` (floats, seconds).
        # Parameterized query — never interpolate values into SQL strings.
        query = "SELECT value FROM history WHERE itemid = %s AND clock BETWEEN %s AND %s"
        cursor.execute(query, (itemid, time_from, time_till))
        history_data = cursor.fetchall()
    
        # Convert seconds -> milliseconds for the report.
        latency_values = [float(sample[0]) * 1000 for sample in history_data]
    
        if latency_values:
            average_latency = sum(latency_values) / len(latency_values)
            formatted_average_latency = "{:.2f}".format(average_latency)
    
            # Only update rows inserted today that do not have a latency yet,
            # so a re-run does not overwrite already-filled values.
            update_query = (
                "UPDATE dailyavailability SET latency = %s "
                "WHERE hostid = %s AND date = CURDATE() AND latency IS NULL"
            )
            try:
                cursor1.execute(update_query, (formatted_average_latency, hostid))
                conn1.commit()
                print(f"Latency updated for HostID: {hostid}, ItemID: {itemid}")
            except Exception as e:
                print(f"Error occurred while updating latency for HostID: {hostid}, ItemID: {itemid}")
                print(f"Error message: {str(e)}")
                conn1.rollback()
        else:
            print(f"HostID: {hostid} ItemID: {itemid} No latency data available")
    
    # Close the database connections.
    # NOTE: the original final line ended with an invisible zero-width space
    # (U+200B), which is a SyntaxError in Python — it has been removed here.
    cursor.close()
    conn.close()
    cursor1.close()
    conn1.close()
Working...