"""Generate interactive JeevanAI advisory reports and an Excel summary for GAVL fields.

Reads polygon/field pairs from ``existing_land_ids.json``, fetches advisories from the
JeevanAI cloud functions, renders an HTML report from a template, and appends
pest/disease/weed and fertilizer rows to a shared openpyxl workbook.
"""

import json
import threading
import time
from datetime import datetime

import firebase_admin
import pandas as pd
import requests
from firebase_admin import auth, credentials, db, firestore
from google.cloud.firestore_v1.base_query import FieldFilter
from openpyxl import Workbook

FIELDS = 5  # Initial number of fields that you want to process
BATCH_SIZE = 15  # Batch processing size (threads joined per batch)


def getDictFromFile(path):
    """Load and return the JSON document stored at *path*."""
    with open(path, "r", encoding="utf-8") as f:
        return json.loads(f.read())


def get_jeevan_ai_advisory_tree(UID, FieldID, sensed_day, db_firestore):
    """Fetch the most recent stored advisory document for a field/sensed day.

    Queries the ``JEEVN_AI_advisories`` Firestore collection filtered by UID,
    fieldID and SensedDay, newest document first. Returns the document dict,
    or ``{"advisory": {"advisory_status": "no_data"}}`` when nothing matches.
    """
    collection_ref = db_firestore.collection("JEEVN_AI_advisories")
    docs = (
        collection_ref.where(filter=FieldFilter("UID", "==", UID))
        .where(filter=FieldFilter("fieldID", "==", FieldID))
        .where(filter=FieldFilter("SensedDay", "==", sensed_day))
        .order_by("__name__", direction=firestore.Query.DESCENDING)
        .limit(1)
        .get()
    )
    if docs:
        return [doc.to_dict() for doc in docs][0]
    return {"advisory": {"advisory_status": "no_data"}}


def get_jeevan_ai_advisory(UID, FieldID, PolygonID, crop):
    """Request a fresh advisory from the askJeevnAPI2 cloud function.

    Returns the decoded JSON response body (a dict with keys such as
    ``advisory``, ``fieldArea``, ``url`` — schema defined by the service).
    """
    endpointUrl = "https://us-central1-farmbase-b2f7e.cloudfunctions.net/askJeevnAPI2"
    bodyObj = {
        "UID": UID,
        "FieldID": FieldID,
        "Crop": crop,
        "PolygonID": PolygonID,
    }
    response = requests.post(endpointUrl, json=bodyObj)
    print(PolygonID, " Status code: ", response.status_code)
    return response.json()


def get_sat_image_sensedday(UID, fieldId, SensedDay, ImageType):
    """Fetch the rendered satellite image URL for a field on a given sensed day.

    Returns ``{"url": ...}`` on success or ``{"msg": "URL does not exist"}``
    on any non-200 response.
    """
    endpointUrl = "https://us-central1-farmbase-b2f7e.cloudfunctions.net/getFieldImage"
    bodyObj = {
        "UID": UID,
        "FieldID": fieldId,
        "ImageType": ImageType,
        "SensedDay": SensedDay,
        "Colormap": "1",
    }
    response = requests.post(endpointUrl, json=bodyObj)
    if response.status_code == 200:
        return {"url": response.json().get("url")}
    return {"msg": "URL does not exist"}


def calculate_pixel_count(image_url, ImageType):
    """Download an image and return the percentage of pixels per palette colour.

    The palette (White/Green/Red/Orange/Purple, RGBA) is the colormap used by
    the report images; transparent "EmptyColor" pixels are excluded from the
    total. *ImageType* is currently unused but kept for interface stability.
    """
    from io import BytesIO

    import numpy as np
    import requests
    from PIL import Image

    def download_image_from_url(url):
        response = requests.get(url)
        if response.status_code == 200:
            return Image.open(BytesIO(response.content))
        print("Failed to download image")
        return None

    image = download_image_from_url(image_url)
    if image is None:
        # BUGFIX: previously np.array(None) produced a 0-d object array and the
        # colour comparisons failed obscurely; fail loudly instead.
        raise ValueError(f"Could not download image: {image_url}")

    image_array = np.array(image)

    # RGBA palette used by the report colormap.
    colors = {
        (255, 255, 255, 255): "White",  # FFFFFF
        (0, 0, 0, 0): "EmptyColor",  # fully transparent background
        (17, 167, 95, 255): "Green",  # 11A75F
        (145, 16, 44, 255): "Red",  # 91102C
        (247, 136, 90, 255): "Orange",  # F7885A
        (60, 19, 97, 255): "Purple",  # 3C1361
    }

    color_counts = {}
    total_pixels = 0
    for color, color_label in colors.items():
        if color_label == "EmptyColor":  # exclude transparent pixels
            continue
        count = np.sum(np.all(image_array == color, axis=-1))
        total_pixels += count
        color_counts[color_label] = count

    # BUGFIX: guard against ZeroDivisionError when no palette pixel matches
    # (e.g. an image rendered with a different colormap).
    if total_pixels == 0:
        return {color_label: 0.0 for color_label in color_counts}

    return {
        color_label: (count / total_pixels) * 100
        for color_label, count in color_counts.items()
    }


def process_one_field_data_senseday(
    UID,
    fieldId,
    main_fieldID,
    sensed_day,
    field_data_list,
    jeevan_ai_data_list,
    db_firestore,
):
    """Process one field/polygon for one sensed day and append its report rows.

    Reads the field record from the Realtime Database, fetches a fresh advisory,
    resolves the satellite image + colour statistics, and appends one entry to
    *field_data_list* / *jeevan_ai_data_list* (the shared output lists mutated
    by worker threads). Returns a small ``{"message": ...}`` dict on validation
    failure, otherwise ``None``.
    """
    # GAVL fields are stored one level deeper, under .../Polygons/<polygonId>.
    if UID == "TCXcp5VIsfhHZrh0nm2VsgBtcGy2" and main_fieldID is not None:
        str_ref_db_userId = (
            "/PaidMonitoredFields/PMF/"
            + UID
            + "/"
            + str(main_fieldID)
            + "/Polygons/"
            + str(fieldId)
        )
    else:
        str_ref_db_userId = "/PaidMonitoredFields/PMF/" + UID + "/" + str(fieldId)
    ref = db.reference(f"{str_ref_db_userId}")

    if ref.get() is None:
        return {"message": "please enter the vaild fieldid"}
    if ref.child("Expired").get() == str(1) or ref.child("Paused").get() == "yes":
        return {"message": "field is paused or expired"}

    coordinates_db = ref.child("Coordinates").get()
    if coordinates_db is None:
        return
    name = ref.child("Name").get()
    phone = ref.child("PhoneNumber").get()
    if name is None or phone is None:
        return {"message": "name or phone number is missing"}

    lastSatelliteVisit = (
        sensed_day if sensed_day != "latestdate" else ref.child("LatestDay").get()
    )
    if lastSatelliteVisit is None:
        # Fall back to the most recent key under SensedDays.
        sensed_days = ref.child("SensedDays").get()
        lastSatelliteVisit = list(sensed_days.keys())[-1]

    crop_name = "Unknown Crop"
    advisory = None
    if UID == "TCXcp5VIsfhHZrh0nm2VsgBtcGy2":
        crop_name = "oil palm"  # all GAVL fields are oil palm
    else:
        crop_code_dict = db.reference("/CropCodes").get()
        crops = ref.child("FieldCrops").get()
        if crops:
            crop_name = crops[0]
        else:
            crop_code = ref.child("CropCode").get()
            if crop_code:
                crop_name = crop_code_dict.get(crop_code, "Unknown Crop")
            else:
                print("no crop code available")

    if crop_name != "Unknown Crop":
        advisory_tree = get_jeevan_ai_advisory(UID, main_fieldID, fieldId, crop_name)

        fieldName = name + "_" + phone
        fieldArea = advisory_tree.get("fieldArea")
        maxLat = advisory_tree.get("maxLat")
        maxLon = advisory_tree.get("maxLon")
        minLat = advisory_tree.get("minLat")
        minLon = advisory_tree.get("minLon")
        image_url = advisory_tree.get("url")
        Satellite_Data = advisory_tree.get("Satellite_Data")

        # Fallback chain kept from the original for robustness against
        # partially-populated records (fieldName is normally non-None here).
        if fieldName is None:
            fieldName = ref.child("FieldDescription").get()
        if fieldName is None:
            fieldName = ref.child("FieldAddress").get()
        if fieldName is None:
            fieldName = ref.child("Name").get()
        if fieldName is None:
            fieldName = ref.child("Phone").get()
        if fieldName is None:
            fieldName = ""

        if fieldArea is None:
            fieldArea = ref.child("FieldArea").get()
            if fieldArea is None:
                fieldArea = ref.child("Area").get()
        # Convert square meters to acres, formatted to three decimal places.
        fieldArea = "{:.3f} acres".format(fieldArea * 0.000247105)

        lat_lon_pairs = []

        def custom_sort(key):
            # Coordinate keys are "a" (first point) then "P_1", "P_2", ...
            if key == "a":
                return 0
            elif key.startswith("P_"):
                return int(key[2:])
            # BUGFIX: returning None for unexpected keys made sorted() raise
            # TypeError; sort unknown keys last instead.
            return float("inf")

        sorted_coordinates_keys = sorted(coordinates_db.keys(), key=custom_sort)
        for point in sorted_coordinates_keys:
            coordinates = coordinates_db[point]
            latitude = float(coordinates["Latitude"])
            longitude = float(coordinates["Longitude"])
            lat_lon_pairs.append([latitude, longitude])
        if lat_lon_pairs:
            # Close the polygon by repeating the first vertex.
            lat_lon_pairs.append(lat_lon_pairs[0])

        if maxLat is None or minLat is None or maxLon is None or minLon is None:
            latitudes = [coord[0] for coord in lat_lon_pairs]
            longitudes = [coord[1] for coord in lat_lon_pairs]
            maxLat = max(latitudes)
            maxLon = max(longitudes)
            minLat = min(latitudes)
            minLon = min(longitudes)

        row_dict = {
            "fieldID": fieldId,
            "fieldName": fieldName,
            "fieldName_Senseday": fieldName + "_" + lastSatelliteVisit,
            "fieldArea": fieldArea,
            "lastSatelliteVisit": datetime.strptime(
                lastSatelliteVisit, "%Y%m%d"
            ).strftime("%d %B %Y"),
            "coordinates": lat_lon_pairs,
            "maxLat": maxLat,
            "maxLon": maxLon,
            "minLat": minLat,
            "minLon": minLon,
        }

        if UID == "TCXcp5VIsfhHZrh0nm2VsgBtcGy2":
            ImageType = "ndvi"  # TODO: fetch from database
            # GAVL image URLs follow a fixed bucket layout.
            image_url = (
                "https://storage.googleapis.com/farmbase-b2f7e.appspot.com/PaidMonitoredFieldsPolygons/TCXcp5VIsfhHZrh0nm2VsgBtcGy2/"
                + str(main_fieldID)
                + "/"
                + str(fieldId)
                + "/"
                + str(lastSatelliteVisit)
                + "/NDVI.png"
            )
            Satellite_Data = {
                "white": None,
                "green": None,
                "orange": None,
                "purple": None,
                "red": None,
            }
        else:
            ImageType = "hybrid"
            if image_url is None:
                image_url_response = get_sat_image_sensedday(
                    UID, fieldId, lastSatelliteVisit, ImageType
                )
                if "msg" in image_url_response:
                    image_url = None
                    Satellite_Data = {
                        "white": None,
                        "green": None,
                        "orange": None,
                        "purple": None,
                        "red": None,
                    }
                else:
                    image_url = image_url_response["url"]
                    percentage_data = calculate_pixel_count(image_url, ImageType)
                    Satellite_Data = {
                        "white": "{:.2f}%".format(float(percentage_data["White"])),
                        "green": "{:.2f}%".format(float(percentage_data["Green"])),
                        "orange": "{:.2f}%".format(float(percentage_data["Orange"])),
                        "purple": "{:.2f}%".format(float(percentage_data["Purple"])),
                        "red": "{:.2f}%".format(float(percentage_data["Red"])),
                    }

        row_dict["url"] = image_url
        row_dict["Satellite_Data"] = Satellite_Data

        advisory = advisory_tree.get("advisory")
        # BUGFIX: the API may omit "advisory"; previously this raised TypeError
        # inside the worker thread and silently dropped the field.
        if advisory is not None:
            advisory["Crop"] = advisory_tree.get("Crop")
        if advisory_tree.get("SARDay"):
            row_dict["lastSatelliteVisitSAR"] = datetime.strptime(
                advisory_tree.get("SARDay"), "%Y%m%d"
            ).strftime("%d %B %Y")

        jeevan_ai_row_dict = {fieldId + "_" + lastSatelliteVisit: advisory}

        # Append this advisory's rows to the shared Excel workbook (GAVL only).
        if UID == "TCXcp5VIsfhHZrh0nm2VsgBtcGy2" and advisory is not None:
            update_excel_file(
                phone, name, fieldId, main_fieldID, lastSatelliteVisit, advisory
            )
    else:
        print(fieldId, "Crop code not in database")

    # advisory is None when the crop was unknown or the API returned no advisory;
    # short-circuit keeps row_dict from being referenced unbound in that case.
    if advisory and row_dict:
        field_data_list.append(row_dict)
        jeevan_ai_data_list.append(jeevan_ai_row_dict)
    return None


def create_interactive_report_sensedday(UID, Fields, global_flag=False):
    """Process all *Fields* in threaded batches and write the HTML + Excel reports.

    *Fields* maps polygon IDs to ``{"dates": [...], "main_fieldID": ...}`` for
    the GAVL account; entries of any other shape are skipped. *global_flag*
    selects a fixed output filename instead of a timestamped one.
    """
    db_firestore = firestore.client()
    field_data_list = []
    jeevan_ai_data_list = []
    print("Starting processing fields")
    print("Wait 1-2 minutes for first batch of " + str(BATCH_SIZE) + " to process")

    threads = []
    i = 0
    for key, dates in Fields.items():
        # BUGFIX: t was only assigned inside the nested condition, so a
        # non-matching entry either raised NameError or re-started the previous
        # (already started) thread. Reset per iteration and skip when unset.
        t = None
        if (
            type(dates) == dict
            and UID == "TCXcp5VIsfhHZrh0nm2VsgBtcGy2"
            and list(dates.keys()) == ["dates", "main_fieldID"]
        ):
            main_fieldID = dates["main_fieldID"]
            dates_list = dates["dates"]
            if len(dates_list) == 0:
                t = threading.Thread(
                    target=process_one_field_data_senseday,
                    args=(
                        UID,
                        key,
                        main_fieldID,
                        "latestdate",
                        field_data_list,
                        jeevan_ai_data_list,
                        db_firestore,
                    ),
                )
        if t is None:
            continue
        time.sleep(0.5)  # stagger thread start-up to soften the API burst
        t.start()
        threads.append(t)
        i += 1
        if len(threads) == BATCH_SIZE:
            for t in threads:
                t.join()  # wait for the whole batch to complete
            threads = []  # reset to start the next batch
            print("Processed until", i, " fields")

    # BUGFIX: the final partial batch (< BATCH_SIZE threads) was never joined,
    # so the report could be written before the last fields finished.
    for t in threads:
        t.join()

    print("All fields processed! Creating report......")
    if field_data_list and jeevan_ai_data_list:
        json_data = json.dumps(field_data_list, indent=2)
        jeevanai_data = json.dumps(jeevan_ai_data_list, indent=2)

        template_file_name = "template_Interactive_jeevanai_gavl_report.html"
        with open(template_file_name, "r", encoding="utf-8") as file:
            content_str = file.read()

        if global_flag:
            output_file_name = "interactive_report.html"
            blob_path = f"PaidMonitoredFields/{UID}/{output_file_name}"
        else:
            output_file_name = (
                "interactive_report" + "_" + str(int(time.time() * 1000)) + ".html"
            )
            blob_path = (
                f"PaidMonitoredFields/{UID}/interactive_reports/{output_file_name}"
            )

        # Substitute report data, report date and advisory data into the template.
        content_str = content_str.replace("DATA_TO_REPLACE", json_data)
        content_str = content_str.replace(
            "DATE_TO_REPLACE", datetime.today().strftime("%d %b %Y")
        )
        content_str = content_str.replace("jeevan_ai_data_replace", jeevanai_data)

        # Save the final report (modified HTML content) locally.
        with open(output_file_name, "w") as f:
            f.write(content_str)
        print(f"Html file '{output_file_name}' created successfully.")

    # Always persist the workbook, even when no HTML report was produced.
    filename = "JeevanAI_Advisory_GAVL.xlsx"
    wb.save(filename)
    # BUGFIX: the success message printed a literal placeholder instead of the
    # actual filename.
    print(f"Excel file '{filename}' created successfully.")
    print("Done!!")


def update_excel_file(phone, name, fieldID, main_fieldID, lastSatelliteVisit, advisory):
    """Append pest/disease/weed and fertilizer rows for one advisory to the workbook.

    Writes into the module-level ``pest_sheet`` and ``fertilizer_sheet``.
    NOTE(review): this is called from worker threads; openpyxl sheets are not
    documented as thread-safe — consider serializing these appends.
    """
    global pest_sheet, fertilizer_sheet, advisory_sheet

    # Columns shared by every row produced from this advisory.
    common_data = [
        phone,
        name,
        main_fieldID + "_" + fieldID,
        datetime.strptime(lastSatelliteVisit, "%Y%m%d").strftime("%d %B %Y"),
    ]
    advisory_data = advisory.copy()

    def _combine(organic_sol, chemical_sol):
        # Join both solutions when present, otherwise whichever one exists.
        if organic_sol and chemical_sol:
            return f"{organic_sol}, {chemical_sol}"
        return organic_sol or chemical_sol

    # --- Weed rows -----------------------------------------------------------
    if "Weed" in advisory_data and "potential_weeds" in advisory_data["Weed"]:
        for weed in advisory_data["Weed"]["potential_weeds"]:
            organic_sol = weed.get("solution", {}).get("organic", "")
            chemical_sol = weed.get("solution", {}).get("chemical", "")
            row = common_data.copy()
            row.extend(
                [
                    "Weed",
                    weed.get("probability", ""),
                    weed.get("weed_name", ""),
                    _combine(organic_sol, chemical_sol),
                ]
            )
            pest_sheet.append(row)

    # --- Pest and disease rows ----------------------------------------------
    if "Pest and Disease" in advisory_data:
        if "potential_pests" in advisory_data["Pest and Disease"]:
            for pest in advisory_data["Pest and Disease"]["potential_pests"]:
                organic_sol = pest.get("solution", {}).get("organic", "")
                chemical_sol = pest.get("solution", {}).get("chemical", "")
                row = common_data.copy()
                row.extend(
                    [
                        "Pest",
                        pest.get("probability", ""),
                        pest.get("pest_name", ""),
                        _combine(organic_sol, chemical_sol),
                    ]
                )
                pest_sheet.append(row)
        if "potential_diseases" in advisory_data["Pest and Disease"]:
            for disease in advisory_data["Pest and Disease"]["potential_diseases"]:
                organic_sol = disease.get("solution", {}).get("organic", "")
                chemical_sol = disease.get("solution", {}).get("chemical", "")
                row = common_data.copy()
                row.extend(
                    [
                        "Disease",
                        disease.get("probability", ""),
                        disease.get("disease_name", ""),
                        _combine(organic_sol, chemical_sol),
                    ]
                )
                pest_sheet.append(row)

    # --- Fertilizer rows -----------------------------------------------------
    if "Fertilizer" in advisory_data:
        for nutrient in ["N", "P", "K", "S", "Zn"]:
            if nutrient not in advisory_data["Fertilizer"]:
                continue
            fert_data = advisory_data["Fertilizer"][nutrient]

            # Solutions may arrive as a string or a tuple; take the first
            # element of a tuple.
            organic_sol = fert_data.get("organic_source", "")
            if isinstance(organic_sol, tuple) and len(organic_sol) > 0:
                organic_sol = organic_sol[0]
            chemical_sol = fert_data.get(f"source_of_{nutrient}", "")
            if isinstance(chemical_sol, tuple) and len(chemical_sol) > 0:
                chemical_sol = chemical_sol[0]

            row = common_data.copy()
            row.extend(
                [
                    nutrient,
                    fert_data.get("current_soil_composition_kpa", ""),
                    fert_data.get("ideal_soil_composition_kpa", ""),
                    _combine(organic_sol, chemical_sol),
                ]
            )
            fertilizer_sheet.append(row)

    # --- Soil management rows (pH, salinity; SOC removed for GAVL) ----------
    if "Soil Management" in advisory_data:
        soil_data = advisory_data["Soil Management"]
        for soil_param in ["pH", "salinity"]:
            if soil_param in soil_data:
                row = common_data.copy()
                row.extend(
                    [
                        soil_param,
                        soil_data[soil_param],
                        "",  # ideal value not specified for these in the data
                        "",  # organic solution
                        "",  # chemical solution
                    ]
                )
                fertilizer_sheet.append(row)


# ######################## Load fields to process #############################
poc_data = getDictFromFile("existing_land_ids.json")
poc_data_df = pd.DataFrame(poc_data)

Fields = {}
for index, row in poc_data_df.iterrows():
    if row["PolygonID_FieldID_Pairs"]:
        # Each entry is a list of (polygon_id, field_id) pairs; use the first.
        polygon_id = row["PolygonID_FieldID_Pairs"][0][0]
        field_id = row["PolygonID_FieldID_Pairs"][0][1]
        if polygon_id not in Fields:
            Fields[polygon_id] = {"dates": [], "main_fieldID": field_id}
print(len(Fields), "Fields Loaded Successfully")

# ######################## Firebase initialization ############################
database_url = "https://farmbase-b2f7e-31c0c.firebaseio.com/"
storage_url = "farmbase-b2f7e.appspot.com"
cred = credentials.Certificate("servicekey.json")
try:
    firebase_admin.initialize_app(
        cred, {"databaseURL": database_url, "storageBucket": storage_url}
    )
except ValueError:
    # BUGFIX: was a bare except; initialize_app raises ValueError when the
    # default app already exists — catch only that.
    print("Already initialized")

# ######################## Prepare the Excel workbook #########################
wb = Workbook()
pest_sheet = wb.create_sheet("pestDiseaseWeed", 0)
fertilizer_sheet = wb.create_sheet("fertilizerManagement", 1)
# Remove the default sheet if it exists.
if "Sheet" in wb.sheetnames:
    wb.remove(wb["Sheet"])

pest_headers = [
    "LandID",
    "Farmer_code",
    "fieldID_polygonID",
    "SatelliteVisitDate",
    "Category",
    "Probability",
    "Type",
    "Solution_Org,Solution_Chemical",
]
pest_sheet.append(pest_headers)

fertilizer_headers = [
    "LandID",
    "Farmer_code",
    "fieldID_polygonID",
    "SatelliteVisitDate",
    "Nutrient",
    "Current (Kg/Acre)",
    "Ideal (Kg/Acre)",
    "Solution_Org,Solution_Chemical",
]
fertilizer_sheet.append(fertilizer_headers)

# ######################## Run ################################################
UID = "TCXcp5VIsfhHZrh0nm2VsgBtcGy2"
create_interactive_report_sensedday(UID, Fields)