import numpy as np
from PIL import Image, ImageDraw
import matplotlib.pyplot as plt


def create_polymask(uid, fieldid, w_mask, h_mask, X, Y, input_image, output_filename):
    """Cut the polygon with pixel vertices (X, Y) out of *input_image*.

    Everything outside the polygon is painted white; the composited result is
    written to *output_filename*.

    Args:
        uid, fieldid: identifiers kept for interface compatibility (unused here).
        w_mask, h_mask: nominal mask size — unused; the mask is sized from the
            input image itself.
        X, Y: equal-length sequences of polygon vertex pixel coordinates.
        input_image: path of the source image.
        output_filename: destination path for the masked image.

    Returns:
        1 on success (legacy convention kept for existing callers).
    """
    img = Image.open(input_image)
    w_img, h_img = img.size

    # White canvas that will receive only the pixels inside the polygon.
    white_image = Image.new('RGBA', (w_img, h_img), color='white')

    # 8-bit mask: 0 everywhere, 255 inside the polygon.
    mask_image = Image.new('L', (w_img, h_img), color=0)
    ImageDraw.Draw(mask_image).polygon(list(zip(X, Y)), fill=255)

    # Paste the original through the mask onto the white canvas.
    white_image.paste(img, (0, 0), mask_image)
    # NOTE(review): dropped the original 'facecolor' kwarg — that is a
    # matplotlib savefig argument, not a PIL one, and PIL silently ignored it.
    white_image.save(output_filename)
    return 1


def map_planet_coords(uid, fieldid, coordinates, fieldmaxlat, fieldminlat,
                      fieldmaxlong, fieldminlong, input_folder, output_folder):
    """Map polygon lat/long vertices into pixel space and mask the field image.

    Reads ``{input_folder}/{fieldid}.png``, converts every point stored under
    keys "P_0", "P_1", ... in *coordinates* (each a dict with 'Latitude' and
    'Longitude') into pixel coordinates using the field's bounding box, then
    writes the polygon-masked image to ``{output_folder}/masked/{fieldid}.png``
    via create_polymask().
    """
    import cv2
    import collections
    # NOTE(review): removed unused firebase-admin credential setup — the
    # original loaded 'servicekey.json' but never initialised or used the app.

    coordinates = collections.OrderedDict(sorted(coordinates.items()))

    tci_file_name = f"{input_folder}/{fieldid}.png"
    tci = cv2.imread(tci_file_name, 0)  # grayscale read; shape is (rows, cols)
    # NOTE(review): the original unpacked this as `w_tci, h_tci`, which swapped
    # width and height (numpy shape is height-first). The arithmetic below is
    # unchanged — only the misleading names are corrected.
    img_h, img_w = tci.shape

    # The top-left pixel corresponds to (max latitude, min longitude).
    zero_lat = fieldmaxlat
    zero_long = fieldminlong

    # Pixels per degree along each axis of the image.
    px_per_deg_long = img_w / abs(float(fieldmaxlong) - float(fieldminlong))
    px_per_deg_lat = img_h / abs(float(fieldmaxlat) - float(fieldminlat))

    X = []
    Y = []
    # Points are stored under keys "P_0".."P_{n-1}" — walk them in index order.
    for ploc in range(len(coordinates)):
        point = coordinates["P_" + str(ploc)]
        temp_lat = float(point['Latitude'])
        temp_long = float(point['Longitude'])
        X.append(round((temp_long - zero_long) * px_per_deg_long))
        Y.append(round((zero_lat - temp_lat) * px_per_deg_lat))

    output_file = f"{output_folder}/masked/{fieldid}.png"
    # Same positional values as before the rename: (height, width) — both are
    # ignored by create_polymask anyway.
    create_polymask(uid, fieldid, img_h, img_w, X, Y, tci_file_name, output_file)


# ---------------------------------------------------------------------------
# Dump every polygon's metadata to a JSON file.
# NOTE(review): `polygon_inside`, `get_polygonobj`, `path_polygonobj`, `UID`
# and `plot_polygon_on_image` are notebook globals defined elsewhere.
# ---------------------------------------------------------------------------
import json

polygon_data = {}
for polygonID in polygon_inside[:]:
    try:
        (fieldID, farmerID, landID, field_area, polygon_coordinates,
         fieldmaxlat, fieldminlat, fieldmaxlong,
         fieldminlong) = get_polygonobj(path_polygonobj, UID, polygonID)
        polygon_data[str(polygonID)] = {
            'fieldID': fieldID,
            'farmerID': farmerID,
            'landID': landID,
            'field_area': field_area,
            'polygon_coordinates': polygon_coordinates,
            'fieldmaxlat': fieldmaxlat,
            'fieldminlat': fieldminlat,
            'fieldmaxlong': fieldmaxlong,
            'fieldminlong': fieldminlong,
        }
    except Exception as e:
        # Best-effort: log and continue so one bad polygon doesn't abort the dump.
        print(f"Error processing polygonID {polygonID}: {e}")

output_json_file = '/content/drive/MyDrive/YOLOv8_oil_palm_detection/polygon_data_all.json'
with open(output_json_file, 'w') as json_file:
    json.dump(polygon_data, json_file, indent=4)
print(f"Saved polygon data to {output_json_file}")

# Drop polygons whose id contains this marker. Iterate over a copy ([:]) so we
# can remove from the original list safely.
text_to_remove = "1613971589362"
for item in polygon_inside[:]:
    if text_to_remove in item:
        polygon_inside.remove(item)

# Reload the dump (round-trips through JSON, so all keys are strings).
with open('/content/drive/MyDrive/YOLOv8_oil_palm_detection/polygon_data_all.json', 'r') as file:
    polygon_data = json.load(file)

# Generate the masked image for a single polygon (slice [1000:1001]).
poly_count = 0
for polygonID in polygon_inside[1000:1001]:
    print(poly_count, polygonID)
    entry = polygon_data[f"{polygonID}"]
    fieldmaxlat = entry["fieldmaxlat"]
    fieldminlat = entry["fieldminlat"]
    fieldmaxlong = entry["fieldmaxlong"]
    fieldminlong = entry["fieldminlong"]
    coordinates = entry["polygon_coordinates"]
    input_folder = "/content/drive/MyDrive/YOLOv8_oil_palm_detection/satellite_images/"
    output_folder = "/content/drive/MyDrive/YOLOv8_oil_palm_detection/satellite_images/"
    plot_polygon_on_image(polygonID, fieldminlat, fieldmaxlat, fieldminlong, fieldmaxlong)
    map_planet_coords(UID, polygonID, coordinates, fieldmaxlat, fieldminlat,
                      fieldmaxlong, fieldminlong, input_folder, output_folder)
    poly_count = poly_count + 1


# ###################### oil_palm_detection ###########
# (Originally a Colab cell; `%pip install ultralytics` was run beforehand.)
import os
import cv2
from ultralytics import YOLO
# NOTE(review): removed the unused `fasterrcnn_resnet50_fpn` torchvision import.

ROOT_DIR = "/content/drive/MyDrive/YOLOv8_oil_palm_detection/"
confidence_thres = 0.15
# IoU threshold for NMS duplicate suppression: intersection area / union area,
# ranging from 0 (no overlap) to 1 (identical boxes).
iou_thres = 0.20

OUTPUT_DIR = f'{ROOT_DIR}/detections/masked_images/'
os.makedirs(OUTPUT_DIR, exist_ok=True)

# Trained YOLOv8 palm-detection weights.
model_file = f"{ROOT_DIR}yolov8_palm_detection_best.pt"
model = YOLO(model_file)

TESTING_DIR = f"{ROOT_DIR}satellite_images/masked/"
image_files = os.listdir(TESTING_DIR)

# polygon_id -> number of palm trees detected in that polygon's masked image.
tree_count = {}

# Text-rendering parameters; hoisted out of the loop (they are also reused by
# the annotation pass further below).
font = cv2.FONT_HERSHEY_SIMPLEX
font_scale = 0.5
thickness = 1

for image_file in image_files[:]:
    image_path = os.path.join(TESTING_DIR, image_file)
    frame = cv2.imread(image_path)

    results = model(frame, conf=confidence_thres, iou=iou_thres, max_det=1000,
                    save=True, save_txt=True, save_json=True)[0]
    detect_count = len(results.boxes)
    polygon_id = image_file.split('.')[0]
    tree_count[polygon_id] = detect_count

    # Mark the centre of every detected bounding box with a filled circle.
    for x1, y1, x2, y2, score, class_id in results.boxes.data.tolist():
        center = (int((x1 + x2) / 2), int((y1 + y2) / 2))
        cv2.circle(frame, center, radius=5, color=(0, 255, 255), thickness=-1)

    # NOTE(review): removed dead per-image label/rectangle geometry (its only
    # consumers were commented-out drawing calls) and the headless, no-op
    # cv2.waitKey(0)/cv2.destroyAllWindows() calls.
    output_path = os.path.join(OUTPUT_DIR, f'{os.path.splitext(image_file)[0]}_object_detection.png')
    cv2.imwrite(output_path, frame)


# ---------------------------------------------------------------------------
# Merge the detection counts into the per-field metadata and derive densities.
# ---------------------------------------------------------------------------
with open('polygon_data.json', 'r') as file:
    data = json.load(file)

# Conversion factors from square metres.
sq_m_to_ha = 1 / 10000      # 1 m^2 = 0.0001 ha
sq_m_to_acre = 1 / 4046.86  # 1 m^2 ≈ 0.000247105 acres

for key, value in data.items():
    print(key)
    if key in tree_count:
        value['total_tree_count'] = tree_count[key]
        area_sq_m = value['field_area']
        value['area_hectares'] = round(area_sq_m * sq_m_to_ha, 2)
        value['area_acres'] = round(area_sq_m * sq_m_to_acre, 2)
        # NOTE(review): divides by the rounded areas — raises ZeroDivisionError
        # for zero/near-zero fields; confirm field_area is always positive.
        value['tree_count_per_hectare'] = round(value['total_tree_count'] / value['area_hectares'], 2)
        value['tree_count_per_acre'] = round(value['total_tree_count'] / value['area_acres'], 2)
        # Strip bulky raw-geometry fields before re-serialising.
        del value['polygon_coordinates']
        del value['fieldmaxlat']
        del value['fieldminlat']
        del value['fieldmaxlong']
        del value['fieldminlong']

print(data)

with open('field_data_with_palm_tree_count.json', 'w') as file:
    json.dump(data, file, indent=4)


# ---------------------------------------------------------------------------
# Stamp farmer / land / tree-count info onto the first ten detection images.
# ---------------------------------------------------------------------------
polygon_id_list = polygon_inside[:10]

ROOT_DIR = "/content/drive/MyDrive/YOLOv8_oil_palm_detection/detections"
json_file_path = 'field_data_with_palm_tree_count.json'
# Hoisted out of the loop — the file was just written above and is static.
with open(json_file_path) as f:
    data = json.load(f)

for polygon_id in polygon_id_list:
    image_path = f'{ROOT_DIR}/masked_images/{polygon_id}_object_detection.png'
    img = cv2.imread(image_path)
    if img is None:
        continue
    if polygon_id not in data:
        continue
    print(polygon_id)

    record = data[polygon_id]
    # NOTE(review): the original assigned this scalar to `tree_count`,
    # clobbering the detection-count dict built above; renamed to avoid it.
    total_trees = record['total_tree_count']
    farmer_id = str(record['farmerID'])
    land_id = record['landID']

    text = f''' FarmerCode: {farmer_id} \nLandID: {land_id} \nTree Count: {total_trees} '''

    # Anchor point for the text block.
    x, y = 50, 50

    # Measure all lines to size the backing rectangle.
    text_lines = text.strip().split('\n')
    max_text_width = 0
    total_text_height = 0
    for line in text_lines:
        (text_width, text_height), _ = cv2.getTextSize(line, font, font_scale, thickness)
        max_text_width = max(max_text_width, text_width)
        total_text_height += text_height + 5  # inter-line spacing

    rect_x1, rect_y1 = x, y - total_text_height
    rect_x2, rect_y2 = x + max_text_width + 130, y + 90  # empirical padding

    # Semi-transparent white backing rectangle (70% opacity via blending).
    overlay = img.copy()
    cv2.rectangle(overlay, (rect_x1, rect_y1), (rect_x2, rect_y2), (255, 255, 255), cv2.FILLED)
    cv2.addWeighted(overlay, 0.7, img, 0.3, 0, img)

    # Draw each line of text over the rectangle.
    for i, line in enumerate(text_lines):
        cv2.putText(img, line, (x, y + i * 30), cv2.FONT_HERSHEY_DUPLEX, 0.8, (255, 0, 0), 2)

    annotated_image_path = f'{ROOT_DIR}/with_count/{polygon_id}_annotated.png'
    cv2.imwrite(annotated_image_path, img)

# NOTE(review): removed the trailing auto-generated "align the text" snippet —
# its offset `x = (max_text_width - max_text_width) // 2` was always 0 (an
# obvious bug), and the image it redrew onto was never saved afterwards.