import os
from PIL import Image
import cv2
import anthropic
import requests
from dotenv import load_dotenv
import pytesseract
import tempfile
import ssl
import instaloader
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
import numpy as np
from requests.auth import HTTPBasicAuth
import mimetypes
import os
from PIL import Image
import cv2
import anthropic
from wordpress_xmlrpc import Client, WordPressPost
from wordpress_xmlrpc.methods.posts import NewPost
from wordpress_xmlrpc.compat import xmlrpc_client
from dotenv import load_dotenv
import pytesseract
import tempfile
import base64
from instascrape import Profile, Post
import requests
from io import BytesIO
import time
import json
import requests
import traceback
from urllib.parse import urlparse
import os
import openpyxl
from openai import OpenAI
import ast
import kw_search
import os
from PIL import Image
import os
from datetime import datetime, timedelta
from urllib.parse import urlparse
import json
import delete_files
from post_news import get_news_articles, generate_and_publish_news_article
import unicodedata
from eventregistry import *
from tqdm import tqdm
# Define a function to create schema for each post
def create_schema(title, media_url_arr, blog_url):
    """Build a schema.org Article JSON-LD object for a published blog post.

    Args:
        title: Article headline.
        media_url_arr: Iterable of image URLs used in the article.
        blog_url: Full URL of the published post (drives the breadcrumb trail).

    Returns:
        dict: JSON-LD with headline, image list, author organization, and
        breadcrumbs.
    """
    # Ensure the image URLs are a plain list (schema.org expects an array).
    image_urls = list(media_url_arr)
    # Organization details.
    # BUG FIX: this dict was previously wrapped in parentheses with a trailing
    # comma, making it a one-element *tuple* — the serialized "author" field
    # was therefore invalid JSON-LD. It is now a plain dict.
    organization = {
        "@type": "Organization",
        "name": "Farmonaut",
        "url": "https://farmonaut.com",
        "sameAs": [
            "https://www.facebook.com/farmonaut",
            "https://twitter.com/farmonaut",
            "https://www.linkedin.com/company/farmonaut",
            "https://www.instagram.com/farmonaut",
            # Add other social media links as necessary
        ],
    }
    breadcrumbs = create_breadcrumbs_fromURL(urlparse(blog_url))
    return {
        "@context": "https://schema.org",
        "@type": "Article",
        "headline": title,
        "image": image_urls,
        "author": organization,
        "breadcrumb": breadcrumbs,
    }
def create_breadcrumbs_fromURL(parsed_url):
    """Build a schema.org BreadcrumbList from a parsed URL.

    Args:
        parsed_url: Result of urllib.parse.urlparse on the post URL.

    Returns:
        dict: BreadcrumbList with a "Home" item followed by one item per
        path segment (hyphens converted to spaces, title-cased).
    """
    root = f"{parsed_url.scheme}://{parsed_url.netloc}/"
    segments = parsed_url.path.strip("/").split("/")
    # Position 1 is always the site home page.
    items = [{
        "@type": "ListItem",
        "position": 1,
        "name": "Home",
        "item": root,
    }]
    # Each path segment becomes one breadcrumb; its URL is the path prefix
    # up to and including that segment.
    for idx, segment in enumerate(segments, start=2):
        prefix = '/'.join(segments[:idx - 1])
        items.append({
            "@type": "ListItem",
            "position": idx,
            "name": segment.replace("-", " ").title(),
            "item": f"{parsed_url.scheme}://{parsed_url.netloc}/{prefix}",
        })
    return {"@type": "BreadcrumbList", "itemListElement": items}
def get_last_part(file_path):
    """Return the final path component (the file name) of *file_path*."""
    _, tail = os.path.split(file_path)
    return tail
def convert_png_to_jpg(file_path):
    """Convert a PNG file on disk to JPEG and delete the original PNG.

    Args:
        file_path: Path to an existing .png file.

    Raises:
        ValueError: if the path does not exist or is not a .png file.
    """
    # Check if the file exists and is a PNG
    if not os.path.isfile(file_path) or not file_path.lower().endswith('.png'):
        raise ValueError("The provided file is not a valid PNG file.")
    jpg_path = f"{os.path.splitext(file_path)[0]}.jpg"
    with Image.open(file_path) as img:
        # JPEG cannot store alpha — flatten transparent images to RGB first.
        if img.mode in ('RGBA', 'LA') or (img.mode == 'P' and 'transparency' in img.info):
            img = img.convert('RGB')
        img.save(jpg_path, 'JPEG')
    # BUG FIX: the removal used to happen inside the `with` block, while the
    # source file was still open — that raises PermissionError on Windows.
    # Delete only after the image handle has been closed.
    os.remove(file_path)
    #print(f"Converted {file_path} to {jpg_path} and removed the original PNG.")
# Example usage
# convert_png_to_jpg('path/to/your/image.png')
# SECURITY: two OpenAI API keys were previously hard-coded here (and
# committed to source control). The key is now read from the environment;
# rotate any key that was exposed.
api_key = os.getenv("OPENAI_API_KEY", "")
client = OpenAI(
    api_key = api_key
)
import os
import shutil
import ast
import openpyxl
from difflib import SequenceMatcher
import pandas as pd
from fuzzywuzzy import fuzz
import requests
from requests.auth import HTTPBasicAuth
import json
import requests
from requests.auth import HTTPBasicAuth
import json
import html
import re
from requests.auth import HTTPBasicAuth
from PIL import Image
from PIL import Image
def add_watermark(main_image_path, watermark_path):
    """Overlay *watermark_path* onto the top-right corner of *main_image_path*.

    The watermark is scaled to 10% of the main image's height (keeping its
    aspect ratio). The result overwrites the original file, converted back to
    the original image mode so e.g. JPEGs stay RGB.
    """
    base = Image.open(main_image_path).convert('RGBA')
    mark = Image.open(watermark_path).convert('RGBA')
    # Scale the watermark to 10% of the base image height, keeping its ratio.
    target_h = int(base.height * 0.1)
    target_w = int(target_h * (mark.width / mark.height))
    mark = mark.resize((target_w, target_h), Image.LANCZOS)
    # Place at the top-right corner.
    corner = (base.width - mark.width, 0)
    # Composite via a fully transparent layer so the watermark's own alpha
    # channel is respected.
    overlay = Image.new('RGBA', base.size, (0, 0, 0, 0))
    overlay.paste(mark, corner, mark)
    composited = Image.alpha_composite(base, overlay)
    # Restore the original file's mode before saving over it.
    source_mode = Image.open(main_image_path).mode
    if source_mode != 'RGBA':
        composited = composited.convert(source_mode)
    composited.save(main_image_path)
    # print(f"Watermark added to {main_image_path}")
# Example usage:
# add_watermark('path/to/main/image.jpg', 'path/to/watermark/image.png')
def normalize_title(title):
    """Normalize a post title for comparison.

    Decodes HTML entities, strips tags, lowercases, and removes every
    non-alphanumeric character so formatting differences cannot cause
    duplicate-title mismatches.
    """
    # Decode entities (&amp; -> &), then drop any tags that remain.
    plain = re.sub('<[^<]+?>', '', html.unescape(title))
    # Keep only lowercase letters and digits.
    return re.sub(r'[^a-z0-9]', '', plain.lower())
def publish_or_update_wordpress_post(site_url, username, password, post_data):
    """Create a post through the WordPress REST API and return its JSON.

    Args:
        site_url: WordPress site root, e.g. "https://example.com".
        username: WordPress user name.
        password: Application password for basic auth.
        post_data: JSON-serializable dict for the /wp/v2/posts endpoint.

    Raises:
        Exception: if the API root is unreachable or the create call fails.
    """
    auth = HTTPBasicAuth(username, password)
    # Fail fast if the REST API root is not reachable with these credentials.
    try:
        probe = requests.get(f"{site_url}/wp-json", auth=auth)
        probe.raise_for_status()
    except requests.exceptions.RequestException as exc:
        raise Exception(f"Failed to access WordPress API: {str(exc)}")
    # (The duplicate-title lookup was disabled upstream; always create new.)
    response = requests.post(f"{site_url}/wp-json/wp/v2/posts", json=post_data, auth=auth)
    if response.status_code not in [200, 201]:
        raise Exception(f"Failed to publish/update post: {response.text}")
    return response.json()
def check_existing_post_by_title(site_url, auth, title):
    """Search WordPress for a post whose normalized title equals *title*.

    Returns:
        dict | None: the matching post object, or None when nothing matches
        (or the search request fails).
    """
    query = requests.utils.quote(title)
    response = requests.get(f"{site_url}/wp-json/wp/v2/posts?search={query}", auth=auth)
    wanted = normalize_title(title)
    if response.status_code == 200:
        for post in response.json():
            candidate = normalize_title(post['title']['rendered'].lower())
            if candidate == wanted:
                print('post found', candidate)
                return post
    return None
#try:
# result = publish_or_update_wordpress_post(site_url, username, password, post_data)
# print("Post published/updated successfully:", result['link'])
#except Exception as e:
# print("Error:", str(e))
def replace_keywords(excel_file, sheet_name, keyword_array):
    """Fuzzy-map each keyword in *keyword_array* to the closest keyword
    listed in the Excel sheet's 'Keywords' column.

    A sheet keyword replaces the input only when its fuzz ratio exceeds 70;
    otherwise the original keyword is kept unchanged.

    Raises:
        ValueError: if the sheet has no 'Keywords' column.
    """
    df = pd.read_excel(excel_file, sheet_name=sheet_name)
    if 'Keywords' not in df.columns:
        raise ValueError("Excel sheet must have a 'Keywords' column")
    excel_keywords = df['Keywords'].tolist()

    def best_or_original(keyword):
        # Highest-ratio sheet candidate for this keyword.
        candidate = max(excel_keywords, key=lambda k: fuzz.ratio(keyword.lower(), str(k).lower()))
        return candidate if fuzz.ratio(keyword.lower(), str(candidate).lower()) > 70 else keyword

    return [best_or_original(kw) for kw in keyword_array]
def match_keywords(excel_path, sheet_name, text, column_letter='A'):
    """Return the 5 sheet keywords most similar to any word in *text*.

    Each keyword's score is its best SequenceMatcher ratio against any
    whitespace-separated word of *text*; the top five keywords are returned
    (scores are dropped).
    """
    sheet = openpyxl.load_workbook(excel_path)[sheet_name]
    keywords = [cell.value for cell in sheet[column_letter] if cell.value]

    def ratio(a, b):
        # Case-insensitive similarity.
        return SequenceMatcher(None, a.lower(), b.lower()).ratio()

    words = text.split()
    scored = [(kw, max(ratio(kw, w) for w in words)) for kw in keywords]
    scored.sort(key=lambda pair: pair[1], reverse=True)
    return [kw for kw, _ in scored[:5]]
def remove_common_elements(array1, array2):
    """Return the elements of *array1* not present in *array2*, in order.

    PERF FIX: the exclusion set is built once up front; the original rebuilt
    ``set(array2)`` inside the comprehension condition, i.e. once per element
    of *array1* (accidental O(n*m)).
    """
    exclude = set(array2)
    return [x for x in array1 if x not in exclude]
def remove_keyphrases_from_excel(file_path, keyphrases, output_path=None):
    """Strip every phrase in *keyphrases* from all cells of a workbook.

    Cells left empty (or whitespace-only) after stripping are set to None.
    Saves to *output_path* when given, otherwise overwrites *file_path*.
    """
    wb = openpyxl.load_workbook(file_path)
    for sheet in wb.worksheets:
        for row in sheet.iter_rows():
            for cell in row:
                if not cell.value:
                    continue
                text = str(cell.value)
                # Remove each matching phrase from the cell text.
                for phrase in keyphrases:
                    if phrase in text:
                        text = text.replace(phrase, '')
                # Blank cells become None rather than empty strings.
                cell.value = text if text.strip() else None
    wb.save(output_path if output_path else file_path)
    print("Keyphrases removed successfully.")
def read_array_from_file(file_path):
    """Read a text file containing a Python literal and return it evaluated.

    Uses ast.literal_eval, so only Python literals (lists, dicts, strings,
    numbers, ...) are accepted — arbitrary code is never executed.
    """
    with open(file_path, 'r') as fh:
        return ast.literal_eval(fh.read())
def remove_all_files_in_folder(folder_path):
    """Delete every file and subdirectory inside *folder_path*.

    The folder itself is kept. A missing folder is silently ignored;
    individual deletion failures are reported and skipped.
    """
    if not os.path.exists(folder_path):
        #print(f"The folder {folder_path} does not exist.")
        return
    for entry in os.listdir(folder_path):
        target = os.path.join(folder_path, entry)
        try:
            if os.path.isfile(target):
                os.remove(target)
            elif os.path.isdir(target):
                # Directories are removed together with their contents.
                shutil.rmtree(target)
        except Exception as e:
            print(f"Error deleting {target}: {e}")
    print(f"All files and directories in {folder_path} have been removed.")
def call_openai(prompt, temperature, max_tokens, model, model_key = ""):
    """Send *prompt* to an OpenAI-compatible chat endpoint and return the reply.

    Args:
        prompt: User message.
        temperature: Sampling temperature (ignored by the reasoning models).
        max_tokens: Completion token cap (ignored by the reasoning models).
        model: "o1-preview", "deepseek", "o3-mini", or any OpenAI chat model.
        model_key: API key for the chosen provider.

    Returns:
        str: The assistant message content.

    BUG FIX: the "o1-preview" branch previously read ``client`` before it was
    assigned — every other branch assigns ``client``, which makes the name
    function-local, so that branch raised UnboundLocalError. It now builds its
    own client from *model_key* like the other branches.
    """
    if model == "o1-preview":
        client = OpenAI(api_key=model_key)
        # Reasoning models reject system messages / temperature / max_tokens.
        completion = client.chat.completions.create(
            model=model,
            messages=[
                {"role": "user", "content": prompt}
            ]
        )
    elif model == "deepseek":
        # DeepSeek exposes an OpenAI-compatible API at its own base URL.
        client = OpenAI(api_key=model_key, base_url="https://api.deepseek.com")
        messages=[{"role": "system", "content": "You are a expert in SEO and a representative of Farmonaut."},{"role": "user", "content": prompt}]
        completion = client.chat.completions.create(
            model="deepseek-reasoner",
            messages=messages,
            max_tokens=max_tokens,
            temperature=temperature
        )
    elif model == "o3-mini":
        client = OpenAI(
            api_key = model_key
        )
        completion = client.chat.completions.create(
            model=model,
            messages=[
                {"role": "user", "content": prompt}
            ]
        )
    else:
        client = OpenAI(
            api_key = model_key
        )
        completion = client.chat.completions.create(
            model=model,
            messages=[
                {"role": "system", "content": "You are a expert in SEO and a representative of Farmonaut."},
                {"role": "user", "content": prompt}
            ],
            max_tokens=max_tokens,
            temperature=temperature
        )
    return completion.choices[0].message.content
def save_to_file(file_name, content):
    """Write *content* to *file_name* (overwriting) and report the save."""
    with open(file_name, 'w') as out:
        out.write(content)
    print(f"Content saved to {file_name}")
def string_to_array(string):
    """Parse *string* as a Python literal; return the input unchanged on failure.

    BUG FIX: the original used a bare ``except:``, which also swallowed
    KeyboardInterrupt and SystemExit. Only the exceptions that
    ``ast.literal_eval`` documents are caught now.
    """
    try:
        return ast.literal_eval(string)
    except (ValueError, TypeError, SyntaxError, MemoryError, RecursionError):
        # Not a valid literal — hand back the original value.
        return string
def get_first_column_values(file_path, sheet_name=None):
    """Return column A of an Excel sheet as a list of strings.

    Empty cells become "". Uses the named sheet when *sheet_name* is given,
    otherwise the workbook's active sheet. Loaded with data_only=True, so
    formula cells yield their cached values.
    """
    workbook = openpyxl.load_workbook(file_path, data_only=True)
    sheet = workbook[sheet_name] if sheet_name else workbook.active
    return ["" if cell.value is None else str(cell.value) for cell in sheet['A']]
# Example usage:
# file_path = 'your_file.xlsx'
# values = get_first_column_values(file_path, 'Sheet1')
# print(values)
def get_file_extension(url):
    """Return a URL's file extension without the dot ("" when absent).

    Only the parsed path is inspected, so query strings and fragments never
    leak into the extension.
    """
    path = urlparse(url).path
    _, ext = os.path.splitext(path)
    return ext.lstrip('.') if ext else ""
# posts = list(posts)
# WARNING: this disables TLS certificate verification process-wide (needed
# for some scraping targets, but it weakens security for EVERY HTTPS request
# this process makes).
ssl._create_default_https_context = ssl._create_unverified_context
# Load environment variables from .env
load_dotenv()
# Instagram session id (copy it from your browser cookies after logging in)
SESSIONID = os.getenv("INSTAGRAM_SESSIONID")
# Headers for Instagram requests
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.74 Safari/537.36 Edg/79.0.309.43",
    "cookie": f'sessionid={SESSIONID};'
}
# Initialize Instaloader with all metadata downloads disabled (media only)
L = instaloader.Instaloader()
L.post_metadata_txt_pattern = ""
L.download_geotags = False
L.save_metadata = False
L.save_metadata_json = False
L.download_comments = False
# WordPress credentials.
# SECURITY FIX: the application password was previously hard-coded here (and
# committed to source control). Credentials now come from the environment —
# rotate the exposed password and set WP_USERNAME / WP_APP_PASSWORD in .env.
wp_url = os.getenv("WP_URL", "https://www.farmonaut.com")
wp_username = os.getenv("WP_USERNAME", "ankuromar296")
wp_password = os.getenv("WP_APP_PASSWORD", "")
def remove_keywords(selected_keywords, excel_file, sheet_name, keyword_column, num_keywords=5):
    """Drop rows whose *keyword_column* value is in *selected_keywords* and
    write the sheet back to the same file.

    Raises:
        ValueError: if *keyword_column* is missing from the sheet.
    """
    df = pd.read_excel(excel_file, sheet_name=sheet_name)
    if keyword_column not in df.columns:
        raise ValueError(f"Column '{keyword_column}' not found in the Excel sheet.")
    # Keep only the rows that are NOT in the selected set, then overwrite.
    keep_mask = ~df[keyword_column].isin(selected_keywords)
    df[keep_mask].to_excel(excel_file, sheet_name=sheet_name, index=False)
def select_and_remove_keywords(text, excel_file, sheet_name, keyword_column, num_keywords=5):
    """Select the *num_keywords* sheet keywords most similar to *text*
    (TF-IDF cosine similarity), delete them from the Excel sheet, and return
    them, most similar first.

    Raises:
        ValueError: if *keyword_column* is missing from the sheet.
    """
    df = pd.read_excel(excel_file, sheet_name=sheet_name)
    if keyword_column not in df.columns:
        raise ValueError(f"Column '{keyword_column}' not found in the Excel sheet.")
    keywords = df[keyword_column].tolist()
    # Defensive stringification: dict-valued cells would break the vectorizer.
    text = str(text) if isinstance(text, dict) else text
    keywords = [str(kw) if isinstance(kw, dict) else kw for kw in keywords]
    # Row 0 of the matrix is the text; the remaining rows are the keywords.
    vectorizer = TfidfVectorizer()
    tfidf_matrix = vectorizer.fit_transform([text] + keywords)
    # TF-IDF rows are L2-normalized, so the dot product is cosine similarity.
    similarities = (tfidf_matrix * tfidf_matrix.T).toarray()[0][1:]
    top_indices = np.argsort(similarities)[-num_keywords:][::-1]
    selected_keywords = [keywords[i] for i in top_indices]
    # Remove the selected rows and persist the sheet.
    df = df[~df[keyword_column].isin(selected_keywords)]
    df.to_excel(excel_file, sheet_name=sheet_name, index=False)
    return selected_keywords
# Existing functions remain the same
# (select_and_remove_keywords, get_instagram_posts, extract_text_from_image, extract_text_from_video, generate_blog_content)
def call_genai(prompt, temperature, max_tokens):
    """Send *prompt* to Claude and return the first text block of the reply.

    Args:
        prompt: User message to send.
        temperature: Sampling temperature.
        max_tokens: Maximum tokens for the completion.

    SECURITY FIX: the Anthropic API key was previously hard-coded here (and
    committed to source control). It now comes from the ANTHROPIC_API_KEY
    environment variable; rotate the key that was exposed.
    """
    client = anthropic.Anthropic(
        api_key=os.environ.get("ANTHROPIC_API_KEY"),
    )
    claude_model = "claude-3-5-sonnet-20240620"
    #claude_model = "claude-3-5-sonnet-20241022"
    message = client.messages.create(
        model= claude_model,
        max_tokens=max_tokens,
        temperature=temperature,
        system = "You are an SEO expert, a gis/ remote sensing expert, an agriculture and horticulture scientist, and a representative of Farmonaut (farmonaut.com).",
        messages=[
            {"role": "user", "content": prompt}
        ]
    )
    return message.content[0].text
def upload_media_to_wordpress(file_path, title):
    """Upload a media file to the WordPress media library.

    Args:
        file_path: Local path of the file to upload.
        title: Used for the upload name and the alt text/caption/description.

    Returns:
        (media_id, source_url) on success, (None, None) on failure.
    """
    endpoint = f"{wp_url}/wp-json/wp/v2/media"
    auth = HTTPBasicAuth(wp_username, wp_password)
    mime_type, _ = mimetypes.guess_type(file_path)
    # Metadata fields recognised by the WP REST media endpoint.
    media_data = {
        'alt_text':title,
        'caption':title,
        'description':title
    }
    upload_name = f"{title}_{os.path.basename(file_path)}"
    with open(file_path, 'rb') as file:
        files = {'file': (upload_name, file, mime_type)}
        # BUG FIX: the metadata was previously passed as ``json=``, which
        # requests discards when ``files=`` is present — so alt text, caption
        # and description were never set. Send it as multipart form fields.
        response = requests.post(endpoint, files=files, auth=auth, data=media_data)
    if response.status_code == 201:
        payload = response.json()
        return payload['id'], payload['source_url']
    else:
        print(f"Failed to upload media. Status code: {response.status_code}")
        print(f"Response: {response.text}")
        return None, None
def extract_text_from_video(video_path):
    """OCR a video by sampling one frame per second and concatenating text.

    Returns:
        str: Newline-joined Tesseract output per sampled frame; "" when the
        video cannot be opened or reports no frame rate.
    """
    video = cv2.VideoCapture(video_path)
    try:
        fps = int(video.get(cv2.CAP_PROP_FPS))
        # BUG FIX: unreadable files (or missing metadata) report fps == 0,
        # which previously raised ZeroDivisionError below.
        if fps <= 0:
            return ""
        frame_count = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
        duration = frame_count / fps
        text = ""
        for second in range(int(duration)):
            # Seek to the next whole second and grab one frame.
            video.set(cv2.CAP_PROP_POS_MSEC, second * 1000)
            success, frame = video.read()
            if not success:
                break
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            text += pytesseract.image_to_string(gray) + "\n"
        return text
    finally:
        # Always release the capture handle, even if OCR raises.
        video.release()
def process_media2(media_url_arr, title):
    """Download media URLs, watermark them, and upload each to WordPress.

    Each URL is saved into the local ``insta_files`` folder (numbered by
    position, extension taken from the URL), watermarked, PNGs converted to
    JPG, and then every file in the folder is uploaded.

    Returns:
        list[dict]: one entry per successful upload with keys
        'type' ('image'), 'text' (the title), 'id', and 'url'.

    NOTE(review): the whole ``insta_files`` folder is uploaded, so leftovers
    from a previous run would be re-uploaded unless cleared beforehand.
    """
    media_info = []
    folder_name = 'insta_files'
    # ROBUSTNESS FIX: create the folder up front so the os.listdir below never
    # fails, even when media_url_arr is empty or every download errors out.
    os.makedirs(folder_name, exist_ok=True)
    for media_num, url in enumerate(media_url_arr, start=1):
        file_path = folder_name + '/' + str(media_num) + '.' + str(get_file_extension(url))
        try:
            response = requests.get(url, stream=True)
        except requests.exceptions.RequestException:
            # Narrowed from a bare except: only network/HTTP failures are
            # expected here; anything else should surface.
            print(traceback.format_exc())
            continue
        if response.status_code == 200:
            with open(file_path, 'wb') as file:
                for chunk in response.iter_content(chunk_size=8192):
                    if chunk:
                        file.write(chunk)
            add_watermark(file_path, 'watermark.jpg')
            convert_png_to_jpg(file_path)
        else:
            print(f"Failed to download file. Status code: {response.status_code}")
    # Upload everything that landed in the folder.
    for file in os.listdir(folder_name):
        file_path = os.path.join(folder_name, file)
        media_id, media_url = upload_media_to_wordpress(file_path, title)
        if media_id and media_url:
            media_info.append({
                'type': 'image',
                'text': title,
                'id': media_id,
                'url': media_url
            })
    return media_info
def extract_text_from_image(image_path):
    """Run Tesseract OCR on an image file and return the extracted text."""
    return pytesseract.image_to_string(Image.open(image_path))
import pandas as pd
import ast
def get_keyword_arrays(file_path):
    """Yield each row of an Excel 'Keywords' column parsed as a Python list.

    Rows that are not valid list literals are reported and yield [] so the
    consumer's iteration count still matches the row count.
    """
    df = pd.read_excel(file_path, usecols=['Keywords'])
    for keywords in df['Keywords']:
        try:
            # literal_eval only accepts Python literals — no code execution.
            yield ast.literal_eval(keywords)
        except (ValueError, SyntaxError):
            print(f"Invalid format for keywords: {keywords}")
            yield []
def ai_image_url(prompt):
    """Generate one 1024x1024 DALL-E 3 image for *prompt* and return its URL."""
    result = client.images.generate(
        model="dall-e-3",
        prompt=prompt,
        size="1024x1024",
        quality="standard",
        n=1,
    )
    return result.data[0].url
def main():
total_count, good_count, bad_count = 0,0,0
# posts_done = ['C_kYH2NJURr', 'C_aMTxcN0Vs', 'C_TJJ_MN2jD', 'C_P35JyyIVW', 'C-_8t2MvV--', 'C-mgOBuR_pU']
posts_done = []
posts_failed = []
#posts = get_instagram_posts(instagram_username)
#posts = posts['data']['xdt_api__v1__feed__user_timeline_graphql_connection']['edges']
farmonaut_info = '''
Farmonaut is a pioneering agricultural technology company that offers advanced, satellite-based farm management solutions via android, iOS, web/browser App, and API. Its mission is to make precision agriculture affordable and accessible to farmers worldwide by integrating innovative technology and data-driven insights into traditional farming practices. Farmonaut's platform provides valuable services such as real-time crop health monitoring, AI-based advisory systems, blockchain-based traceability, and resource management tools. Here's a comprehensive overview of its technologies, business model, target audience, and value proposition.
1. Technologies
Farmonaut leverages a blend of satellite imagery, artificial intelligence (AI), blockchain, and machine learning to address various agricultural challenges.
Satellite-Based Crop Health Monitoring: Farmonaut uses multispectral satellite images to monitor crop health, providing farmers with insights into vegetation health (NDVI), soil moisture levels, and other critical metrics. This data helps farmers make informed decisions about irrigation, fertilizer usage, and pest management, ultimately optimizing crop yields and reducing resource wastage(
Farmonaut
)(
Farmonaut
).
Jeevn AI Advisory System: This AI-driven personalized farm advisory tool delivers real-time insights, weather forecasts, and expert crop management strategies to farmers. Jeevn AI analyzes satellite data and other inputs to generate customized advice, improving farm productivity and efficiency(
Farmonaut
).
Blockchain-Based Product Traceability: By integrating blockchain technology, Farmonaut enables traceability solutions for various industries, particularly agriculture. Blockchain ensures that every stage of the product's journey, from farm to consumer, is transparent and secure, thereby enhancing trust and reducing fraud in supply chains(
Farmonaut
).
Fleet and Resource Management: Farmonaut provides tools for fleet management, enabling agribusinesses to manage their logistics more efficiently. This helps reduce operational costs by optimizing vehicle usage, ensuring safety, and improving the overall management of agricultural machinery(
Farmonaut
).
Carbon Footprinting: To help agribusinesses monitor and reduce their environmental impact, Farmonaut offers carbon footprint tracking. This feature provides real-time data on emissions, allowing businesses to take steps towards sustainability and compliance with environmental regulations(
Farmonaut
)(
Farmonaut
).
2. Business Model
Farmonaut operates on a subscription-based model, offering various packages that cater to individual farmers, cooperatives, agribusinesses, and government institutions. The company provides flexible pricing tiers depending on the number of hectares monitored and the frequency of satellite data updates. Their platform is accessible through web and mobile apps, making it easy for users to track and manage their farms from anywhere(
Farmonaut
).
The business also extends its services through APIs, enabling developers and businesses to integrate Farmonaut’s satellite and weather data into their own systems. This API access broadens the company’s revenue streams and encourages collaboration with other agritech companies and agricultural research organizations(
Farmonaut
).
Additionally, Farmonaut partners with financial institutions to provide satellite-based verification for crop loans and insurance, reducing the likelihood of fraud and improving access to financing for farmers(
Farmonaut
)(
Farmonaut
).
3. Target Audience
Farmonaut’s solutions are tailored for a wide range of users in the agricultural ecosystem:
Individual Farmers: The platform offers individual farmers access to precise, real-time data on their crops. This allows them to make better decisions regarding crop health, pest management, and resource allocation. Small and medium-sized farms can particularly benefit from these affordable precision farming solutions(
Farmonaut
).
Agribusinesses: Large-scale agribusinesses use Farmonaut for plantation management, fleet tracking, and resource optimization. These businesses can manage vast farming operations more efficiently by leveraging satellite monitoring and AI-driven insights(
Farmonaut
).
Governments and NGOs: Farmonaut works with government agencies and non-governmental organizations (NGOs) to improve agricultural productivity, implement large-scale farm monitoring programs, and support sustainable farming initiatives. Governments also use Farmonaut's tools for crop area and yield estimation, especially in policy and subsidy distribution(
Farmonaut
).
Financial Institutions: By providing satellite-based verification of farms, Farmonaut helps banks and insurance companies streamline crop loan approvals and reduce fraudulent claims in agricultural insurance(
Farmonaut
)(
Farmonaut
).
Corporate Clients: Companies, especially in sectors like textile and food, use Farmonaut's blockchain-based traceability solutions to ensure the authenticity and transparency of their supply chains. This strengthens consumer trust and enhances the brand's reputation(
Farmonaut
).
4. Value Proposition and Benefits
Farmonaut’s key value propositions include:
Cost-Effective Precision Agriculture: Farmonaut democratizes access to precision agriculture by offering affordable services for real-time crop monitoring and farm management. Unlike traditional precision farming tools that require expensive hardware, Farmonaut relies on satellite imagery, making it a more economical solution for farmers of all scales(
Farmonaut
).
Increased Farm Productivity: By providing real-time data on crop health, soil moisture, and weather patterns, Farmonaut allows farmers to make informed decisions that optimize their resources. This leads to better crop yields, reduced input costs, and minimized crop losses(
Farmonaut
).
Sustainability: Through features like carbon footprint tracking and efficient resource management, Farmonaut promotes sustainable farming practices. This is crucial in today’s agriculture, where there is growing pressure to reduce environmental impact while increasing food production(
Farmonaut
).
Transparency and Trust: Farmonaut’s blockchain-based traceability solution ensures transparency in supply chains, particularly for corporate clients in agriculture and related sectors. By offering verifiable data on product origin and journey, the system helps build consumer trust(
Farmonaut
).
Access to Financing: Farmonaut's partnerships with financial institutions provide farmers with satellite-based verification for loans and insurance. This improves access to financing while reducing the risks for lenders(
Farmonaut
)(
Farmonaut
).
Scalability: The platform is highly scalable, serving clients from smallholder farmers to large agribusinesses and government bodies. Its modular design allows users to choose the services they need and scale them up as their operations grow(
Farmonaut
).
Conclusion
Farmonaut stands out in the agritech space by offering a comprehensive suite of tools that combine satellite technology, AI, and blockchain to meet the diverse needs of modern agriculture. Whether it's precision farming, supply chain transparency, or sustainability, Farmonaut is at the forefront of the agricultural revolution, making it easier for farmers and agribusinesses to thrive in an increasingly data-driven world. By lowering the cost barrier and providing advanced solutions, Farmonaut continues to empower farmers, improve productivity, and promote sustainable agricultural practices globally.
'''
yoast_guidelines = {
"yoastSEOGuidelines": {
"contentOptimization": [
{
"guideline": "Use focus keyword in the first paragraph",
"description": "Include your main keyword in the opening of your content."
},
{
"guideline": "Use focus keyword in the title",
"description": "Include your main keyword in the page title, preferably at the beginning."
},
{
"guideline": "Use focus keyword in the URL",
"description": "Include your main keyword in the page URL."
},
{
"guideline": "Use focus keyword in headings",
"description": "Include your main keyword in at least one subheading (H2, H3, etc.)."
},
{
"guideline": "Keyword density",
"description": "Maintain a keyword density between 0.5% and 2.5%."
},
{
"guideline": "Content length",
"description": "Write at least 300 words for regular posts and pages."
},
{
"guideline": "Use internal links",
"description": "Include internal links to other relevant pages on your website."
},
{
"guideline": "Use external links",
"description": "Include outbound links to authoritative sources when appropriate."
},
{
"guideline": "Use images",
"description": "Include at least one image with alt text containing the focus keyword."
}
],
"readability": [
{
"guideline": "Use short paragraphs",
"description": "Keep paragraphs to 150 words or less."
},
{
"guideline": "Use subheadings",
"description": "Break up text with descriptive subheadings (H2, H3, etc.)."
},
{
"guideline": "Use transition words",
"description": "Use transition words to improve content flow."
},
{
"guideline": "Vary sentence length",
"description": "Mix short and long sentences for better readability."
},
{
"guideline": "Use Flesch Reading Ease score",
"description": "Aim for a score of 60-70 for general audience content."
}
]
}
}
# Condensed Yoast SEO rules: the same guidance as the long-form checklist,
# compacted so it can be injected cheaply into LLM prompts.
yoast_guidelines2 = dict(
    content=dict(
        focusKeyword=dict(
            firstParagraph="Include in opening",
            title="Include, preferably at start",
            url="Include in page URL",
            headings="Use in at least one subheading",
            density="0.5% - 2.5%",
        ),
        length="300+ words for posts/pages",
        links=dict(
            internal="Include relevant internal links",
            external="Link to authoritative sources",
        ),
        images="Use with keyword in alt text",
    ),
    readability=dict(
        paragraphs="Max 150 words",
        subheadings="Use to break up text",
        transitionWords="Improve content flow",
        sentenceLength="Mix short and long",
        activeVoice="Use in 90%+ of sentences",
        fleschScore="Aim for 60-70",
    ),
)
# Candidate writing tones and content themes. An LLM call later in the
# pipeline picks the best-matching tone/theme for each news article.
blog_tones_and_themes = {
    "tones": [
        {"name": name, "description": description}
        for name, description in (
            ("Informative", "Neutral, objective, and educational."),
            ("Conversational", "Friendly, engaging, and informal."),
            ("Persuasive", "Assertive, convincing, and motivational."),
            ("Inspirational", "Uplifting, motivational, and positive."),
            ("Humorous", "Light-hearted, witty, and entertaining."),
            ("Serious/Professional", "Formal, authoritative, and professional."),
        )
    ],
    "themes": [
        {"name": name, "description": description}
        for name, description in (
            ("Technology & Innovation", "Focus on latest tech advancements and impacts."),
            ("Sustainability & Environment", "Environmentally friendly and sustainable living."),
            ("Personal Growth & Development", "Focus on self-improvement and productivity."),
            ("Industry Trends & News", "Covering latest industry trends and news."),
            ("Case Studies & Success Stories", "Showcasing examples of success."),
            ("Guides and How-tos", "Step-by-step guides to accomplish tasks."),
            ("Problem-Solving", "Addressing problems and providing solutions."),
            ("Lifestyle & Culture", "Focusing on cultural and lifestyle aspects."),
            ("Health & Wellness", "Topics related to physical or societal health."),
            ("Business & Entrepreneurship", "Insights, tips, and strategies for businesses."),
            ("Finance & Investment", "Advice on financial planning, budgeting, or investments."),
            ("Social Issues & Advocacy", "Discussing social causes or movements."),
            ("Food & Cooking", "Recipes, cooking tips, or food culture exploration."),
            ("Education & Learning", "Tips for learning new skills, or education-related content."),
        )
    ],
}
# WordPress category-slug lookup, keyed by category ID. IDs are kept as
# strings because the LLM that selects a category answers in plain text.
categories_obj = dict(
    [
        ("580", "south-america"),
        ("579", "asia"),
        ("578", "africa"),
        ("577", "united-kingdom"),
        ("576", "canada"),
        ("575", "europe"),
        ("574", "australia"),
        ("5", "blogs"),
        ("573", "news"),
        ("546", "case-study"),
        ("542", "area-estimation"),
        ("9", "remote-sensing"),
        ("548", "precision-farming"),
        ("572", "api-development"),
        ("561", "usa"),
    ]
)
# Static catalogue of (video title, YouTube URL) pairs, stored as the
# literal text of a Python list. It is pasted verbatim into LLM prompts
# (never parsed), so a plain string is all that is needed.
# Fix: dropped the useless `f` prefix — the literal contains no
# placeholders, and an f-string here would turn any future brace in a
# title into a SyntaxError or silent interpolation.
youtube_videos = '''
[('Farmonaut App Tutorial: How to Add & Map Fields Easily', 'https://youtube.com/watch?v=I3PZXJZE9as'), ("How Farmonaut's Satellite Technology is Revolutionizing Land Use in Agriculture", 'https://youtube.com/watch?v=B9K9IW0gy2Q'), ("Discover Farmonaut's Advanced Agri Solutions: Precision Crop Area Estimation - Egypt Case Study", 'https://youtube.com/watch?v=Fn5gY7QtFjo'), ('Unlocking Soil Organic Carbon: The Secret to Sustainable Farming with Farmonaut', 'https://youtube.com/watch?v=GEWF0ite050'), ("Explore Farmonaut's Advanced Crop Monitoring & Yield Prediction", 'https://youtube.com/watch?v=5wMEg-u5XdU'), ('Farmonaut Agro Admin App: Revolutionizing Large-Scale Farm Management', 'https://youtube.com/watch?v=iPg6X_s9Seo'), ('Satellite & AI Based Automated Tree Detection For Precise Counting and Location Mapping', 'https://youtube.com/watch?v=kB_V4JAlA1M'), ('Farmonaut Automated Detection of Alternate Wet and Dry Farming Phases', 'https://youtube.com/watch?v=GnXN51pte0E'), ('Welcome to the Future of Farming with JEEVN AI | AI Based Personalized Farm Advisory', 'https://youtube.com/watch?v=QkqWbooLh6s'), ('Transform Your Farming Experience with Farmonaut!', 'https://youtube.com/watch?v=i0w1z6FNxZQ'), ('WhatsApp Tutorial: Step-by-Step Guide to Connect Your Farm to Our Satellite Monitoring System', 'https://youtube.com/watch?v=XFLtA8zR96s'), ('Visualizing Farms with Satellite Data using iFrame for Farmonaut API Users', 'https://youtube.com/watch?v=J4HeFUJgwvk'), ('Integrate Weather Data Using Farmonaut API | Comprehensive Tutorial', 'https://youtube.com/watch?v=WgCHVXNDHNY'), ('How to Add and Remove Languages for Satellite Reports | Farmonaut API Tutorial', 'https://youtube.com/watch?v=-MRAf8_YX8E'), ('How to Check API Usage, Expired Farms, and Calculate Farm Area | Farmonaut API Tutorial', 'https://youtube.com/watch?v=sNZd4oxY7Zc'), ('How to Add a Field Using iFrame for Satellite Monitoring | Step-by-Step Tutorial', 'https://youtube.com/watch?v=S073EeIF3Xc'), ('How 
to Interpret Satellite Data for Agriculture | Tutorial | Farmonaut Mobile Apps', 'https://youtube.com/watch?v=OnsYwixc8_E'), ('How To Create An API Account | Farmonaut API Video Tutorial', 'https://youtube.com/watch?v=RpBlJ86Xgv4'), ("Farmonaut's Web App Tutorial: Comprehensive Guide for Interpreting Satellite Data", 'https://youtube.com/watch?v=e4BLMuWUAdU'), ('How to Retrieve Farm Data | Farmonaut API Tutorial', 'https://youtube.com/watch?v=OnuwHnpey0k'), ('Pause Resume or Extend Farm Satellite Monitoring - Farmonaut API Tutorial', 'https://youtube.com/watch?v=zBHE7mn0zT0'), ('Manage Your Farms with Ease Using Our APIs!', 'https://youtube.com/watch?v=kDWPl2hQpKI'), ('How to Check Consolidated Farm Report | Farmonaut Mobile Apps', 'https://youtube.com/watch?v=srbBgKp-MjQ'), ('How to Add Farm For Satellite Monitoring | Farmonaut Mobile Apps', 'https://youtube.com/watch?v=IVApjPza55M'), ('How To Check Detailed Satellite Report Of Your Farm - Farmonaut Mobile Apps', 'https://youtube.com/watch?v=wbHASbTJXvM'), ('How to Pause, Resume or Delete Field From Your Account | Farmonaut Mobile Apps', 'https://youtube.com/watch?v=9hBzyyWKWJA'), ('How To Check The Satellite Data - Farmonaut Mobile Apps', 'https://youtube.com/watch?v=pPcmGOmYyTc'), ("Tutorial for Farmonaut's Web App For Satellite Monitoring", 'https://youtube.com/watch?v=3erGO8xjDQY'), ('Celebrating 5 Years of Innovation in Agriculture with Farmonaut! 
| Farmonaut Turns 5', 'https://youtube.com/watch?v=oHFNO8LckLY'), ('Farmonaut: Cultivating Innovation in Agriculture | Year in Review 2023', 'https://youtube.com/watch?v=vRX9G9JALwc'), ("Farmonaut's Tech Advancements in Q3", 'https://youtube.com/watch?v=eNd8xCq30wc'), ("Farmonaut's Remarkable Half-Year Achievements 2023!", 'https://youtube.com/watch?v=12A8B_7uC-A'), ("Farmonaut®'s Traceability solution for Honey is going live with Dabur", 'https://youtube.com/watch?v=nU3Probs-Lk'), ('Farmonaut | Connect Your Farms With Satellites in Just 2 Minutes Using WhatsApp', 'https://youtube.com/watch?v=1MPp5ung6cI'), ("Farmonaut's Remarkable Q3 2023 Milestones in Agricultural Sector", 'https://youtube.com/watch?v=wrbn85x8bLE'), ('Satellite based WhatsApp advisory for Farmers by Farmonaut', 'https://youtube.com/watch?v=WhUG8rnrmFo'), ('Farmonaut Tutorial: Farm Mapping As a User of Smartphone Apps (Android and iOS)', 'https://youtube.com/watch?v=gRoPvQslDYc'), ('Farmonaut - STEI Foundation Africa Collaboration', 'https://youtube.com/watch?v=a-3k7TY0vzw'), ("Farmonaut's August Milestones", 'https://youtube.com/watch?v=8Hw6BlE6NFQ'), ('RADER & FARMONAUT partner for Africa Green Impact (AGI) in Central Africa & Nigeria.', 'https://youtube.com/watch?v=OD78p4IZmMQ'), ('Farmonaut®: Milestones Achieved in July 2023', 'https://youtube.com/watch?v=OuBK52GDS5g'), ('Farmonaut Satellite Monitoring Whitelabel Solutions', 'https://youtube.com/watch?v=zvlJp__of-g'), ("Farmonaut Spotlight - Q'2 - Part 2", 'https://youtube.com/watch?v=TpwEolFOgGw'), ('Farmonaut Farm Mapping Tutorial - Mobile App', 'https://youtube.com/watch?v=Uw0HzdJF6Q8'), ('Farmonaut Covered By Radix AI: Leveraging Remote Sensing and Machine Learning for a Greener Future', 'https://youtube.com/watch?v=tiB5zJ4IRu0'), ("Coromandel's My Gromor App offers satellite-based farm advice via Farmonaut to farmers", 'https://youtube.com/watch?v=LCtgELI95tA'), ("My Gromor App Brings Satellite-Powered Farm Advisory Services to India's 
Farmers via Farmonaut", 'https://youtube.com/watch?v=WYm1FQXUTN8'), ('Farmonaut® | 90-95% Accuracy in Organic Carbon Data From Farmonaut', 'https://youtube.com/watch?v=GuWKnuqnX0k'), ('JEEVN AI Tutorial | How to Use JEEVN AI For Generating Farm Advisory', 'https://youtube.com/watch?v=RNRN8ODo46k'), ('Introducing JEEVN AI | An AI Tool For Personalized Farm Advise', 'https://youtube.com/watch?v=25PjLwECtDo'), ('Farmonaut | How to Compare images', 'https://youtube.com/watch?v=n6KEWZClihg'), ('Farmonaut Web app | Satellite Based Crop monitoring', 'https://youtube.com/watch?v=tD7cC-dI-Yc'), ('Farmonaut Tutorial | How to Download Weather Data', 'https://youtube.com/watch?v=Azm0ajcUWng'), ('Farmonaut | How to Generate Time Lapse', 'https://youtube.com/watch?v=YSwP9iq5OXs'), ('Farmonaut Introduction - Large Scale Usage For Businesses and Governments', 'https://youtube.com/watch?v=aYUVo5u9YvE'), ('Farmonaut Introduction - English', 'https://youtube.com/watch?v=-RSqvtJ1SIE'), ('Farmonaut For Crop Area Estimation', 'https://youtube.com/watch?v=3PUPMR5Kfi4'), ('Farmonaut WhatsApp Based Satellite Advisory | 90% + Engagement Rate', 'https://youtube.com/watch?v=urSEO6KVkXM'), ('Farmonaut For Admins Tutorial Video', 'https://youtube.com/watch?v=YliR45N9B9Q'), ('Farmonaut API Video Tutorial - How To Make API Account', 'https://youtube.com/watch?v=tM8UlkbX4cI'), ('Introducing WhatsApp Based Satellite Advisory', 'https://youtube.com/watch?v=Z1ZdiKtnzgo'), ('Farmonaut | Cost Effective Blockchain Based Traceability Solutions for Textile and Fashion Industry', 'https://youtube.com/watch?v=fKOKe2fKI7A'), ('Mapping of Cotton in Maharashtra, Coriander in Rajasthan, Sugarcane in Karnataka, Banana in WB', 'https://youtube.com/watch?v=4sRXUNEgiIQ'), ('Coriander Farm Mapping Going on in Rajasthan', 'https://youtube.com/watch?v=6wy45OcgC6g'), ('Farmonaut Wishes Everyone A Very Happy Diwali!', 'https://youtube.com/watch?v=B4xF1hFvf3o'), ('Farmonaut For Crop Area Estimation', 
'https://youtube.com/watch?v=RuN6nZKJV3U'), ('Farmonaut For Admins Tutorial Video', 'https://youtube.com/watch?v=jAQIOleOOBg'), ('Farmonaut Web App | Search For Farms Visited Yesterday By Satellites | Track Polygon Mapping Process', 'https://youtube.com/watch?v=FOWVebnTbOo'), ('Farmonaut For Oil Palm Plantation', 'https://youtube.com/watch?v=gSwG2pXbBLk'), ('Farmonaut® | Making Farming Better With Satellite Data', 'https://youtube.com/watch?v=DuYxCOxgl7w'), ('Farmonaut Has Received Ramaiah-Evolute Star Startup Award', 'https://youtube.com/watch?v=PAgKVOlTtd4'), ('Farmonaut Large Scale Field Mapping & Satellite Based Farm Monitoring | How To Get Started', 'https://youtube.com/watch?v=k1qdCCf-3Kw'), ('The Role of Artificial Intelligence in Agriculture - Farmonaut | Agritecture | Joyce Hunter', 'https://youtube.com/watch?v=YPZJ62YQsZY')]
'''
# Load the list of already-published posts so a restart does not repeat
# work; fall back to an empty list when the file is missing/unreadable.
try:
    done_posts = read_array_from_file('posts_done.txt')
except Exception:
    # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit still
    # propagate; any real read/parse failure is logged and tolerated.
    print(traceback.format_exc())
    done_posts = []
# for temp_post in done_posts:
#     is_this_a_festival_post = "no"
#     if temp_post not in posts_done:
#         posts_done.append(temp_post)
# Counter for published posts and the local folder used for blog assets.
post_num = 0
main_folder = 'blogs'

# Geographies to rotate through, one per pass of the main loop. Each entry
# is [wikipedia concept URI, max articles to fetch, language code].
# NOTE(review): "NA" presumably means "no language filter" — confirm
# against get_news_articles in post_news.
geographies = [
    ['http://en.wikipedia.org/wiki/United_States', 100, "en"],
    ['http://en.wikipedia.org/wiki/Australia', 50, "en"],
    ['http://en.wikipedia.org/wiki/United_Kingdom', 50, "en"],
    ['http://en.wikipedia.org/wiki/New_Zealand', 50, "en"],
    ['http://en.wikipedia.org/wiki/Europe', 20, "NA"],
    ['http://en.wikipedia.org/wiki/Canada', 40, "NA"],
    ['http://en.wikipedia.org/wiki/Africa', 10, "NA"],
    ['http://en.wikipedia.org/wiki/South_America', 10, "NA"],
    ['http://en.wikipedia.org/wiki/Southeast_Asia', 10, "NA"],
]

# Index into `geographies`; incremented once per outer-loop iteration.
main_count = 0
# Main publishing loop: run forever — rotate through the configured
# geographies, fetch the previous day's news for the current one, and turn
# each article into an SEO-optimised WordPress post via a chain of LLM
# calls (keyword pruning, summary, title, images, table, trivia, body).
# NOTE(review): indentation below was reconstructed during review (the
# pasted source had lost it); confirm nesting against the original file.
while True:
    # Cheap model used for the helper classification/summarisation calls.
    gpt_mini = "gpt-4o-mini-2024-07-18"
    #gpt_mini = "deepseek"
    # `gpt_main` is assigned but never used below — appears to be leftover.
    gpt_main = "o1-preview"
    # gpt_main = gpt_mini
    # NOTE(review): hard-coded secret committed to source — rotate this key
    # and load it from the environment. It is immediately overwritten by
    # `api_key` (defined elsewhere in this file) on the next line anyway.
    deepseek_key = "sk-774110440b0e44b9a004bc38451bc7f5"
    deepseek_key = api_key
    # Round-robin geography selection: one geography per outer iteration.
    geography_obj = geographies[(main_count%len(geographies))]
    print(geography_obj)
    geography, max_items, lang = geography_obj[0], geography_obj[1], geography_obj[2]
    print(geography, max_items, lang)
    main_count = main_count+ 1
    # NOTE(review): another hard-coded credential (EventRegistry API key).
    APIKEY = "ae5e326f-a428-41d3-b0c8-09f1746f98b1"
    ## Initailize News Api
    er = EventRegistry(apiKey=APIKEY, allowUseOfArchive=False)
    ### Usage
    #location = "http://en.wikipedia.org/wiki/United_States"
    # start_date = "2024-10-12" # Example start date
    # end_date = "2024-10-13" # Example end date
    # Get today's date and yesterday's date.
    # NOTE(review): `datetime.datetime` only works if `datetime` names the
    # *module* here, yet the file head does `from datetime import datetime`.
    # Presumably `from eventregistry import *` rebinds `datetime` to the
    # module — confirm, as this is fragile.
    end_date = datetime.datetime.today().strftime("%Y-%m-%d")
    start_date = (datetime.datetime.today() - timedelta(days=1)).strftime("%Y-%m-%d")
    print(start_date, end_date)
    # Fetch articles
    articles = get_news_articles(er,start_date, end_date,geography, lang, max_items=max_items)
    # Print the number of articles fetched
    print(f"Number of articles fetched: {len(articles)}")
    if len(articles) == 0:
        # time.sleep(24*60*60)
        continue
    # Per-post sleep interval. The first two computations (spread posts
    # over 24h, cap at 1800s) are dead code: the final assignment pins the
    # interval to 1600 seconds unconditionally.
    interval = round((24 * 3600 / (len(articles))-1))
    print(interval)
    interval = min([1800, interval])
    interval = 1600
    print(interval)
    for article in tqdm(articles, desc="Processing News articles"):
        try:
            content = article.get("body", "No content available")
            # Top-150 (keyword, frequency) pairs extracted from the body.
            keywords = kw_search.extract_keywords(content,150)
            keywords_array = []
            for keyword, frequency in keywords:
                keywords_array.append(keyword)
            try:
                # Moderation gate: skip ads and several excluded topics.
                prompt = f'Output YES if the provided text is related to any of the following a. advertisement, b. meat, c. animals, d. mentions pakistan, palestine, north korea or iran, e. suicide, murder, homicide. Only answer YES OR NO. Text: {content}'
                is_advertisement = call_openai(prompt, 0, 100, gpt_mini, deepseek_key)
                print(is_advertisement)
                if "yes" in is_advertisement.lower():
                    #time.sleep(interval)
                    continue
                # Prune the keyword list: drop company/person names and
                # banned topics, keep locations and agriculture terms.
                prompt = f'''
Task: Refine a keyword array.
Input: {keywords_array}
Instructions:
1. Review the provided keyword array.
2. Process the keywords as follows:
a. Remove unnecessary terms, including:
- Company names (except Farmonaut)
- Person names
- Beef, Slaughter, Meat, Pork etc.
b. Mandatorily retain all the keywords of locations, cities, states, countries
c. Mandatorily retain all the keywords related to general agriculture, farming, farm machinery, harvest, fertilizers, farm inputs, irrigation, pest, disease, weeds, gis, forestry, traceability, soil, technology, consumer, employment, finance, insurance, law, real estate topics
d. For compound words lacking spaces:
- Add appropriate spacing
- Reassess relevance after spacing
e. Retain relevant abbreviations/acronyms
3. Output format:
- Return only the refined array of keywords
- Do not include any explanatory text or commentary
Note: Don't include any new keyword which is not a part of the original input keyword array.'''
                #prompt = f"Farmoanut an agritech startup is writing high ranking SEO articles for it's website. Remove all unnecessary keywords including any company names, person names from this array which should not be included in this blog. You can keep the location names if relevant. There may be few keywords which are a combination of multiple words but lack space between them. In such cases, add space between those words and then decide whether to keep the keyword in the array or not. If a keyword is an abbreviation/ acronym but relevant to Farmonaut, then keep it in the array. Strictly return only the keywords as array. Don't output any other text. Keyword Array: {keywords_array}"
                keywords_array = call_genai(prompt,1, 1500)
                #print('keywords:',keywords_array)
                # Parse the model's text response back into a Python list.
                keywords_array = string_to_array(keywords_array)
                #time.sleep(100000)
                # 500-word summary used as grounding for later prompts.
                prompt = f'Summarize the content of this blog text in 500 words: {content}'
                blog_summary = call_openai(prompt, 1, 2000, gpt_mini, deepseek_key)
                #blog_summary = content
                #print(blog_summary)
                #prompt = f'''Task: Generate SEO keyphrases for an agritech startup's website content.
                prompt = f'''Task: Generate SEO keyphrases for a website content.
Input:
- Keywords: {keywords_array}
- Reference Text: {blog_summary}
Instructions:
1. Analyze the provided content text.
2. Identify up to 10 high-ranking SEO keyphrases based on the following criteria:
- Relevant to the content's main topics
- Likely to have high search volume
- Do not include any company names (including Farmonaut)
- Do not include any individual names
- Do not include anything related to Beef, Slaughter, Meat, Pork etc.
- Include a mix of short-tail and long-tail keyphrases
- Ensure keyphrases are grammatically correct and make sense in context
3. Output format:
- Return results as an array of strings
- Each keyphrase should be its own element in the array
- Do not include any explanatory text or commentary
- If fewer than 10 suitable keyphrases are found, include only those that meet the criteria
Example output format:
["keyphrase 1", "keyphrase 2", "keyphrase 3", ...]
Note: Prioritize quality and relevance over quantity.'''
                #prompt = f"Farmoanut an agritech startup is writing high ranking SEO articles for it's website. Can you help identify upto 20 high ranking SEO keyphrases from this text. Don't include any keyphrase which includes a company's name or an individual's name. Strictly return result in an array format. Content Text: {content}"
                keyphrases = call_genai(prompt,1, 4000)
                #print(keyphrases)
                # Pick a tone/theme from the predefined catalogue.
                prompt = f'Identify the best tone and theme from the provided array for the content given below. Answer in less than 50 words. blog_tones_and_themes_array: {blog_tones_and_themes} , content: {content}'
                tone_and_theme = call_openai(prompt, 1, 200, gpt_mini, deepseek_key)
                #print(tone_and_theme)
                prompt = f'''Task: Generate an SEO-optimized blog title for Farmonaut.
Inputs:
- Keywords: {keywords_array}
- Key phrases: {keyphrases}
- Summary: {blog_summary}
- Tone and Theme: {tone_and_theme}
Instructions:
1. Analyze the provided keywords, key phrases, tone and theme.
2. Create a single, compelling blog title that:
- Incorporates 1-3 of the most relevant keywords, key phrases, tone and theme
- Is optimized for search engine ranking
- Accurately reflects the likely content of the blog
- Is concise (preferably 150 characters, not exceeding 200)
- Uses power words or emotional triggers if appropriate
- Clearly communicates value to the reader
- Do not include anything related to Beef, Slaughter, Meat, Pork etc.
- Do not include any person names, company names (except Farmonaut)
- Mandatorily Localize it if location names are available in keywords.
3. Language consideration:
- If the keywords/phrases are not in English, craft the title in that specific language
4. Output format:
- Return only the suggested title
- Do not include any explanatory text, quotation marks, or additional commentary'''
                #prompt = f"Farmonaut wants to publish a high ranking SEO blog comprising of the following keywords: {keywords_array}, and these keyphrase: {keyphrases}. Suggest a high ranking SEO optimized title for the blog. If the keywords are in a language different than English, suggest the title in that particular language only. Don't output any other text."
                title = call_genai(prompt, 1, 200)
                print(title)
                #continue
                prompt = f'''Task: Generate an SEO-optimized blog summary.
Inputs:
- Keywords: {keywords_array}
- Key phrases: {keyphrases}
- Reference Text: {blog_summary}
Instructions:
1. Create a 150-word summary that:
- Incorporates the most relevant keywords and key phrases naturally
- Provides a clear overview of the blog's main topics
- Is optimized for search engine ranking
- Do not include an person names, company names (except Farmonaut)
- Farmonaut is not an online marketplace. Keep this in mind while writing the summary
- Do not include anything related to Beef, Slaughter, Meat, Pork etc.
- Engages the reader and encourages them to read the full blog
- Tone and Theme: {tone_and_theme}
- Highlights the value or insights the reader will gain
2. SEO Optimization:
- Include 2-3 of the most important keywords in the first sentence
- Distribute other keywords and phrases throughout the summary
- Ensure the summary reads naturally, avoiding keyword stuffing
3. Language consideration:
- If the keywords/phrases are not in English, write the summary in that specific language
4. Structure:
- Open with a hook or compelling statement
- Briefly outline the main points or sections of the blog
- Close with a teaser or call-to-action to read the full article
5. Output format:
- Provide only the 150-word summary
- Do not include any additional text, explanations, or metadata
'''
                #prompt = f"Farmonaut wants to publish a high ranking SEO blog comprising of the following keywords: {keywords_array}, and these keyphrases: {keyphrases}. Suggest a highranking SEO optimized context/summary for this blog in 150 words. If the keywords are in a language different than English, write the context/summary in that particular language only. Don't output any other text."
                caption = call_genai(prompt, 1, 500)
                #print(caption)
                # Accumulator for everything that gets pushed to WordPress.
                post_data = {
                    'caption': caption,
                    'media': []
                }
                prompt = f'''Make 2 interesting short quantitative trivia statements upto 20 words each based upon these:
- title: {title}
- information: {caption}
- tone and theme: {tone_and_theme}
- exclude: don't make trivia about benefits of drones and IoTs
Output Format:
-[trivia1, trivia2]'''
                trivias = call_genai(prompt, 0, 500)
                # trivias = call_genai(prompt, 1, 500)
                #print(trivias)
                # Pick the 4 most relevant videos from the static catalogue.
                prompt = f'''Identify 4 videos from this array which are in the format [(title, video_id)] which best match this content: {content}. Strictly return the response in this format [(title1, video_id1, title2, video_id2),...]. Video Array: {youtube_videos}.
'''
                suggested_videos = call_openai(prompt, 1, 500, gpt_mini, deepseek_key)
                # print(suggested_videos)
                prompt = f'''Task: Recommend an optimal table type for Farmonaut's SEO blog.
Inputs:
- Title: {title}
- Context: {caption}
- Tone and Theme: {tone_and_theme}
Instructions:
1. Analyze the provided inputs to understand the blog's focus.
2. Suggest a table type that:
- Enhances reader understanding of the content
- Aligns with SEO best practices
3. Table recommendation criteria:
- Clarity: Easily understood by target audience
- Relevance: Directly relates to blog content
- Using only estimated values while adding any quantitative data
4. Language consideration:
- Use the same language as the provided inputs
5. Output format:
- Provide a 150-word description of the recommended table
- Focus on table type, key columns/rows
- Do not include any additional text or explanations
Note: Prioritize a table suggestion that adds substantive value to the blog.'''
                #prompt = f"Farmonaut wants to publish a high ranking SEO blog comprising of the following keywords: {keywords_array}, title: {title}, context: {caption}. Suggest what type of table will be the best to include in this blog. If the keywords, title, context are in a language different than English, suggest the table info in that particular language only. This table should make it clear to the reader how Farmonaut Satellite System can be useful in the context of this blog. Answer only in 50 words. Don't output any other text."
                table_info = call_genai(prompt, 1, 500)
                #print(table_info)
                prompt = f'''
Task: Suggest two SEO-optimizing AI image descriptions (DALL-E 3) for a SEO optimized blog.
Inputs:
a. Keywords: {keywords_array}
b. Title: {title}
c. Context: {caption}
d. KeyPhrases: {keyphrases}
e. Tone and Theme {tone_and_theme}
Instructions:
1. Analyze the provided inputs to understand the blog's theme and focus.
2. Create two distinct image descriptions that:
- Strongly relate to the blog's content
- Incorporate relevant keywords naturally
- Enhance SEO potential and reader engagement
- Are highly detailed and realistic
- Avoid depicting specific individuals or people from the Middle East and India
- Don't depict drones and IoT Devices in the images
Image description criteria:
- Relevance: Directly supports blog content
- Vividness: Uses rich, descriptive language for clarity
- SEO value: Incorporates 2-3 key terms from the provided keywords
- Uniqueness: Each image should highlight different aspects of the topic
- Realism: Emphasize realistic, practical scenarios
Output format:
[
"Detailed description of first image, incorporating relevant keywords and focusing on a key aspect of the blog topic.",
"Detailed description of second image, highlighting a different facet of the blog content or technologies, using appropriate keywords."
]
Note: Focus on creating descriptions that would result in images that add significant value to the blog post while optimizing for search engines. Ensure descriptions are distinct from each other and highly relevant to the content. DO NOT OUTPUT ANY OTHER TEXT WITH THE RESPONSE.
'''
                #prompt = f"Farmonaut wants to publish a high ranking SEO blog comprising of the following a. keywords: {keywords_array}, b. title: {title}, c. context: {caption}. Suggest descriptions of two DALL-E generated AI images that should be added to the blog for achieving high search engine ranking. Make them extremely realistic wherever possible. Don't include any person from the middle east in the images. Provide response in the following array format: [image1_description, image2_description] Don't output any other text."
                image_descriptions = call_genai(prompt, 1, 1000)
                #print(image_descriptions)
                image_descriptions = string_to_array(image_descriptions)
                media_url_arr = []
                #print(image_descriptions)
                #time.sleep(10000)
                # Generate one image per description; failures are logged
                # and skipped so a single bad prompt doesn't kill the post.
                for image_description in image_descriptions:
                    try:
                        media_url_arr.append(ai_image_url(image_description))
                    except:
                        # NOTE(review): bare except — narrow to Exception.
                        print(traceback.format_exc())
                # image_url1 = ai_image_url(image_descriptions[0])
                #image_url2 = ai_image_url(image_descriptions[1])
                #media_url_arr = [image_url1, image_url2]
                # print(media_url_arr)
                # print('processing media')
                # Upload the generated images to WordPress media library.
                media_info = process_media2(media_url_arr, title)
                if media_info:
                    post_data['media'].extend(media_info)
                post_data['featured_media'] = media_info[0]['id'] if media_info else None
                #print('media processed')
                # NOTE(review): this span looks corrupted — the HTML body of
                # `stripe_html` and the opening of the blog-writing prompt
                # appear to have been stripped/merged (the bare commas below
                # were presumably a list of HTML tags such as <h1>, <h2>,
                # <p>, ...). Restore from the original file: stripe_html
                # should hold the subscription HTML, and the text below
                # should be a separate f-string `prompt`.
                stripe_html = '''
, , , , , ,
,
, etc.
* Organize the post using clear subheadings and bullet points (with subheading text in font color #034d5c) to enhance readability.
* Ensure the blog is mobile responsive.
3. Keyword & Phrase Integration:
* Seamlessly incorporate as many of the provided keywords ({keywords_array}) as naturally possible.
* Include the key phrases ({keyphrases}) throughout the content.
* If the keywords/key phrases are in a language other than English, write the entire post in that language.
4. Localization:
* Adapt the content to reflect any local context when location names are included in the keywords.
5. Farmonaut-Specific Guidance:
* Base any Farmonaut-related information solely on the context provided in {farmonaut_info}.
* Do not include case studies, success stories, or any hallucinated details about Farmonaut.
* Reiterate that Farmonaut is neither an online marketplace nor a manufacturer/seller of farm inputs or machinery, and it is not a regulatory body.
6. SEO Compliance:
* Implement all the latest SEO recommendations as specified in {yoast_guidelines2}.
Media & Multimedia Integration:
1. Images:
* Insert all images provided in the JSON object {post_data['media']}.
* Each image must have:
* Border-radius: 16px
* Box-shadow: 10px 10px 15px
* Cursor: pointer
* Ensure that clicking any image redirects to https://farmonaut.com/app_redirect.
* All images should appear within the top 75% of the blog content.
2. YouTube Videos:
* Embed videos from the list {suggested_videos}, where each item is a tuple formatted as (‘video_title’, ‘video_id’).
* Construct the full URL for each video as: https://youtube.com/watch?v=video_id
* Videos must have:
* Width: 100%
* Height: 500px
* Border-radius: 16px
* Box-shadow: 10px 10px 15px
* Place these videos uniformly within the top 75% of the blog content.
3. Tables:
* Insert a beautifully formatted table based on the context provided in {table_info}.
4. Trivia Sections:
* Include trivia snippets at both the top and the middle of the blog post.
* Format these trivia sections with:
* Bold and italic text
* Enclosed in double-quotes
* Font-size: 50px
* Line-height: 50px
* Color: #034d5c
5. Links & Buttons:
* Distribute the following links (formatted in bold) within the top 75% of the content:
* App Button:
* Image: https://farmonaut.com/Images/web_app_button.png
* Link: https://farmonaut.com/app_redirect
* Height: 80px
* API: https://sat.farmonaut.com/api
* API Developer Docs: https://farmonaut.com/farmonaut-satellite-weather-api-developer-docs/
* Android App Button:
* Image: https://farmonaut.com/wp-content/uploads/2020/01/get_it_on_google_play.png
* Link: https://play.google.com/store/apps/details?id=com.farmonaut.android
* Height: 80px
* iOS App Button:
* Image: https://farmonaut.com/wp-content/uploads/2020/01/available_on_app_store.png
* Link: https://apps.apple.com/in/app/farmonaut/id1489095847
* Height: 80px
* Earn With Farmonaut:
* Link: https://farmonaut.com/affiliate-program
* Accompany this link with a YouTube video: https://www.youtube.com/watch?v=QydYrdtPBP0
* Include a summary:"Earn 20% recurring commission with Farmonaut's affiliate program by sharing your promo code and helping farmers save 10%. Onboard 10 Elite farmers monthly to earn a minimum of $148,000 annually—start now and grow your income!"
* Format this affiliate summary in bold and use a font size larger than the main blog text.
* Subscription HTML:
* Embed the custom HTML for Farmonaut subscriptions provided as {stripe_html}.
6. FAQ Section:
* Include a detailed FAQ section near the end of the post to address common queries.
Additional Guidelines:
* Accuracy & Factuality:
* All content must be factual and based solely on the information provided.
* Do not include any external case studies, success stories, or unverified claims.
* Content Organization:
* Arrange sections logically with smooth transitions.
* Use varied sentence structures to maintain an engaging flow.
* Exclusions:
* Do not mention any partnerships or collaborations.
* Avoid any content that misrepresents Farmonaut’s business model.
Final Instruction:Generate the blog post following all of the above requirements, ensuring it is at least 3500 words long, fully compliant with HTML formatting, SEO guidelines, multimedia integration, and the specified style and tone.
'''
                # (The following commented-out legacy prompt had lost its
                # leading `#` on continuation lines in this copy; restored.)
                #prompt = f"Generate a HTML formatted very detailed and comprehensive blog post of at least 5000 words with , ,
                #, , ,
                #,
                #,
                #, blocks wherever necessary in informational tone and as a first-person plural (we, us, our, ours) mandatorily including the following keywords: {keywords_array}. Mandatorily included these keyphrases as well: {keyphrases} \n\n. Don't include the title in the blog content. The blog needs to be at least 5000 words in length. If the keywords, keyphrases, title, context are in a language different than English, write the blog in that particular language only. Please don't include any hallucinated information about Farmonaut in the blog. It is strictly prohibited to include any case study or success story in the blog. To add any more details in the blog related to Farmonaut, use information from this text and further elaborate on it if necessary: {farmonaut_info} \n\n Strictly Incorporate these keywords into the blog: {keywords_array}. If any of the keywords look unrelated and out of context to the blog, then don't add them to the blog. Add Images (URLs) from this JSON object {post_data['media']} into the blog in
                #blocks wherever necessary including the absolute top of the blog. Include the table in the blog using this context: {table_info}. Add links to https://farmonaut.com/app_redirect, https://sat.farmonaut.com/api, https://play.google.com/store/apps/details?id=com.farmonaut.android, https://apps.apple.com/in/app/farmonaut/id1489095847, https://farmonaut.com/farmonaut-satellite-weather-api-developer-docs/ wherever necessary. Include this custom HTML code for subscribing to Farmonaut: {stripe_html} \n Add bullet points and subheadings wherever necessary. Please include an FAQ section as well. The output should not have any other text apart from the content of the blog."
                #blog_content = call_genai(prompt, 1, 8000)
                # NOTE(review): as written, `prompt` here still holds the
                # image-description prompt from above — the intended
                # blog-writing prompt seems to be the text swallowed into
                # `stripe_html`. Confirm against the original file.
                blog_content = call_openai(prompt, 1, 8000, 'o3-mini', deepseek_key)
                # print(blog_content)
                # Map the post to a WordPress category via a constrained
                # LLM call; fall back to "blogs" (id 5) on a bad answer.
                category_ids = "south-america: 580, asia: 579, africa:578, united-kingdom: 577, canada: 576, europe: 575, australia: 574, blogs: 5, news:573, case_study:546, area_estimation:542, remote_sensing:9, precision_farming:548, api_development:572, usa:561"
                prompt = f'Based upon this title: {title}, and caption: {caption} , identify the best category id in which this title fits in: {category_ids}. Strictly only return the integer value as the response'
                category_id = call_genai(prompt, 0, 5)
                try:
                    category_id = int(category_id)
                except:
                    # NOTE(review): bare except — narrow to ValueError.
                    category_id = 5
                #publish_to_wordpress(title, blog_content, post_data['media'], post_data['caption'], category_id)
                post_data['title'] = title
                post_data['content'] = blog_content
                post_data['status'] = 'publish'
                post_data['excerpt'] = caption
                post_data['comment_status'] = 'open'
                post_data['categories'] = [category_id]
                # NOTE(review): `slug` is computed but never used below.
                prompt = f"Can you convert this text into a url slug. Don't output any other text. Text to convert to url slug: {title}"
                slug = call_genai(prompt,0, 500)
                # Schema.org images: prefer the two uploaded media URLs,
                # fall back to the raw generated-image URLs.
                try:
                    schema_media = [post_data['media'][0]['url'], post_data['media'][1]['url']]
                    # print(schema_media)
                except:
                    # NOTE(review): bare except — narrow to IndexError/KeyError.
                    print(traceback.format_exc())
                    schema_media = media_url_arr
                # NOTE(review): empty f-string — the <script type="application/ld+json">
                # wrapper (using create_schema / schema_media) appears to have
                # been stripped from this copy; restore from the original.
                structured_schema_script = (f'')
                #print(structured_schema_script)
                post_data["content"] = structured_schema_script + blog_content
                publish_or_update_wordpress_post(wp_url, wp_username, wp_password, post_data)
                # NOTE(review): done_posts is never appended to in this loop,
                # so this rewrites the same list every time — confirm intent.
                save_to_file('posts_done.txt', str(done_posts))
                # Throttle between posts (pinned to 1600s above).
                time.sleep(interval)
            except:
                # NOTE(review): bare except — log and back off briefly.
                print(traceback.format_exc())
                time.sleep(70)
            # Clean up temporary downloaded/generated files per article.
            remove_all_files_in_folder('insta_files')
            # delete_files.delete_files_and_empty_folders()
            # delete_files.delete_files_from_paths()
        except:
            # NOTE(review): bare except — outer per-article guard; pauses
            # and logs so one bad article cannot stop the loop.
            time.sleep(10)
            print(traceback.format_exc())
            #time.sleep(24*60*60)
#time.sleep(24*60*60)
# NOTE(review): if the `while True` loop above runs at module level this
# call is unreachable; presumably the loop actually lives inside a
# `def main()` earlier in the file and this is the script entry point —
# confirm, and prefer an `if __name__ == "__main__":` guard.
main()