"""Automated SEO blog pipeline for Farmonaut.

Reads keyword clusters from an Excel sheet, asks Claude for a title,
summary, image prompts and the blog body, generates DALL-E images,
uploads them to the WordPress media library, and publishes (or updates)
the post through the WordPress REST API.
"""

# --- standard library ---
import ast
import base64
import html
import itertools
import json
import mimetypes
import os
import re
import shutil
import ssl
import tempfile
import time
import traceback
from difflib import SequenceMatcher
from io import BytesIO
from urllib.parse import urlparse

# --- third party ---
import anthropic
import cv2
import instaloader
import numpy as np
import openpyxl
import pandas as pd
import pytesseract
import requests
from dotenv import load_dotenv
from fuzzywuzzy import fuzz
from instascrape import Profile, Post
from openai import OpenAI
from PIL import Image
from requests.auth import HTTPBasicAuth
from sklearn.feature_extraction.text import TfidfVectorizer
from wordpress_xmlrpc import Client, WordPressPost
from wordpress_xmlrpc.compat import xmlrpc_client
from wordpress_xmlrpc.methods.posts import NewPost

# Load .env first so every credential below can come from the environment.
load_dotenv()

# SECURITY: the OpenAI/Anthropic API keys and the WordPress application
# password were previously hard-coded in this file. They now come from the
# environment (OPENAI_API_KEY, ANTHROPIC_API_KEY, WP_PASSWORD,
# INSTAGRAM_SESSIONID). The previously committed credentials must be rotated.
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))

# NOTE(review): globally disabling TLS certificate verification is a security
# smell; kept only because the Instagram download path relied on it. TODO:
# remove once certificates verify cleanly.
ssl._create_default_https_context = ssl._create_unverified_context

# Instagram session id (copied from a logged-in browser session).
SESSIONID = os.getenv("INSTAGRAM_SESSIONID")

# Headers for Instagram requests.
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.74 Safari/537.36 Edg/79.0.309.43",
    "cookie": f'sessionid={SESSIONID};'
}

# Instaloader configured to fetch media only (no metadata/comments/geotags).
L = instaloader.Instaloader()
L.post_metadata_txt_pattern = ""
L.download_geotags = False
L.save_metadata = False
L.save_metadata_json = False
L.download_comments = False

# WordPress credentials (password from the environment -- see SECURITY note).
wp_url = "https://www.farmonaut.com"
wp_username = "ankuromar296"
wp_password = os.getenv("WP_PASSWORD")


def normalize_title(title):
    """Normalize a post title for equality comparison.

    Decodes HTML entities, strips tags, lowercases, and removes every
    non-alphanumeric character so that differently rendered titles compare
    equal.
    """
    decoded = html.unescape(title)
    no_html = re.sub('<[^<]+?>', '', decoded)
    return re.sub(r'[^a-z0-9]', '', no_html.lower())


def publish_or_update_wordpress_post(site_url, username, password, post_data):
    """Create a WordPress post, or update it if one with the same title exists.

    Returns the JSON of the created/updated post.
    Raises Exception when the REST API is unreachable or the publish fails.
    """
    posts_url = f"{site_url}/wp-json/wp/v2/posts"
    auth = HTTPBasicAuth(username, password)

    # Fail fast if the REST API is not reachable with these credentials.
    try:
        response = requests.get(f"{site_url}/wp-json", auth=auth)
        response.raise_for_status()
    except requests.exceptions.RequestException as e:
        raise Exception(f"Failed to access WordPress API: {str(e)}")

    # Update in place when a post with the same (normalized) title exists.
    existing_post = check_existing_post_by_title(site_url, auth, post_data['title'])
    if existing_post:
        update_url = f"{posts_url}/{existing_post['id']}"
        post_data['id'] = existing_post['id']
        response = requests.post(update_url, json=post_data, auth=auth)
    else:
        response = requests.post(posts_url, json=post_data, auth=auth)

    if response.status_code in [200, 201]:
        return response.json()
    raise Exception(f"Failed to publish/update post: {response.text}")


def check_existing_post_by_title(site_url, auth, title):
    """Return the first existing post whose normalized title matches, else None."""
    encoded_title = requests.utils.quote(title)
    search_url = f"{site_url}/wp-json/wp/v2/posts?search={encoded_title}"
    response = requests.get(search_url, auth=auth)
    title = normalize_title(title)
    if response.status_code == 200:
        for post in response.json():
            # normalize_title already lowercases, so no extra .lower() needed.
            rendered_title = normalize_title(post['title']['rendered'])
            print(rendered_title, title)
            if rendered_title == title:
                print('post found', rendered_title)
                return post
    return None


def replace_keywords(excel_file, sheet_name, keyword_array):
    """Fuzzy-replace each keyword with its best match (>70 ratio) from Excel.

    The sheet must contain a 'Keywords' column; keywords with no close match
    are returned unchanged.
    """
    df = pd.read_excel(excel_file, sheet_name=sheet_name)
    if 'Keywords' not in df.columns:
        raise ValueError("Excel sheet must have a 'Keywords' column")
    excel_keywords = df['Keywords'].tolist()

    def find_best_match(keyword):
        # Highest fuzz.ratio wins, but only accept it above a 70 threshold.
        best_match = max(excel_keywords,
                         key=lambda x: fuzz.ratio(keyword.lower(), str(x).lower()))
        if fuzz.ratio(keyword.lower(), str(best_match).lower()) > 70:
            return best_match
        return keyword

    return [find_best_match(keyword) for keyword in keyword_array]


def match_keywords(excel_path, sheet_name, text, column_letter='A'):
    """Return the 5 sheet keywords most similar to any word of *text*."""
    workbook = openpyxl.load_workbook(excel_path)
    sheet = workbook[sheet_name]
    keywords = [cell.value for cell in sheet[column_letter] if cell.value]

    def similarity(a, b):
        return SequenceMatcher(None, a.lower(), b.lower()).ratio()

    # Score each keyword by its best match against any single word of text.
    scores = [(keyword, max(similarity(keyword, word) for word in text.split()))
              for keyword in keywords]
    top_matches = sorted(scores, key=lambda x: x[1], reverse=True)[:5]
    return [match[0] for match in top_matches]


def remove_common_elements(array1, array2):
    """Return array1 without any element that also appears in array2."""
    # Build the exclusion set once (the original rebuilt it per element).
    excluded = set(array2)
    return [x for x in array1 if x not in excluded]


def remove_keyphrases_from_excel(file_path, keyphrases, output_path=None):
    """Delete every occurrence of each keyphrase from every cell of a workbook.

    Cells left empty (whitespace only) after removal are set to None. Saves
    to *output_path* when given, otherwise overwrites *file_path* in place.
    """
    wb = openpyxl.load_workbook(file_path)
    for sheet in wb.worksheets:
        for row in sheet.iter_rows():
            for cell in row:
                if cell.value:
                    cell_value = str(cell.value)
                    for phrase in keyphrases:
                        if phrase in cell_value:
                            cell_value = cell_value.replace(phrase, '')
                    cell.value = cell_value if cell_value.strip() else None
    wb.save(output_path if output_path else file_path)
    print("Keyphrases removed successfully.")


def read_array_from_file(file_path):
    """Read a file containing a Python literal (e.g. "[1, 2]") and return it."""
    with open(file_path, 'r') as file:
        content = file.read()
    # literal_eval evaluates the literal safely (no arbitrary code execution).
    return ast.literal_eval(content)


def remove_all_files_in_folder(folder_path):
    """Delete every file and sub-directory inside *folder_path* (best effort)."""
    if not os.path.exists(folder_path):
        return
    for filename in os.listdir(folder_path):
        file_path = os.path.join(folder_path, filename)
        try:
            if os.path.isfile(file_path):
                os.remove(file_path)
            elif os.path.isdir(file_path):
                shutil.rmtree(file_path)
        except Exception as e:
            # Best-effort cleanup: report and keep going.
            print(f"Error deleting {file_path}: {e}")
    print(f"All files and directories in {folder_path} have been removed.")


def call_openai(prompt, temperature, max_tokens):
    """Send *prompt* to gpt-4o-mini with the Farmonaut SEO system role."""
    completion = client.chat.completions.create(
        model="gpt-4o-mini-2024-07-18",
        messages=[
            {"role": "system", "content": "You are a expert in SEO and a representative of Farmonaut."},
            {"role": "user", "content": prompt}
        ],
        max_tokens=max_tokens,
        temperature=temperature
    )
    return completion.choices[0].message.content


def save_to_file(file_name, content):
    """Overwrite *file_name* with *content*."""
    with open(file_name, 'w') as file:
        file.write(content)
    print(f"Content saved to {file_name}")


def string_to_array(string):
    """Parse a string holding a Python list literal; fall back to the string."""
    try:
        array = ast.literal_eval(string)
        print(array)
    except (ValueError, SyntaxError):
        # Model output was not a valid literal -- pass it through unchanged.
        array = string
    return array


def get_first_column_values(file_path, sheet_name=None):
    """Return column A of a sheet as strings (empty string for empty cells)."""
    workbook = openpyxl.load_workbook(file_path, data_only=True)
    sheet = workbook[sheet_name] if sheet_name else workbook.active
    first_column_values = []
    for cell in sheet['A']:
        first_column_values.append(str(cell.value) if cell.value is not None else "")
    return first_column_values


def get_file_extension(url):
    """Return the file extension of a URL's path, without the dot ('' if none)."""
    path = urlparse(url).path
    file_extension = os.path.splitext(path)[1]
    return file_extension[1:] if file_extension else ""


def remove_keywords(selected_keywords, excel_file, sheet_name, keyword_column, num_keywords=5):
    """Remove rows whose *keyword_column* value is in *selected_keywords*.

    The workbook is rewritten in place. *num_keywords* is unused but kept for
    signature compatibility with existing callers.
    """
    df = pd.read_excel(excel_file, sheet_name=sheet_name)
    if keyword_column not in df.columns:
        raise ValueError(f"Column '{keyword_column}' not found in the Excel sheet.")
    df = df[~df[keyword_column].isin(selected_keywords)]
    df.to_excel(excel_file, sheet_name=sheet_name, index=False)


def select_and_remove_keywords(text, excel_file, sheet_name, keyword_column, num_keywords=5):
    """Pick the *num_keywords* keywords most similar to *text* (TF-IDF cosine),
    remove them from the sheet, and return them.
    """
    df = pd.read_excel(excel_file, sheet_name=sheet_name)
    if keyword_column not in df.columns:
        raise ValueError(f"Column '{keyword_column}' not found in the Excel sheet.")
    keywords = df[keyword_column].tolist()

    vectorizer = TfidfVectorizer()
    # Defensive: coerce dicts to strings before vectorizing.
    text = str(text) if isinstance(text, dict) else text
    keywords = [str(keyword) if isinstance(keyword, dict) else keyword for keyword in keywords]

    # Row 0 is the text; rows 1..n are the keywords. The first row of the
    # similarity matrix therefore scores every keyword against the text.
    tfidf_matrix = vectorizer.fit_transform([text] + keywords)
    cosine_similarities = (tfidf_matrix * tfidf_matrix.T).toarray()[0][1:]

    top_indices = np.argsort(cosine_similarities)[-num_keywords:][::-1]
    selected_keywords = [keywords[i] for i in top_indices]

    df = df[~df[keyword_column].isin(selected_keywords)]
    df.to_excel(excel_file, sheet_name=sheet_name, index=False)
    return selected_keywords


def call_genai(prompt, temperature, max_tokens):
    """Send *prompt* to Claude 3.5 Sonnet with the Farmonaut system prompt."""
    genai_client = anthropic.Anthropic(
        # Key comes from the environment -- see SECURITY note at file top.
        api_key=os.getenv("ANTHROPIC_API_KEY"),
    )
    message = genai_client.messages.create(
        model="claude-3-5-sonnet-20240620",
        max_tokens=max_tokens,
        temperature=temperature,
        system="You are an SEO expert, a gis/ remote sensing expert, an agriculture and horticulture scientist, and a representative of Farmonaut (farmonaut.com).",
        messages=[
            {"role": "user", "content": prompt}
        ]
    )
    return message.content[0].text


def upload_media_to_wordpress(file_path, title):
    """Upload a file to the WordPress media library.

    Returns (media_id, source_url), or (None, None) on failure.
    """
    endpoint = f"{wp_url}/wp-json/wp/v2/media"
    auth = HTTPBasicAuth(wp_username, wp_password)
    mime_type, _ = mimetypes.guess_type(file_path)

    media_data = {
        'alt_text': title,
        'caption': title,
        'description': title
    }
    with open(file_path, 'rb') as file:
        files = {'file': (os.path.basename(file_path), file, mime_type)}
        # BUG FIX: requests ignores json= when files= is present, so the
        # alt text/caption were silently dropped. Send them as multipart
        # form fields via data= instead.
        response = requests.post(endpoint, files=files, auth=auth, data=media_data)

    if response.status_code == 201:
        return response.json()['id'], response.json()['source_url']
    print(f"Failed to upload media. Status code: {response.status_code}")
    print(f"Response: {response.text}")
    return None, None


def extract_text_from_video(video_path):
    """OCR one frame per second of a video; return the concatenated text."""
    video = cv2.VideoCapture(video_path)
    fps = int(video.get(cv2.CAP_PROP_FPS))
    frame_count = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
    # Guard: some containers report 0 fps, which previously caused a
    # ZeroDivisionError computing the duration.
    if fps <= 0:
        video.release()
        return ""
    duration = frame_count / fps

    text = ""
    for i in range(0, int(duration), 1):
        video.set(cv2.CAP_PROP_POS_MSEC, i * 1000)
        success, frame = video.read()
        if not success:
            break
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        text += pytesseract.image_to_string(gray) + "\n"
    video.release()
    return text


def process_media(post):
    """Download an Instagram post's media, OCR it, and upload to WordPress.

    Returns a list of {'type', 'text', 'id', 'url'} dicts for each uploaded
    file; files that are neither video nor image are skipped.
    """
    media_info = []
    with tempfile.TemporaryDirectory() as tmpdir:
        L.download_post(post, target=tmpdir)
        for file in os.listdir(tmpdir):
            file_path = os.path.join(tmpdir, file)
            if file.endswith('.mp4'):
                text = extract_text_from_video(file_path)
                media_type = 'video'
            elif file.endswith(('.jpg', '.jpeg', '.png', 'heic')):
                text = extract_text_from_image(file_path)
                media_type = 'image'
            else:
                continue  # neither video nor image
            # BUG FIX: upload_media_to_wordpress requires a title argument;
            # the OCR text is used as the media title here.
            media_id, media_url = upload_media_to_wordpress(file_path, text)
            if media_id and media_url:
                media_info.append({
                    'type': media_type,
                    'text': text,
                    'id': media_id,
                    'url': media_url
                })
    return media_info


def process_media2(media_url_arr, title):
    """Download each URL into insta_files/ and upload the files to WordPress.

    Returns a list of {'type', 'text', 'id', 'url'} dicts for each
    successfully uploaded file.
    """
    media_info = []
    media_num = 0
    folder_name = 'insta_files'

    for url in media_url_arr:
        media_num = media_num + 1
        file_path = folder_name + '/' + str(media_num) + '.' + str(get_file_extension(url))
        try:
            response = requests.get(url, stream=True)
        except requests.exceptions.RequestException:
            print(traceback.format_exc())
            continue
        os.makedirs(os.path.dirname(file_path), exist_ok=True)
        if response.status_code == 200:
            with open(file_path, 'wb') as file:
                for chunk in response.iter_content(chunk_size=8192):
                    if chunk:
                        file.write(chunk)
            print(f"File downloaded successfully: {file_path}")
        else:
            print(f"Failed to download file. Status code: {response.status_code}")

    # Guard: if every download failed the folder never got created, and
    # os.listdir would raise FileNotFoundError.
    if not os.path.isdir(folder_name):
        return media_info

    for file in os.listdir(folder_name):
        file_path = os.path.join(folder_name, file)
        media_id, media_url = upload_media_to_wordpress(file_path, title)
        if media_id and media_url:
            media_info.append({
                'type': 'image',
                'text': title,
                'id': media_id,
                'url': media_url
            })
    return media_info


def extract_text_from_image(image_path):
    """OCR a single image file and return the extracted text."""
    image = Image.open(image_path)
    return pytesseract.image_to_string(image)


def publish_to_wordpress(title, content, media_info, excerpt, category_id):
    """Publish a new post (legacy path; main() uses
    publish_or_update_wordpress_post instead).
    """
    # BUG FIX: the endpoint was missing the /wp-json/wp/v2 REST prefix.
    endpoint = f"{wp_url}/wp-json/wp/v2/posts"
    auth = HTTPBasicAuth(wp_username, wp_password)

    # Append each image to the content.
    # NOTE(review): the original appended a constant f-string with no
    # placeholder ("Instagram image"); the <img> markup appears to have been
    # stripped from the source. Reconstructed below -- TODO confirm markup.
    for media in media_info:
        if media['type'] == 'image':
            content += f'\n\n<img src="{media["url"]}" alt="Instagram image"/>'

    slug = title.replace(' ', '-')
    post_data = {
        'title': title,
        'content': content,
        'status': 'publish',
        'excerpt': excerpt,
        'slug': slug,
        'comment_status': 'open',
        'categories': [category_id],
        # First image doubles as the featured image.
        'featured_media': media_info[0]['id'] if media_info else None
    }
    response = requests.post(endpoint, json=post_data, auth=auth)
    if response.status_code == 201:
        print("Post published successfully!")
    else:
        print(f"Failed to publish post. Status code: {response.status_code}")
        print(f"Response: {response.text}")


def get_instagram_posts(username, limit=100):
    """Return up to *limit* most recent posts of an Instagram profile.

    BUG FIX: the original body returned an undefined name `posts`
    (NameError); the commented-out instaloader implementation is restored.
    """
    profile = instaloader.Profile.from_username(L.context, username)
    return list(itertools.islice(profile.get_posts(), limit))


def get_keyword_arrays(file_path):
    """Yield each row of the 'Keywords' column parsed as a Python list.

    Rows that are not valid list literals are reported and yielded as [].
    """
    df = pd.read_excel(file_path, usecols=['Keywords'])
    for keywords in df['Keywords']:
        try:
            yield ast.literal_eval(keywords)
        except (ValueError, SyntaxError):
            print(f"Invalid format for keywords: {keywords}")
            yield []


def ai_image_url(prompt):
    """Generate one 1024x1024 DALL-E 3 image and return its URL."""
    print(prompt)
    response = client.images.generate(
        model="dall-e-3",
        prompt=prompt,
        size="1024x1024",
        quality="standard",
        n=1,
    )
    return response.data[0].url


def main():
    """Drive the pipeline: generate and publish one blog per keyword cluster."""
    total_count, good_count, bad_count = 0, 0, 0
    posts_done = []
    posts_failed = []

    # Company background injected into the blog-generation prompt so the
    # model does not hallucinate facts about Farmonaut.
    farmonaut_info = '''
    Farmonaut is a pioneering agricultural technology company that offers advanced, satellite-based farm management solutions. Its mission is to make precision agriculture affordable and accessible to farmers worldwide by integrating innovative technology and data-driven insights into traditional farming practices. Farmonaut's platform provides valuable services such as real-time crop health monitoring, AI-based advisory systems, blockchain-based traceability, and resource management tools. Here's a comprehensive overview of its technologies, business model, target audience, and value proposition.

    1. Technologies
    Farmonaut leverages a blend of satellite imagery, artificial intelligence (AI), blockchain, and machine learning to address various agricultural challenges.
    Satellite-Based Crop Health Monitoring: Farmonaut uses multispectral satellite images to monitor crop health, providing farmers with insights into vegetation health (NDVI), soil moisture levels, and other critical metrics. This data helps farmers make informed decisions about irrigation, fertilizer usage, and pest management, ultimately optimizing crop yields and reducing resource wastage ( Farmonaut ) ( Farmonaut ).
    Jeevn AI Advisory System: This AI-driven personalized farm advisory tool delivers real-time insights, weather forecasts, and expert crop management strategies to farmers. Jeevn AI analyzes satellite data and other inputs to generate customized advice, improving farm productivity and efficiency ( Farmonaut ).
    Blockchain-Based Product Traceability: By integrating blockchain technology, Farmonaut enables traceability solutions for various industries, particularly agriculture. Blockchain ensures that every stage of the product's journey, from farm to consumer, is transparent and secure, thereby enhancing trust and reducing fraud in supply chains ( Farmonaut ).
    Fleet and Resource Management: Farmonaut provides tools for fleet management, enabling agribusinesses to manage their logistics more efficiently. This helps reduce operational costs by optimizing vehicle usage, ensuring safety, and improving the overall management of agricultural machinery ( Farmonaut ).
    Carbon Footprinting: To help agribusinesses monitor and reduce their environmental impact, Farmonaut offers carbon footprint tracking. This feature provides real-time data on emissions, allowing businesses to take steps towards sustainability and compliance with environmental regulations ( Farmonaut ) ( Farmonaut ).

    2. Business Model
    Farmonaut operates on a subscription-based model, offering various packages that cater to individual farmers, cooperatives, agribusinesses, and government institutions. The company provides flexible pricing tiers depending on the number of hectares monitored and the frequency of satellite data updates. Their platform is accessible through web and mobile apps, making it easy for users to track and manage their farms from anywhere ( Farmonaut ).
    The business also extends its services through APIs, enabling developers and businesses to integrate Farmonaut's satellite and weather data into their own systems. This API access broadens the company's revenue streams and encourages collaboration with other agritech companies and agricultural research organizations ( Farmonaut ).
    Additionally, Farmonaut partners with financial institutions to provide satellite-based verification for crop loans and insurance, reducing the likelihood of fraud and improving access to financing for farmers ( Farmonaut ) ( Farmonaut ).

    3. Target Audience
    Farmonaut's solutions are tailored for a wide range of users in the agricultural ecosystem:
    Individual Farmers: The platform offers individual farmers access to precise, real-time data on their crops. This allows them to make better decisions regarding crop health, pest management, and resource allocation. Small and medium-sized farms can particularly benefit from these affordable precision farming solutions ( Farmonaut ).
    Agribusinesses: Large-scale agribusinesses use Farmonaut for plantation management, fleet tracking, and resource optimization. These businesses can manage vast farming operations more efficiently by leveraging satellite monitoring and AI-driven insights ( Farmonaut ).
    Governments and NGOs: Farmonaut works with government agencies and non-governmental organizations (NGOs) to improve agricultural productivity, implement large-scale farm monitoring programs, and support sustainable farming initiatives. Governments also use Farmonaut's tools for crop area and yield estimation, especially in policy and subsidy distribution ( Farmonaut ).
    Financial Institutions: By providing satellite-based verification of farms, Farmonaut helps banks and insurance companies streamline crop loan approvals and reduce fraudulent claims in agricultural insurance ( Farmonaut ) ( Farmonaut ).
    Corporate Clients: Companies, especially in sectors like textile and food, use Farmonaut's blockchain-based traceability solutions to ensure the authenticity and transparency of their supply chains. This strengthens consumer trust and enhances the brand's reputation ( Farmonaut ).

    4. Value Proposition and Benefits
    Farmonaut's key value propositions include:
    Cost-Effective Precision Agriculture: Farmonaut democratizes access to precision agriculture by offering affordable services for real-time crop monitoring and farm management. Unlike traditional precision farming tools that require expensive hardware, Farmonaut relies on satellite imagery, making it a more economical solution for farmers of all scales ( Farmonaut ).
    Increased Farm Productivity: By providing real-time data on crop health, soil moisture, and weather patterns, Farmonaut allows farmers to make informed decisions that optimize their resources. This leads to better crop yields, reduced input costs, and minimized crop losses ( Farmonaut ).
    Sustainability: Through features like carbon footprint tracking and efficient resource management, Farmonaut promotes sustainable farming practices. This is crucial in today's agriculture, where there is growing pressure to reduce environmental impact while increasing food production ( Farmonaut ).
    Transparency and Trust: Farmonaut's blockchain-based traceability solution ensures transparency in supply chains, particularly for corporate clients in agriculture and related sectors. By offering verifiable data on product origin and journey, the system helps build consumer trust ( Farmonaut ).
    Access to Financing: Farmonaut's partnerships with financial institutions provide farmers with satellite-based verification for loans and insurance. This improves access to financing while reducing the risks for lenders ( Farmonaut ) ( Farmonaut ).
    Scalability: The platform is highly scalable, serving clients from smallholder farmers to large agribusinesses and government bodies. Its modular design allows users to choose the services they need and scale them up as their operations grow ( Farmonaut ).

    Conclusion
    Farmonaut stands out in the agritech space by offering a comprehensive suite of tools that combine satellite technology, AI, and blockchain to meet the diverse needs of modern agriculture. Whether it's precision farming, supply chain transparency, or sustainability, Farmonaut is at the forefront of the agricultural revolution, making it easier for farmers and agribusinesses to thrive in an increasingly data-driven world. By lowering the cost barrier and providing advanced solutions, Farmonaut continues to empower farmers, improve productivity, and promote sustainable agricultural practices globally.
    '''

    # Resume from a previous run; start fresh if the state file is missing
    # or unreadable.
    try:
        done_posts = read_array_from_file('posts_done.txt')
    except (FileNotFoundError, ValueError, SyntaxError):
        done_posts = []

    post_num = 0
    file_path = 'clustered_keywords.xlsx'
    for keyword_array in get_keyword_arrays(file_path):
        print(keyword_array)
        post_num = post_num + 1
        print('post_num', post_num)
        # Skip the first 20 clusters (already processed in earlier runs).
        if post_num > 20:
            try:
                keywords = keyword_array
                keywords_formatted = keywords

                prompt = f"farmonaut want to publish a high ranking SEO blog comprising of the following keywords: {keyword_array}. Suggest a high ranking SEO optimized title for the blog. Don't output any other text."
                title = call_genai(prompt, 0, 50)
                print(title)

                prompt = f"farmonaut want to publish a high ranking SEO blog comprising of the following keywords: {keyword_array}. Suggest a highranking SEO optimized context/summary for this blog in 50 words. Don't output any other text."
                caption = call_genai(prompt, 0, 150)
                print(caption)

                post_data = {
                    'caption': caption,
                    'media': []
                }

                prompt = f"farmonaut want to publish a high ranking SEO blog comprising of the following a. keywords: {keyword_array}, b. title: {title}, c. context: {caption}. Suggest description of two DALL-E generated AI images that should be added to the blog for achieving high search engine ranking. Provide response in the following array format: [image1_description, image2_description] Don't output any other text."
                image_descriptions = call_genai(prompt, 0, 300)
                image_descriptions = string_to_array(image_descriptions)
                print(image_descriptions)

                image_url1 = ai_image_url(image_descriptions[0])
                image_url2 = ai_image_url(image_descriptions[1])
                media_url_arr = [image_url1, image_url2]
                print(media_url_arr)

                media_info = process_media2(media_url_arr, title)
                if media_info:
                    post_data['media'].extend(media_info)
                post_data['featured_media'] = media_info[0]['id'] if media_info else None

                stripe_html = ''' '''

                # NOTE(review): the list of HTML tags in this prompt was lost
                # (tag names stripped to bare commas in the recovered source);
                # the tag list below is a best-effort reconstruction -- TODO
                # confirm against the original prompt.
                prompt = f"Generate a HTML formatted very detailed and comprehensive blog post of at least 5000 words with <h1>, <h2>, <h3>, <p>, <ul>, <ol>, <table>, <blockquote> blocks wherever necessary in informational tone and as a first-person plural (we, us, our, ours) mandatorily including the following keywords: {keywords} \n\n The blog needs to be at least 5000 words in length. Please don't include any hallucinated information about Farmonaut in the blog. Please don't include any case study in the blog. To add any more details in the blog related to Farmonaut, use information from this text and further elaborate on it if necessary: {farmonaut_info} \n\n Strictly Incorporate these keywords into the blog: {keywords_formatted}. If any of the keywords look unrelated and out of context to the blog, then don't add them to the blog. Add Images (URLs) from this JSON object {post_data['media']} into the blog in <img> blocks wherever necessary including the absolute top of the blog. If it matches with the theme of the blog, add a table of how Farmonaut Satellite System is better than drone and IoT based farm monitoring. Add links to https://farmonaut.com/app_redirect, https://sat.farmonaut.com/api, https://play.google.com/store/apps/details?id=com.farmonaut.android, https://apps.apple.com/in/app/farmonaut/id1489095847, https://farmonaut.com/farmonaut-satellite-weather-api-developer-docs/ wherever necessary. Include this custom HTML code for subscribing to Farmonaut: {stripe_html} \n Add bullet points and subheadings wherever necessary. Please include an FAQ section as well."
                blog_content = call_genai(prompt, 1, 8000)

                category_ids = "blogs: 5, case_study:546, area_estimation:542, remote_sensing:9, precision_farming:548, api_development:572, usa:561"
                prompt = f'Based upon this title: {title} , identify the best category id in which this title fits in: {category_ids}. Strictly only return the integer value as the response'
                category_id = call_genai(prompt, 0, 5)
                try:
                    category_id = int(category_id)
                except (TypeError, ValueError):
                    category_id = 5  # fall back to the generic "blogs" category

                post_data['title'] = title
                post_data['content'] = blog_content
                post_data['status'] = 'publish'
                post_data['excerpt'] = caption
                post_data['comment_status'] = 'open'
                post_data['categories'] = [category_id]

                publish_or_update_wordpress_post(wp_url, wp_username, wp_password, post_data)

                done_posts.append(post_num)
                save_to_file('posts_done.txt', str(done_posts))
                time.sleep(60 * 60)  # throttle: one post per hour
            except Exception:
                # One failed cluster must not kill the whole run.
                print(traceback.format_exc())
            remove_all_files_in_folder('insta_files')


if __name__ == "__main__":
    main()