import json
import os
import html
from huggingface_hub import InferenceClient
from moviepy.editor import VideoFileClip, concatenate_videoclips, vfx, AudioFileClip, CompositeAudioClip, ImageClip, TextClip, ColorClip, CompositeVideoClip
import requests
from datetime import datetime, timedelta
from tqdm import tqdm
from openai import OpenAI
import traceback
import time
import ast
import random
import shutil
from gdrive_upload import upload_files_to_drive
from mutagen.mp3 import MP3
import glob
import base64
# API keys and clients setup.
# Keys are read from environment variables (variable names are project
# conventions); hard-coding secrets in source is a security risk.
HF_API_KEY = os.environ["HF_API_KEY"]
OPENAI_API_KEY = os.environ["OPENAI_API_KEY"]
client = OpenAI(api_key=OPENAI_API_KEY)
relevant_links = '''
- Join Now Link: https://farmonaut.com/go
- App Link: https://farmonaut.com/app_redirect
- API: https://sat.farmonaut.com/api
- API Developer Docs: https://farmonaut.com/farmonaut-satellite-weather-api-developer-docs/
- Android App Link: https://play.google.com/store/apps/details?id=com.farmonaut.android
- iOS App Link: https://apps.apple.com/in/app/farmonaut/id1489095847
- Earn With Us Link: https://farmonaut.com/affiliate-program
- Facebook: https://www.facebook.com/farmonaut
- Instagram: https://www.instagram.com/farmonaut
- LinkedIn: https://www.linkedin.com/company/farmonaut
- Twitter: https://twitter.com/farmonaut
'''
youtube_description_guidelines = f'''
*Objective:* Generate an SEO-optimized YouTube video description that enhances visibility, engagement, and viewer retention.
*Instructions:*
1. **Hook (First 1–2 Sentences):**
- Begin with a compelling statement or question to grab attention.
- Incorporate the primary keyword early, as the initial 150–200 characters appear in search results and above the "Show More" fold.
- *Example:* "Discover 5 proven strategies to boost your YouTube channel's growth in 2025."
2. **Detailed Summary (200–500 Words):**
- Provide a comprehensive overview of the video's content.
- Naturally integrate the primary keyword 2–3 times.
- Include related keywords and synonyms to broaden search relevance.
- Ensure the content is original to avoid duplicate content issues.
- *Note:* A well-crafted summary aids YouTube's algorithm in understanding the video's context.
3. **Call-to-Action (CTA):**
- Encourage viewer interaction by prompting actions such as:
- "Subscribe for weekly marketing insights."
- "Download our free SEO checklist here: [link]"
4. **Relevant Links and Resources:**
- Provide links to:
- Your website or blog.
- Related videos or playlists.
- Social media profiles.
- Affiliate products or sponsors.
- Ensure all links are pertinent to the video's content to maintain viewer trust.
5. **Strategic Hashtags and Tags:**
- Incorporate 2–3 relevant hashtags at the end of your description, such as:
```
#YouTubeGrowth #ContentMarketing #SEO2025
```
- Avoid excessive tagging to prevent appearing spammy.
*Additional Guidelines:*
- Maintain a conversational tone to engage viewers.
- Ensure clarity and coherence throughout the description.
- Tailor the content to reflect the video's unique value proposition.
- Output only the caption, nothing else.
- Relevant Links: {relevant_links}
'''
youtube_long_narrational_guidelines = {
"narration_style": {
"tone": "Conversational and Relatable",
"description": "Use casual, friendly language that mimics real-life speech. Include colloquial expressions, modern slang, and maintain an influencer-style delivery with elements like uptalk or vocal fry.",
"examples": [
"Okay sooo… this just happened 😅",
"Not gonna lie, I wasn’t ready for this.",
"And yep, that’s definitely on fire now."
]
},
"structure": {
"descriptive_play_by_play": {
"enabled": True,
"description": "Narrate visuals in real-time. Add humor or clarity through reactions to actions as they happen.",
"example": "And there it goes—right into the fire. Classic."
},
"dramatic_or_theatrical": {
"enabled": True,
"description": "Use dramatic delivery for emotional or suspenseful moments. Include pauses and varied pacing.",
"example": "What happened next… no one saw coming."
},
"minimal_or_no_narration": {
"enabled": True,
"description": "When visuals are strong, reduce narration to key moments or reactions. Use silence strategically.",
"example": "[Silent moment] — then softly: 'Yeah. That happened.'"
}
},
"psychological_elements": {
"emotional_resonance": True,
"authenticity": True,
"storytelling_techniques": ["cliffhangers", "plot twists", "personal reflections"]
},
"tips": {
"know_your_audience": True,
"match_tone_to_content": True,
"keep_it_concise": True,
"ensure_high_quality_audio": True
},
"summary": "Narrate like a charismatic, relatable person reacting naturally to what’s happening on screen. Be emotionally authentic, sometimes funny, sometimes dramatic. Always engaging and never robotic."
}
#farmonaut_info
farmonaut_info = '''
Farmonaut is an agricultural technology company founded in 2018 that leverages satellite imagery, artificial intelligence (AI), and remote sensing to provide precision farming solutions accessible via Android, iOS, web platforms, and APIs. Farmonaut has supported 200,000+ farmers in 50+ countries and has monitored 10 million hectares of land globally. Its platform enables farmers to monitor crop health in real-time using vegetation indices like NDVI, NDRE, and EVI, detect early signs of stress, assess soil moisture, and receive personalized advisories through its Jeevn AI system. By integrating satellite data with AI-driven analytics, Farmonaut assists farmers in making informed decisions on irrigation, fertilization, and pest control, leading to optimized resource use and improved yields. The platform supports features such as yield prediction, field boundary detection, and historical crop performance analysis, making it suitable for both small-scale growers and large agribusinesses. Farmonaut's services are designed to be affordable and user-friendly, aiming to democratize access to advanced agricultural technologies and promote sustainable farming practices globally.
'''
video_title_guidelines = {
"YouTubeTitleGuidelines": {
"Length": {
"Recommendation": "Keep titles under 60 characters to ensure full visibility across devices.",
"Rationale": "Long titles may be truncated, reducing clarity and impact."
},
"Clarity": {
"Recommendation": "Ensure the title accurately reflects the video's content.",
"Rationale": "Misleading titles can lead to viewer dissatisfaction and reduced engagement."
},
"KeywordOptimization": {
"Recommendation": "Include relevant keywords near the beginning of the title.",
"Rationale": "Front-loading keywords improves search visibility and click-through rates."
},
"EmotionalTriggers": {
"Recommendation": "Use words that evoke curiosity, excitement, or urgency.",
"Examples": ["Surprising", "Unbelievable", "Essential", "Life-Changing"],
"Rationale": "Emotional language can increase viewer interest and engagement."
},
"UseOfNumbers": {
"Recommendation": "Incorporate numbers to indicate lists or steps.",
"Examples": ["5 Tips", "Top 10", "3 Easy Steps"],
"Rationale": "Numbers provide clear expectations and can attract viewers seeking concise information."
},
"EnsureClickbait": {
"Recommendation": "Mandatorily use clickbaity titles.",
"Rationale": "Clickbait can increase cick through rate."
},
"CharacterCasing": {
"Recommendation": "Use capitalization sparingly to emphasize key words.",
"Rationale": "Excessive capitalization can appear unprofessional or aggressive."
},
"Hashtags": {
"Recommendation": "Include relevant hashtags at the end of the title if appropriate.",
"Rationale": "Hashtags can improve discoverability but should not clutter the main title."
},
"Examples": [
{
"Title": "Boost Your Productivity: 5 Proven Strategies",
"Explanation": "Combines a clear benefit with a numbered list to attract viewers seeking efficiency tips."
},
{
"Title": "How to Save Money Fast – 3 Simple Steps",
"Explanation": "Offers a straightforward solution with a step-by-step approach."
},
{
"Title": "The Secret to Effortless Cooking Revealed!",
"Explanation": "Creates curiosity by hinting at valuable information."
}
]
}
}
farmonaut_branding_style = '''
Based on Farmonaut’s mission, product offerings, and positioning as a leader in digital agriculture, the branding style should reflect innovation, trust, accessibility, and sustainability. Here are specific recommendations:
1. Visual Identity
Color Palette: Use earth tones to evoke agriculture, complemented by modern tech-inspired accents to signal innovation and digital transformation.
Typography: Choose clean, sans-serif fonts for clarity and approachability, ensuring readability for users of all backgrounds.
Imagery: Incorporate satellite imagery, farm landscapes, and data visualizations to visually reinforce Farmonaut’s expertise in satellite-based crop monitoring and AI-driven solutions.
'''
youtube_thumbnail_guidelines = '''
- **Expressive Faces**
- Description: Exaggerated human emotions
- Purpose: Create instant viewer connection and curiosity
- **Bold Colors**
- Description: High-contrast vibrant hues
- Purpose: Stand out against YouTube's interface and competitors
- **Minimalist Focus**
- Description: Single focal point with no visual clutter
- Purpose: Instant comprehension of video premise
- **Impactful Text**
- Description: 4-6 word bold phrases (e.g., "$100,000", "Survived 24 Hours")
- Purpose: Reinforce video stakes without overwhelming
- **Narrative Intrigue**
- Description: Visual storytelling or mystery elements
- Purpose: Compel clicks through curiosity
- **Consistent Branding**
- Description: Recurring color palettes, fonts, and visual motifs
- Purpose: Build instant recognition and trust
- **Strategic Composition**
- Description: Diagonal lines, pointing gestures, and eye-flow guidance
- Purpose: Direct attention to key elements
'''
thumbnail_text_guidelines = '''
How to Make Great YouTube Thumbnail Words (Just 4–6 Words!)
– Make It Clear
Use the most important words about your video
Pick words that are easy to read, even on a phone
Keep it short and simple
– Make People Curious
Give a little sneak peek—but don’t tell the whole story!
Ask a fun or surprising question
Use fun words like “amazing” or “weird” to make it exciting
– Match Your Video
Make sure your words actually match what your video is about
- Note: The text must be click-bait. This is essential.
'''
# YouTube Data API key, read from the environment (never hard-code secrets)
API_KEY = os.environ["YOUTUBE_API_KEY"]
SEARCH_URL = 'https://www.googleapis.com/youtube/v3/search'
# Define the search parameters
SEARCH_TERMS = [
"technology",
"agriculture",
"forestry",
"soil health",
"geospatial"
]
# Calculate the time 24 hours ago in ISO 8601 format
published_after = (datetime.utcnow() - timedelta(days=1)).isoformat("T") + "Z"
# Function to fetch videos for a given search term
def fetch_videos(search_term):
params = {
'part': 'snippet',
'q': search_term,
'type': 'video',
'order': 'viewCount',
'publishedAfter': published_after,
'maxResults': 5,
'key': API_KEY
}
response = requests.get(SEARCH_URL, params=params)
if response.status_code == 200:
return response.json().get('items', [])
else:
print(f"Error fetching videos for '{search_term}': {response.status_code}")
return []
# Aggregate videos from all search terms
def get_trending_videos():
videos = []
seen_video_ids = set()
for term in SEARCH_TERMS:
items = fetch_videos(term)
for item in items:
video_id = item['id']['videoId']
if video_id not in seen_video_ids:
seen_video_ids.add(video_id)
title = item['snippet']['title']
description = item['snippet']['description']
videos.append({
'title': title,
'info': description
})
return videos
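# Usage sketch (illustrative; requires a valid YouTube Data API key in the
# environment): fetch the aggregated trending videos and print a few titles.
#   for video in get_trending_videos()[:3]:
#       print(video['title'])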
def merge_image_and_video(image_path, video_path, output_path):
from PIL import Image # required for getting original image size
# Load video to get its size and FPS
video = VideoFileClip(video_path)
video_width, video_height = video.size
fps = video.fps
# Get image size using PIL
with Image.open(image_path) as img:
img_width, img_height = img.size
# Calculate scale to fit the image inside video frame while keeping aspect ratio
scale = min(video_width / img_width, video_height / img_height)
new_width = int(img_width * scale)
new_height = int(img_height * scale)
# Create image clip with proper scaling and center fitting
image_clip = (
ImageClip(image_path)
.resize((new_width, new_height))
.set_duration(0.5)
.on_color(size=(video_width, video_height), color=(0, 0, 0), pos="center")
)
# Concatenate clips
final_clip = concatenate_videoclips([image_clip, video])
final_clip.write_videofile(output_path, codec="libx264", fps=fps)
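# Usage sketch (paths are hypothetical): prepend a half-second thumbnail frame
# to an existing clip, letterboxed to the clip's dimensions.
#   merge_image_and_video("thumb.jpg", "clip.mp4", "clip_with_thumb.mp4")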
def text_to_image(prompt, file_name, video_dimensions):
img = client.images.generate(
model="gpt-image-1",
prompt=prompt,
n=1,
size= video_dimensions,
quality="high",
output_format = "jpeg"
)
image_bytes = base64.b64decode(img.data[0].b64_json)
with open(file_name, "wb") as f:
f.write(image_bytes)
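# Usage sketch (prompt and file name are illustrative): render a landscape
# 1536x1024 JPEG from a text prompt via the images API.
#   text_to_image("A wheat field seen from a satellite", "field.jpg", "1536x1024")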
def thumbnail_prompt(video_type, words_for_thumbnail, summary):
    prompt = f'''
Write a prompt to design a YouTube {video_type} video thumbnail, which includes the thumbnail text as well. The thumbnail text is provided below. Use the video thumbnail guidelines provided below:
- text_to_be_a_part_of_thumbnail: {words_for_thumbnail}
- video_summary: {summary}
- video_thumbnail_guidelines: {youtube_thumbnail_guidelines}
- farmonaut_branding_style: {farmonaut_branding_style}
- Important:
  - Explicitly decide who this person is going to be, including their gender, profession, and nationality: a farmer, businessman, young adult, person in technology, etc.
  - Explicitly decide what the emotion on the face should be, based on the video.
- Note: Don't include the Farmonaut logo in the thumbnail.
- Note: Output only a single paragraph.
- Note: Don't give any conclusion or short summary anywhere or at the end of the paragraph.
- Note: Output only the prompt. Don't output any other text.
'''
return prompt
def generate_thumbnails(summary):
words_for_thumbnail = call_genai(f'I am making a youtube video thumbnail image, which will have 4-5 words in it. Use the content and guidelines below to generate this text which will be a part of this youtube video thumbnail. Output only those words, nothing else. - content: {summary} - thumbnail_text_guidelines: {thumbnail_text_guidelines}')
text_to_image(thumbnail_prompt("Short", words_for_thumbnail, summary), (os.path.join(article_base_path, "output", "thumbnail_Short.jpg")), "1024x1536")
text_to_image(thumbnail_prompt("Long", words_for_thumbnail, summary), (os.path.join(article_base_path, "output", "thumbnail_Long.jpg")), "1536x1024")
def ensure_article_directories(base_dir, date_str, article_folder):
"""
base_dir: main articles directory (e.g., 'articles')
date_str: date string in YYYY-MM-DD format
article_folder: unique article folder name (e.g., '123456_Title')
"""
paths = [
os.path.join(base_dir, date_str, article_folder, "content"),
os.path.join(base_dir, date_str, article_folder, "assets/videos"),
os.path.join(base_dir, date_str, article_folder, "assets/audio"),
os.path.join(base_dir, date_str, article_folder, "assets/images"),
os.path.join(base_dir, date_str, article_folder, "output"),
]
for path in paths:
os.makedirs(path, exist_ok=True)
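# Usage sketch (folder names are illustrative): creates content/, assets/videos,
# assets/audio, assets/images, and output/ under
# articles/2025-01-01/123456_Example_Title/.
#   ensure_article_directories("articles", "2025-01-01", "123456_Example_Title")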
# Shorts reuse the long-form narration guidelines
youtube_short_narrational_guidelines = youtube_long_narrational_guidelines
def split_paragraph(paragraph, segments=8):
# Split into words first
words = paragraph.split()
# How many words per segment (roughly)
words_per_segment = len(words) // segments
remainder = len(words) % segments
result = []
start = 0
for i in range(segments):
# Distribute the remainder across the first few segments
end = start + words_per_segment + (1 if i < remainder else 0)
segment = ' '.join(words[start:end])
result.append(segment)
start = end
return result
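# Worked example: 10 words split into 3 segments gives sizes 4/3/3, because the
# remainder (10 % 3 == 1) is distributed across the leading segments.
#   split_paragraph("a b c d e f g h i j", segments=3)
#   -> ['a b c d', 'e f g', 'h i j']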
def merge_audio_video(video_path, audio_path, output_path=None, audio_start=0, keep_original_audio=False, audio_volume=1.0):
"""
Merge audio and video files using MoviePy.
Parameters:
-----------
video_path : str
Path to the video file
audio_path : str
Path to the audio file
output_path : str, optional
Path to save the output file. If None, creates a file with '_merged' suffix in the same directory
audio_start : float, optional
Time in seconds where the audio should start in the video (default: 0)
keep_original_audio : bool, optional
Whether to keep the original video audio (default: False)
audio_volume : float, optional
Volume level for the added audio (default: 1.0)
Returns:
--------
str
Path to the output video file
"""
try:
# Load video and audio clips
video_clip = VideoFileClip(video_path)
audio_clip = AudioFileClip(audio_path)
# Set audio start time and duration
audio_clip = audio_clip.set_start(audio_start)
# Clip audio to match video duration
max_audio_duration = max(0, video_clip.duration - audio_start)
if audio_clip.duration > max_audio_duration:
audio_clip = audio_clip.subclip(0, max_audio_duration)
# Set audio volume
audio_clip = audio_clip.volumex(audio_volume)
# Create composite audio if keeping original audio
if keep_original_audio and video_clip.audio is not None:
final_audio = CompositeAudioClip([video_clip.audio, audio_clip])
else:
final_audio = audio_clip
# Set the audio to the video clip
final_clip = video_clip.set_audio(final_audio)
# Generate output path if not provided
if output_path is None:
filename, ext = os.path.splitext(video_path)
output_path = f"{filename}_merged{ext}"
# Write the output file
final_clip.write_videofile(output_path, codec="libx264", audio_codec="aac")
# Close clips to free resources
video_clip.close()
audio_clip.close()
final_clip.close()
return output_path
except Exception as e:
print(f"Error merging audio and video: {e}")
return None
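# Usage sketch (paths are hypothetical): lay quiet background music under a clip
# while keeping its original narration track; returns None on failure.
#   merged = merge_audio_video("narrated.mp4", "music.mp3",
#                              keep_original_audio=True, audio_volume=0.05)
#   if merged:
#       print(f"Merged file written to {merged}")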
def update_video_speed(video_file_name, output_video_file_name, target_duration):
# Load your video
clip = VideoFileClip(video_file_name)
# Calculate the speed factor
speed_factor = clip.duration / target_duration
# Apply speed change
new_clip = clip.fx(vfx.speedx, factor=speed_factor)
# Write the result to a file
new_clip.write_videofile(output_video_file_name)
# Close clips
clip.close()
new_clip.close()
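# Worked example: a 60 s clip with target_duration=30 gets speed_factor
# 60 / 30 = 2.0 (plays twice as fast); a 20 s clip with the same target gets
# factor 20 / 30 ≈ 0.67 and is slowed down instead.
#   update_video_speed("merged.mp4", "final.mp4", target_duration=30)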
# Thread pool used by generate_videos_from_json to create segments in parallel
from concurrent.futures import ThreadPoolExecutor, as_completed
def generate_videos_from_json(video_prompts, video_dir_prefix, target_duration):
"""
Generate videos from prompts and merge them into a single video, saving all files in the given directory.
Args:
video_prompts: List of video prompt strings
video_dir_prefix: Path prefix for saving video files (e.g., .../assets/videos/Clean_Title)
target_duration: Target duration in seconds
Returns:
Path to the final merged video
"""
# Use the provided directory for all video files
temp_directory = os.path.dirname(video_dir_prefix)
os.makedirs(temp_directory, exist_ok=True)
    # Initialize Hugging Face client (key read from the environment)
    client = InferenceClient(
        provider="fal-ai",
        api_key=HF_API_KEY,
    )
def generate_segment(idx, prompt):
ai_model = "Wan-AI/Wan2.1-T2V-14B"
        # Retry until the segment is generated; back off briefly on API errors
        while True:
            try:
                print(f"Generating video {idx+1}/{len(video_prompts)}: '{prompt}'")
                video_output = client.text_to_video(
                    prompt=prompt,
                    negative_prompt="Bright tones, overexposed, static, blurred details, subtitles, style, works, paintings, images, static, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, misshapen limbs, fused fingers, still picture, messy background, three legs, many people in the background, walking backwards, text, overlay text, subtitles",
                    num_frames=81,
                    guidance_scale=5.0,
                    model=ai_model
                )
                break
            except Exception:
                print(traceback.format_exc())
                time.sleep(30)
# Save to local storage
segment_path = f"{video_dir_prefix}_segment_{idx+1}.mp4"
with open(segment_path, "wb") as f:
f.write(video_output)
print(f"Video segment saved to {segment_path}")
return idx, segment_path
# Generate videos for each prompt in parallel (up to 5 at a time)
video_paths = [None] * len(video_prompts)
with ThreadPoolExecutor(max_workers=5) as executor:
futures = [executor.submit(generate_segment, i, prompt) for i, prompt in enumerate(video_prompts)]
for future in as_completed(futures):
idx, segment_path = future.result()
video_paths[idx] = segment_path
# Merge all videos into one
print("Merging video segments...")
video_clips = [VideoFileClip(path) for path in video_paths]
final_clip = concatenate_videoclips(video_clips)
merged_path = f"{video_dir_prefix}_merged.mp4"
final_clip.write_videofile(merged_path, fps = 16)
# Close video clips
for clip in video_clips:
clip.close()
final_clip.close()
# Adjust speed to match target duration
final_path = f"{video_dir_prefix}_final.mp4"
update_video_speed(merged_path, final_path, target_duration)
print(f"Final speed-adjusted video saved to {final_path}")
return final_path
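# Usage sketch (prompts and path prefix are illustrative): generate two
# 5-second scenes in parallel, merge them, and stretch the result to 12 s.
#   generate_videos_from_json(
#       ["A tractor plowing at dawn, cinematic lighting",
#        "A drone flying over green fields, smooth camera pan"],
#       "articles/2025-01-01/demo/assets/videos/demo",
#       target_duration=12,
#   )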
def call_genai(prompt):
#model = "gpt-4.1"
model = "gpt-4.1-mini-2025-04-14"
completion = client.chat.completions.create(
model=model,
messages=[
{
"role": "user",
"content": prompt
}
]
)
# print(completion.choices[0].message.content)
return completion.choices[0].message.content
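# Usage sketch: a single-turn chat completion; the whole prompt goes in one
# user message and only the text of the first choice is returned.
#   title = call_genai("Suggest a short title for a video about soil moisture.")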
def generate_video_script(content, duration, video_segments, video_type):
    if video_type == "long_video":
        narration_guidelines = youtube_long_narrational_guidelines
        total_words = int(2.8 * duration)
    elif video_type == "short_video":
        narration_guidelines = youtube_short_narrational_guidelines
        total_words = int(2.2 * duration)
    else:
        raise ValueError(f"Unknown video_type: {video_type}")
    prompt = f'''I am making a video of {duration} seconds duration based on the content below.
The video is divided into {video_segments} segments.
What these segments look like is provided in segment_info below.
I need the narration for this {duration}-second video, which will be spoken by a human as the video's audio.
Please provide the script for this.
Return the script as a single paragraph.
Include how Farmonaut (the company making this video) can play a role in this.
Make the script super simple, relatable, and story-style, so that even someone with no background in this topic can easily understand and stay engaged.
Only return the script. No other text.
- total_words_in_script:less than {total_words} words
- content: {content}
- segment_info: {video_segments}
- narration_guidelines: {narration_guidelines}
- information_about_farmonaut: {farmonaut_info}
- narration should be as if the narrator is from Farmonaut
- language of narration: English
'''
script = call_genai(prompt)
return script
def text_to_audio(prompt, file_name):
# Ensure the path exists
os.makedirs(os.path.dirname(file_name), exist_ok=True)
voices = ["ash", "ballad", "coral", "sage", "verse"]
voice_instructions = '''
Voice Affect:
- Sound engaging, warm, and confident.
- Use natural prosody: vary pitch and rhythm to emphasize important points.
- Avoid robotic or monotonous delivery.
Tone:
- Maintain a conversational yet professional tone.
- Use friendly, clear language.
- Adapt tone based on topic:
- Warm and empathetic for human-interest or lifestyle content.
- Neutral and objective for news or factual reporting.
Pacing:
- Speak at a moderate baseline (~150–160 words per minute).
- Increase pace for excitement or urgency (+10–20%).
- Slow down for complex or technical content (−15–25%).
- Use pacing variation to highlight contrast or conclusions.
Emotion:
- Enthusiastic for trends or viral content (smile in your voice).
- Measured and calm for news delivery.
- Calm and methodical for science or data-heavy material.
Pronunciation:
- Speak with clear and precise articulation.
- Emphasize and enunciate technical or unfamiliar words.
- Spell out acronyms on first use, then use short form.
Pauses:
- Insert a short pause (200–300 ms) after key points.
- Use a longer pause (400–600 ms) between major segments or topics.
- Use silence intentionally for structure and clarity.
'''
selected_voice = random.choice(voices)
with client.audio.speech.with_streaming_response.create(
model="gpt-4o-mini-tts",
voice=selected_voice,
input=prompt,
instructions=voice_instructions,
) as response:
response.stream_to_file(file_name)
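# Usage sketch (path is hypothetical): synthesize narration with a randomly
# selected voice and the delivery instructions above.
#   text_to_audio("Welcome to Farmonaut!", "assets/audio/welcome.mp3")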
def make_video_scene_prompts(content, x):
video_instruction = {
"prompt_guidelines": {
"structure": {
"formula": "Subject + Scene + Motion",
"components": {
"subject": "The main object or character (e.g., 'a knight', 'a dog', 'a futuristic robot')",
"scene": "The environment or setting (e.g., 'in a medieval city at dusk', 'on a rainbow', 'in a neon-lit alley')",
"motion": "The action or movement (e.g., 'rides a flying dragon', 'skateboarding', 'walking through the rain')"
},
"example": "A knight in shining armor rides a flying dragon over a medieval city at dusk, cinematic lighting, smooth camera pan."
},
"details": {
"add_cinematic_and_visual_details": [
"lighting (e.g., 'soft morning light', 'cinematic lighting', 'neon glow')",
"camera movement (e.g., 'smooth camera pan', 'zoom in', 'overhead shot')",
"style (e.g., 'realistic', 'cartoon', 'cyberpunk', 'Van Gogh style')",
"atmosphere (e.g., 'foggy', 'cheerful', 'mysterious')"
]
},
"focus_and_consistency": {
"one_scene_per_prompt": "Each prompt should focus on one subject, one scene, and one primary action.",
"avoid_mid_clip_changes": "Do not switch subjects or settings within a single prompt."
},
"negative_prompts": {
"usage": "Specify what you don't want (e.g., '--no blur, no text, no watermark') to reduce artifacts."
},
"advanced_dimensions": {
"shot_size": "e.g., 'close-up', 'wide shot'",
"angle": "e.g., 'low angle', 'bird’s-eye view'",
"lens_type": "e.g., 'fisheye', 'telephoto'",
"camera_movement": "e.g., 'tracking shot', 'dolly zoom'",
"speed_effects": "e.g., 'slow motion', 'time lapse'",
"atmosphere": "e.g., 'foggy', 'cheerful', 'mysterious'",
"style": "e.g., 'realistic', 'cartoon', 'cyberpunk', 'Van Gogh style'"
},
"generation_settings": {
"guidance_scale": "Use a moderate value (5–7). Too high causes flicker, too low causes drift.",
"diffusion_steps": "More steps sharpen details but take longer. Start moderate and adjust.",
"consistency_aids": "Use tools or workflows that improve frame-to-frame consistency if available."
},
"iteration": {
"refine_prompt": "Clarify or expand your prompt if the result isn’t as expected.",
"adjust_negative_prompts": "Remove unwanted elements as needed.",
"tweak_settings": "Adjust guidance and steps for smoother motion or sharper visuals."
},
"examples": [
{
"type": "Basic Formula",
"prompt": "A dog skateboarding on a rainbow, cartoon style, bright daylight, smooth camera tracking."
},
{
"type": "Cinematic Formula",
"prompt": "A detective walks through a neon-lit alley at night, rain falling, cinematic lighting, slow motion."
},
{
"type": "Artistic Formula",
"prompt": "A ballerina dances in a surreal dreamscape, pastel colors, soft focus, inspired by Monet."
}
],
"pro_tips": [
"Test your prompt as a single image before generating the video.",
"For complex scenes, generate separate clips and merge them in post-editing.",
"Explore community workflows or prompt libraries for inspiration."
]
}
}
prompt = f'''
"I need to create a video containing {x} scenes. All the scenes are 5 seconds long each. The video should be based on the given content and video_instructions.
For each segment, write a concise prompt that includes the relevant slice of the content.
Include how Farmonaut (the company which is making this video) can play a role.
Output all segment prompts as a comma-separated array. add more details into these scenes to make them more engaging and having the capability to hook the viewer.
Note: The array should only have the prompt text. nothing else."
Variables:
- content: {content}
- video_best_guidelines: {video_instruction}
- information_about_farmonaut: {farmonaut_info}
- language of narration: English
'''
return call_genai(prompt)
if __name__ == "__main__":
    # WordPress API credentials, read from the environment (never hard-code secrets)
    WORDPRESS_USERNAME = os.environ["WORDPRESS_USERNAME"]
    WORDPRESS_PASSWORD = os.environ["WORDPRESS_PASSWORD"]
def fetch_recent_wordpress_posts():
        # Calculate 3 days ago in ISO 8601 format
        after_time = (datetime.utcnow() - timedelta(days=3)).isoformat() + "Z"
url = f"https://www.farmonaut.com/wp-json/wp/v2/posts?after={after_time}&per_page=100"
response = requests.get(url, auth=(WORDPRESS_USERNAME, WORDPRESS_PASSWORD))
response.raise_for_status()
return response.json()
duration = 120
total_scenes = int(duration / 5)
    reel_duration = 27
    # Each processed reel segment lasts ~1.12 s (1.5 s clipped, sped up by 4/3)
    reel_total_scenes = int(reel_duration/1.12)
# --- TESTING: Use hardcoded segments instead of generating new ones ---
USE_HARDCODED_SEGMENTS = False # Set to False to revert to normal behavior
while True:
try:
posts = fetch_recent_wordpress_posts()
except Exception as e:
print(f"Error fetching WordPress posts: {e}")
time.sleep(60 * 10)
continue
post_count = 0
for post in tqdm(posts, desc="Processing WordPress blog posts"):
try:
                post_count = post_count + 1
# Extract content and title from WordPress post
content = post.get("content", {}).get("rendered", "No content available")
article_title = post.get("title", {}).get("rendered", "")
article_title = html.unescape(article_title)
print(article_title)
# Remove HTML tags if needed (optional, can use BeautifulSoup if desired)
# from bs4 import BeautifulSoup
# content = BeautifulSoup(content, "html.parser").get_text()
                prompt = f'Output YES if the provided text is related to any of the following: meat or animals; mentions of Pakistan, Palestine, North Korea, or Iran; suicide, murder, or homicide; sports; religion; a school; an event, exhibition, summit, or meeting; a place of historical significance; or politics, leaders, or politicians. Only answer YES or NO. Text: {content}'
                is_restricted_topic = call_genai(prompt)
                print('restricted_topic_status', is_restricted_topic)
                if is_restricted_topic.strip().lower() == "yes":
                    continue
                is_in_domain = call_genai(f'Output YES if the provided text is related to any of the following: a. agriculture, b. farming, c. forestry. Only answer YES OR NO. Text: {content}')
                print('domain_status', is_in_domain)
                if is_in_domain.strip().lower() == "no":
                    continue
                content = call_genai(f'I am planning to make a video using the attached reference content. However, I want the video to be informational, general-knowledge material. Can you go through this content and convert it into an informational, general-knowledge piece of about 500 words? Do not make any false or wrong claims about Farmonaut. Only return the content. No other text. - required language of output: English - Text content: {content}')
# Only now create folders and save files for articles that will have videos made
article_id = str(int(time.time()))
                article_title = call_genai(f'Can you write an interesting, catchy title for this article in under 60 characters? I will use it for a YouTube video published on the YouTube channel of Farmonaut, so the title should match what Farmonaut is and does. Make it click-baity; this is mandatory, and the title should have the potential to attract viewers. Only return the title. - Text: {content}. - info_about_farmonaut: {farmonaut_info}. - title_guidelines: {video_title_guidelines} - required language of title: English')
print('title', article_title)
video_caption_prompt = youtube_description_guidelines + "\n-Current Description: " + content
video_caption = call_genai(video_caption_prompt)
#video_caption = call_genai(f'Farmonaut (an agritech startup) is planning to make a video on this topic. Can you provide a caption for this video in about 1000 characters with 5-10 relevant hashtags? Only return the caption. Include farmonaut.com/go link in the caption which viewers can use to join Farmonaut. No other text. - language of caption: English - Text content: {content}, - info_about_farmonaut: {farmonaut_info}, - title: {article_title}')
clean_title = "".join(c for c in article_title if c.isalnum() or c.isspace()).strip()
clean_title = clean_title.replace(" ", "_")[:50]
tweet = call_genai(f'Make a Twitter tweet from this text. Include 5-8 relevant trending hashtags as well. Output only the tweet. No other text. -content: {video_caption}')
date_str = datetime.today().strftime("%Y-%m-%d")
article_folder = f"{article_id}_{clean_title}"
base_dir = "articles"
# Create article-specific directories
ensure_article_directories(base_dir, date_str, article_folder)
article_base_path = os.path.join(base_dir, date_str, article_folder)
# Save article_title and video_caption to a single txt file in output folder
output_info_path = os.path.join(article_base_path, "output", "video_info.txt")
with open(output_info_path, "w") as info_f:
info_f.write(f"Title:\n{article_title}\n\nCaption:\n{video_caption}\n\nTweet:\n{tweet}\n")
# Save metadata
metadata = {
"id": article_id,
"title": article_title,
"clean_title": clean_title,
"date": date_str,
"source": "WordPress",
"url": post.get("link", ""),
"raw_article": post
}
with open(os.path.join(article_base_path, "metadata.json"), "w") as meta_f:
json.dump(metadata, meta_f, indent=2)
# Save content
with open(os.path.join(article_base_path, "content", "article.txt"), "w") as content_f:
content_f.write(content)
# Generate video prompts
video_prompts = make_video_scene_prompts(content, total_scenes)
video_prompts = ast.literal_eval(video_prompts)
# Generate script and audio
final_script = generate_video_script(content, duration, total_scenes, "long_video")
script_audio_file_name = os.path.join(article_base_path, "assets/audio", f"{clean_title}_{article_id}.mp3")
text_to_audio(final_script, script_audio_file_name)
# Get audio duration
audio = MP3(script_audio_file_name)
actual_duration = audio.info.length
generate_thumbnails(video_caption)
if USE_HARDCODED_SEGMENTS:
# Use segments from the hardcoded folder for testing
hardcoded_folder = "articles/2025-04-28/1745856204_Spring_Gardening_Tips_7_Essential_Steps_for_Manche/assets/videos"
segment_files = sorted(glob.glob(os.path.join(hardcoded_folder, "*_segment_*.mp4")))
if not segment_files:
print("No segments found in the hardcoded folder for testing.")
continue
# Merge segments into a main video
video_clips = [VideoFileClip(path) for path in segment_files]
merged_path = os.path.join(article_base_path, "assets/videos", f"{clean_title}_merged.mp4")
final_clip = concatenate_videoclips(video_clips)
final_clip.write_videofile(merged_path, fps=16)
for clip in video_clips:
clip.close()
final_clip.close()
# Adjust speed to match target duration
final_path = os.path.join(article_base_path, "assets/videos", f"{clean_title}_final.mp4")
update_video_speed(merged_path, final_path, actual_duration)
final_video_path = final_path
# Use these for main video and reel
landscape_segment_paths = segment_files[:min(reel_total_scenes, len(segment_files))]
else:
# Generate and process main (landscape) video
final_video_path = generate_videos_from_json(
video_prompts,
os.path.join(article_base_path, "assets/videos", clean_title),
actual_duration
)
                    # Use the first reel_total_scenes video segments (or fewer if not enough)
landscape_segment_paths = []
for i in range(min(reel_total_scenes, len(video_prompts))):
segment_path = os.path.join(article_base_path, "assets/videos", f"{clean_title}_segment_{i+1}.mp4")
if os.path.exists(segment_path):
landscape_segment_paths.append(segment_path)
# Add narration audio to the main video
main_narration_path = os.path.join(article_base_path, "output", f"{clean_title}_{article_id}_narration.mp4")
merge_audio_video(
video_path=final_video_path,
audio_path=script_audio_file_name,
output_path=main_narration_path,
audio_start=0,
keep_original_audio=False,
audio_volume=1.0
)
# main_final_path will be created after join_now transition and background audio merge
# --- Instagram Reel (portrait) generation ---
if not landscape_segment_paths:
print("No landscape segments found for Reel generation.")
else:
                    # 1. Process, clip, and speed up each segment before concatenation
processed_clips = []
for p in landscape_segment_paths:
clip = VideoFileClip(p)
                        # Clip to the first 1.5 seconds (or less if the segment is shorter)
                        clipped = clip.subclip(0, min(1.5, clip.duration))
                        # Speed up by 4/3, so 1.5 s of footage plays in ~1.125 s
                        speeded = clipped.fx(vfx.speedx, factor=(4/3))
processed_clips.append(speeded)
# Do NOT close clip here; close after concatenation
concatenated_landscape = concatenate_videoclips(processed_clips)
                    # 2. Generate new short script and audio for the Reel
reel_script = generate_video_script(content, reel_duration, len(processed_clips), "short_video")
reel_script_audio_file_name = os.path.join(article_base_path, "assets/audio", f"{clean_title}_{article_id}_reel.mp3")
text_to_audio(reel_script, reel_script_audio_file_name)
                    # 3. Create portrait canvas and overlays
# Get narration text split by time (for dynamic audio text)
reel_audio = MP3(reel_script_audio_file_name)
reel_actual_duration = reel_audio.info.length
# For simplicity, split script into N segments for N video segments
script_lines = reel_script.split('. ')
if len(script_lines) < len(processed_clips):
script_lines += [''] * (len(processed_clips) - len(script_lines))
# Prepare portrait canvas
portrait_w, portrait_h = 720, 1280
# Center landscape video vertically
landscape_w, landscape_h = processed_clips[0].size
y_offset = (portrait_h - landscape_h) // 2
# Prepare overlays
logo_path = os.path.join("watermarks", "farmonaut_logo.png")
footer_path = os.path.join("watermarks", "footer.png")
# Farmonaut logo at top
logo_height = int(portrait_h * 0.1)
logo_clip = (ImageClip(logo_path)
.resize(width=portrait_w, height=logo_height)
.set_position(("center", 0))
.set_duration(concatenated_landscape.duration))
# Title below logo, above video
title_clip = (TextClip(article_title, fontsize=40, color='white', font='Arial-Bold', size=(portrait_w, None), method='caption')
.set_position(("center", logo_height + 10))
.set_duration(concatenated_landscape.duration))
# Landscape video centered
landscape_clip = (concatenated_landscape
.set_position(("center", y_offset))
.resize(width=landscape_w, height=landscape_h))
# Portrait background
bg_clip = (ColorClip(size=(portrait_w, portrait_h), color=(0, 0, 0))
.set_duration(concatenated_landscape.duration))
# Composite all (footer will be added only in watermarking step)
composite_clips = [bg_clip, landscape_clip, logo_clip, title_clip]
final_reel_clip = CompositeVideoClip(composite_clips, size=(portrait_w, portrait_h))
                    # Write to file without audio; narration is merged in after speed adjustment
                    reel_noaudio_path = os.path.join(article_base_path, "output", f"{clean_title}_{article_id}_reel_noaudio.mp4")
                    final_reel_clip.write_videofile(reel_noaudio_path, fps=24, codec="libx264", audio=False)
# Adjust reel video speed to match narration audio duration
reel_speedadjusted_path = os.path.join(article_base_path, "output", f"{clean_title}_{article_id}_reel_noaudio_speedadjusted.mp4")
update_video_speed(reel_noaudio_path, reel_speedadjusted_path, reel_actual_duration)
# Add narration audio to the speed-adjusted reel video
reel_narration_path = os.path.join(article_base_path, "output", f"{clean_title}_{article_id}_reel_narration.mp4")
merge_audio_video(
video_path=reel_speedadjusted_path,
audio_path=reel_script_audio_file_name,
output_path=reel_narration_path,
audio_start=0,
keep_original_audio=False,
audio_volume=1.0
)
# reel_final_path will be created after join_now_reel transition and background audio merge
# Clean up
final_reel_clip.close()
for c in processed_clips:
c.close()
# --- Main video join_now.mp4 ---
join_now_path = "watermarks/join_now.mp4"
final_with_joinnow_path = os.path.join(article_base_path, "output", f"{clean_title}_{article_id}_with_bg_joinnow.mp4")
fade_duration = 1 # seconds
try:
with VideoFileClip(main_narration_path) as main_clip, VideoFileClip(join_now_path).without_audio() as join_clip:
# Apply fade out to the end of the main video (video only)
main_clip_faded = main_clip.fx(vfx.fadeout, fade_duration)
# Fade out main_clip's audio during the last fade_duration seconds (do not cut audio early)
if main_clip.audio:
main_audio = main_clip.audio.audio_fadeout(fade_duration)
main_clip_faded = main_clip_faded.set_audio(main_audio)
# Apply fade in to the start of the join_now video (video only, no audio)
join_clip_faded = join_clip.fx(vfx.fadein, fade_duration)
# Concatenate with a smooth transition
final_with_joinnow = concatenate_videoclips([main_clip_faded, join_clip_faded], method="compose")
# Force audio to match video duration
if final_with_joinnow.audio:
final_with_joinnow = final_with_joinnow.set_audio(final_with_joinnow.audio.subclip(0, final_with_joinnow.duration))
final_with_joinnow.write_videofile(final_with_joinnow_path, codec="libx264", audio_codec="aac")
final_with_joinnow.close()
except Exception as e:
print(f"Error appending join_now.mp4 with fade transition: {e}")
# --- Reel video join_now_reel.mp4 ---
join_now_reel_path = "watermarks/join_now_reel.mp4"
final_with_joinnow_reel_path = os.path.join(article_base_path, "output", f"{clean_title}_{article_id}_reel_with_bg_joinnow.mp4")
try:
with VideoFileClip(reel_narration_path) as main_clip, VideoFileClip(join_now_reel_path).without_audio() as join_clip:
                        # Apply fade out to the end of the reel video (video only)
                        main_clip_faded = main_clip.fx(vfx.fadeout, fade_duration)
                        # Fade out the reel's audio over the last fade_duration seconds
                        if main_clip.audio:
                            main_audio = main_clip.audio.audio_fadeout(fade_duration)
                            main_clip_faded = main_clip_faded.set_audio(main_audio)
# Apply fade in to the start of the join_now_reel video (video only, no audio)
join_clip_faded = join_clip.fx(vfx.fadein, fade_duration)
# Concatenate with a smooth transition
final_with_joinnow_reel = concatenate_videoclips([main_clip_faded, join_clip_faded], method="compose")
if final_with_joinnow_reel.audio:
final_with_joinnow_reel = final_with_joinnow_reel.set_audio(final_with_joinnow_reel.audio.subclip(0, final_with_joinnow_reel.duration))
final_with_joinnow_reel.write_videofile(final_with_joinnow_reel_path, codec="libx264", audio_codec="aac")
final_with_joinnow_reel.close()
except Exception as e:
print(f"Error appending join_now_reel.mp4 with fade transition: {e}")
# Add background audio (randomly selected) after join_now transitions
bg_audio_files = glob.glob("bg_audio/*.mp3")
if not bg_audio_files:
print("No background audio files found in bg_audio/. Skipping background audio merge.")
selected_bg_audio = None
else:
selected_bg_audio = random.choice(bg_audio_files)
print(f"Adding background audio ({selected_bg_audio}) after join_now transitions")
main_final_path = os.path.join(article_base_path, "output", f"{clean_title}_{article_id}_final.mp4")
if selected_bg_audio:
merge_audio_video(
video_path=final_with_joinnow_path,
audio_path=selected_bg_audio,
output_path=main_final_path,
audio_start=0,
keep_original_audio=True,
audio_volume=0.05
)
else:
# Fallback: just copy the video without background audio
shutil.copyfile(final_with_joinnow_path, main_final_path)
reel_final_path = os.path.join(article_base_path, "output", f"{clean_title}_{article_id}_reel_final.mp4")
if selected_bg_audio:
merge_audio_video(
video_path=final_with_joinnow_reel_path,
audio_path=selected_bg_audio,
output_path=reel_final_path,
audio_start=0,
keep_original_audio=True,
audio_volume=0.05
)
else:
shutil.copyfile(final_with_joinnow_reel_path, reel_final_path)
print(f"Final video created at: {main_final_path}")
print(f"Final REEL video created at: {reel_final_path}")
# Add watermarks to the final video
try:
def add_watermarks_to_video(video_path, logo_path, footer_path, is_reel=False):
video = VideoFileClip(video_path)
w, h = video.size
if is_reel:
# Farmonaut logo: full width, dynamic height (e.g., 10% of video height), top center
logo = ImageClip(logo_path)
logo_height = int(h * 0.1)
logo = logo.resize(width=w, height=logo_height)
logo = logo.set_position(("center", "top")).set_start(0).set_duration(video.duration)
else:
# Farmonaut logo: 10% of video height, top right (original logic)
logo = ImageClip(logo_path)
logo_height = int(h * 0.1)
logo = logo.resize(height=logo_height)
logo = logo.set_position(("right", "top")).set_start(0).set_duration(video.duration)
# Footer: full width, bottom
footer = ImageClip(footer_path)
footer = footer.resize(width=w)
footer_h = footer.size[1]
footer = footer.set_position(("center", h - footer_h)).set_start(0).set_duration(video.duration)
# Composite
final = CompositeVideoClip([video, logo, footer])
# Always output to a new file with _watermarked before extension
base, ext = os.path.splitext(video_path)
output_path = f"{base}_watermarked{ext}"
final.write_videofile(output_path, codec="libx264", audio_codec="aac")
video.close()
logo.close()
footer.close()
final.close()
return output_path
watermarks_dir = "watermarks"
logo_path = os.path.join(watermarks_dir, "farmonaut_logo.png")
footer_path = os.path.join(watermarks_dir, "footer.png")
# Main video watermarking
watermarked_path = add_watermarks_to_video(
main_final_path,
logo_path,
footer_path,
is_reel=False
)
print(f"Watermarks added to: {watermarked_path}")
# Reel video watermarking
watermarked_reel_path = add_watermarks_to_video(
reel_final_path,
logo_path,
footer_path,
is_reel=True
)
print(f"Watermarks added to REEL: {watermarked_reel_path}")
# Merge thumbnail_short with watermarked reel and upload all required files to Google Drive
try:
# Create a uniquely named copy of video_info.txt
unique_info_name = f"{clean_title}_{article_id}_video_info.txt"
unique_info_path = os.path.join(article_base_path, "output", unique_info_name)
shutil.copyfile(output_info_path, unique_info_path)
# Paths for thumbnails
thumbnail_short_path = os.path.join(article_base_path, "output", "thumbnail_Short.jpg")
thumbnail_long_path = os.path.join(article_base_path, "output", "thumbnail_Long.jpg")
# Path for merged reel
merged_reel_path = os.path.join(article_base_path, "output", f"{clean_title}_{article_id}_reel_final_merged.mp4")
# Merge thumbnail_short with watermarked reel
merge_image_and_video(
thumbnail_short_path,
watermarked_reel_path,
merged_reel_path
)
upload_results = upload_files_to_drive(
[watermarked_path, unique_info_path, merged_reel_path, thumbnail_short_path, thumbnail_long_path],
date_str=date_str,
article_folder=article_folder
)
print("Google Drive upload results:")
for f, link in upload_results.items():
print(f"{f}: {link}")
# Optionally, remove the temp unique info file after upload
try:
os.remove(unique_info_path)
except Exception as cleanup_e:
print(f"Warning: Could not remove temp file {unique_info_path}: {cleanup_e}")
except Exception as e:
print(f"Error uploading to Google Drive: {e}")
            except Exception:
                print(f"Error adding watermarks: {traceback.format_exc()}")
        except Exception:
            print(f"Error processing post: {traceback.format_exc()}")
continue
print(f"All Processing Complete...")
time.sleep(60 * 60 * 60)