import os, requests, time
# --- Auth setup same as Job 1 ---
# Competitor brands to track, and the subreddits scanned for mentions of them.
COMPETITORS = [
    "Competitor A",
    "Competitor B",
    "Competitor C",
]
SUBS = [
    "SaaS",
    "startups",
    "productivity",
    "marketing",
]
# 1. Search each competitor across subreddits.
# Collects up to 10 posts per (competitor, subreddit) pair from the last
# month into flat dicts in `all_results`. Failures for one pair skip that
# pair only (best-effort collection).
all_results = []
for comp in COMPETITORS:
    for sub in SUBS:
        for _attempt in range(2):  # one retry after a rate-limit pause
            r = requests.get(
                f"{RD}/r/{sub}/search",
                headers=RD_H,
                params={"q": comp, "restrict_sr": "true", "sort": "relevance",
                        "t": "month", "limit": 10, "raw_json": 1},
                timeout=30,  # bound the wait: a stalled call must not hang the job
            )
            if r.status_code != 429:
                break
            # Honor Reddit's reset hint before retrying. The original code
            # slept and then `continue`d, skipping this pair entirely —
            # paying the wait but losing the data. Header may be fractional
            # seconds, so int() could raise; parse as float.
            time.sleep(float(r.headers.get("X-Ratelimit-Reset", 60)))
        if not r.ok:
            continue  # best-effort: drop this pair on any other error
        for p in r.json().get("data", {}).get("children", []):
            d = p["data"]
            all_results.append({
                "competitor": comp,
                "subreddit": sub,
                "title": d.get("title", ""),
                "text": d.get("selftext", "")[:400],
                "score": d.get("score", 0),
                "post_id": d.get("id", ""),
            })
        time.sleep(0.7)  # pacing between search calls to stay under the rate limit
# 2. Enrich top posts with comments.
# Takes the 20 highest-scoring mentions and appends each post plus up to 5
# of its top comments to `enriched` as plain-text lines for the LLM prompt.
enriched = []
for item in sorted(all_results, key=lambda x: -x["score"])[:20]:
    enriched.append(
        f"[/r/{item['subreddit']}] {item['competitor']} | score:{item['score']}\n"
        f"{item['title']}\n{item['text'][:250]}"
    )
    cr = requests.get(
        f"{RD}/comments/{item['post_id']}",
        headers=RD_H,
        params={"limit": 5, "depth": 1, "sort": "top", "raw_json": 1},
        timeout=30,  # bound the wait: a stalled call must not hang the job
    )
    if cr.ok:
        payload = cr.json()  # decode once; the original parsed the body twice
        # The comments endpoint returns [post_listing, comment_listing];
        # guard in case the comment listing is absent.
        if len(payload) > 1:
            for c in payload[1]["data"]["children"]:
                # "more" stubs carry no body; also drop moderation placeholders.
                body = c.get("data", {}).get("body", "")
                if body and body not in ("[removed]", "[deleted]"):
                    enriched.append(f" COMMENT ({item['competitor']}): {body[:250]}")
    time.sleep(0.7)  # pacing between comment fetches to stay under the rate limit
# 3. Structured analysis.
# Sends the enriched mention digest (capped at 30 lines) to the LLM chat
# endpoint and prints a collection summary plus the first 2000 chars of
# the model's competitive-intelligence report.
analysis = requests.post(
    f"{MV_BASE}/mave/chat",
    headers=MV_H,
    json={
        "message": f"Competitive intelligence analyst: analyze Reddit mentions.\n\nCOMPETITORS: {', '.join(COMPETITORS)}\n\nDATA:\n"
        + "\n".join(enriched[:30])
        + "\n\nFor EACH competitor: Sentiment breakdown (positive/negative/neutral with quotes), Top themes, Opportunities for us, Sentiment trend. End with comparative ranking."
    },
    timeout=120,  # LLM responses are slow; bound the wait instead of hanging forever
).json()
print(f"Collected {len(all_results)} mentions across {len(COMPETITORS)} competitors")
print(analysis.get("content", "")[:2000])