Skip to main content

Documentation Index

Fetch the complete documentation index at: https://docs.mavera.io/llms.txt

Use this file to discover all available pages before exploring further.

Scenario

Competitors are discussed daily on Reddit. This job searches for mentions across subreddits, feeds text into Mavera Chat, and produces a structured sentiment breakdown: positive, negative, neutral, and opportunity. Run weekly.

Architecture

Code

import os, requests, time

# --- Auth setup same as Job 1 ---
# NOTE(review): this script assumes RD / RD_H (Reddit API base URL + auth
# headers) and MV_BASE / MV_H (Mavera API base URL + auth headers) are
# already in scope from Job 1 — confirm before running standalone.

# Competitor names used as search queries (placeholders — substitute real names).
COMPETITORS = ["Competitor A", "Competitor B", "Competitor C"]
# Subreddits to scan; every competitor is searched in every subreddit.
SUBS = ["SaaS", "startups", "productivity", "marketing"]

# 1. Search each competitor across subreddits.
#    len(COMPETITORS) x len(SUBS) requests; the 0.7 s pause between calls
#    keeps the request rate under Reddit's limit (see Error Handling below).
all_results = []
for comp in COMPETITORS:
    for sub in SUBS:
        search_params = {"q": comp, "restrict_sr": "true", "sort": "relevance",
                         "t": "month", "limit": 10, "raw_json": 1}
        # timeout= so a stalled connection cannot hang the weekly job forever.
        r = requests.get(f"{RD}/r/{sub}/search", headers=RD_H,
                         params=search_params, timeout=30)
        if r.status_code == 429:
            # Rate-limited: back off for the advertised window, then RETRY
            # this same query. (The original `continue` silently dropped it.)
            # float() because X-Ratelimit-Reset may be a float string like
            # "45.0", which int() would reject with ValueError.
            time.sleep(float(r.headers.get("X-Ratelimit-Reset", 60)))
            r = requests.get(f"{RD}/r/{sub}/search", headers=RD_H,
                             params=search_params, timeout=30)
        if not r.ok:
            continue  # best-effort: skip this query, keep collecting the rest
        for p in r.json().get("data", {}).get("children", []):
            d = p["data"]
            all_results.append({
                "competitor": comp,
                "subreddit": sub,
                "title": d.get("title", ""),
                "text": d.get("selftext", "")[:400],  # cap to keep prompt small
                "score": d.get("score", 0),
                "post_id": d.get("id", ""),
            })
        time.sleep(0.7)  # spacing between successive API calls

# 2. Enrich the top 20 posts (highest Reddit score first) with up to 5
#    top-level comments each, building the text lines fed to the analysis.
enriched = []
for item in sorted(all_results, key=lambda x: -x["score"])[:20]:
    enriched.append(
        f"[/r/{item['subreddit']}] {item['competitor']} | score:{item['score']}\n"
        f"{item['title']}\n{item['text'][:250]}")
    # timeout= so one dead connection cannot stall the whole run.
    cr = requests.get(f"{RD}/comments/{item['post_id']}", headers=RD_H,
                      params={"limit": 5, "depth": 1, "sort": "top", "raw_json": 1},
                      timeout=30)
    if cr.ok:
        # Parse the body once (the original called cr.json() twice).
        # The comments endpoint returns [post_listing, comment_listing];
        # index 1 holds the comments when present.
        payload = cr.json()
        if len(payload) > 1:
            for c in payload[1]["data"]["children"]:
                body = c.get("data", {}).get("body", "")
                # Skip empty bodies and moderator/author-deleted placeholders.
                if body and body not in ("[removed]", "[deleted]"):
                    enriched.append(f"  COMMENT ({item['competitor']}): {body[:250]}")
    time.sleep(0.7)  # spacing between successive API calls

# 3. Structured sentiment analysis: send the collected snippets to Mavera
#    Chat and print its per-competitor breakdown.
prompt = (
    "Competitive intelligence analyst: analyze Reddit mentions.\n\n"
    f"COMPETITORS: {', '.join(COMPETITORS)}\n\n"
    "DATA:\n"
    + "\n".join(enriched[:30])  # cap at 30 lines to bound prompt size
    + "\n\nFor EACH competitor: Sentiment breakdown (positive/negative/neutral with quotes), Top themes, Opportunities for us, Sentiment trend. End with comparative ranking."
)
resp = requests.post(f"{MV_BASE}/mave/chat", headers=MV_H,
                     json={"message": prompt}, timeout=120)
# Fail loudly on HTTP errors: the original called .json() blind, so a 4xx/5xx
# surfaced as a confusing JSONDecodeError instead of a clear HTTP failure.
resp.raise_for_status()
analysis = resp.json()

print(f"Collected {len(all_results)} mentions across {len(COMPETITORS)} competitors")
print(analysis.get("content", "")[:2000])  # truncate long model output

Example Output

Collected 87 mentions across 3 competitors

## Competitor A — Mixed-Negative
- Positive (34%): Onboarding, integrations, support
- Negative (41%): Pricing ("doubled overnight"), performance
- Opportunity: Pricing transparency + dashboard speed

## Competitor B — Positive
- Positive (62%): UX, mobile, free tier
- Negative (18%): "Outgrows you — no enterprise features"
- Opportunity: Enterprise gap we can fill

Error Handling

If competitor names are generic words, use quoted search phrases or add product-category terms to keep results precise.
The search phase makes N × M API calls (competitors × subreddits); the 700 ms delay between requests (~85 req/min) keeps you under the 100 requests/minute limit.

Reddit Integration

Mave Agent