

Scenario

Reddit AMAs are among the internet's richest interview formats: candid, long-form answers to the questions an audience actually asks. This job collects common questions from top AMA threads, feeds them into Mavera Speak sessions with personas, and produces interview transcripts: deep qualitative research without recruiting a single participant.

Architecture

Reddit search API → extract questions from top AMA threads → Mave deduplicates and curates → create personas → Speak interview sessions → transcripts

Code

import os, time
import requests

# --- Auth setup same as Job 1; repeated here so the snippet runs standalone.
# Base URL and env-var names below are illustrative, not canonical. ---
RD = "https://oauth.reddit.com"
RD_H = {"Authorization": f"bearer {os.environ['REDDIT_TOKEN']}",
        "User-Agent": "mavera-ama-research/0.1"}
MV_BASE = os.environ.get("MAVERA_BASE_URL", "https://api.mavera.io/v1")  # assumed
MV_H = {"Authorization": f"Bearer {os.environ['MAVERA_API_KEY']}"}

SUB = "entrepreneur"

# 1. Find AMA posts + extract questions
r = requests.get(f"{RD}/r/{SUB}/search", headers=RD_H,
    params={"q": "AMA OR ask me anything", "restrict_sr": "true", "sort": "top", "t": "year", "limit": 15, "raw_json": 1})
r.raise_for_status()
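# Search returns a Listing envelope; each child is {"kind": "t3", "data": {...post fields...}}.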
ama_posts = [p["data"] for p in r.json()["data"]["children"]]

questions = []
for post in ama_posts[:10]:
    cr = requests.get(f"{RD}/comments/{post['id']}", headers=RD_H,
        params={"limit": 20, "depth": 1, "sort": "top", "raw_json": 1})
    thread = cr.json() if cr.ok else []
    if len(thread) > 1:  # element 0 is the post Listing, element 1 the comment Listing
        for c in thread[1]["data"]["children"]:
            body = c.get("data", {}).get("body", "")  # "more" stubs carry no body
            if body and "?" in body and len(body) < 500:  # short, genuine questions only
                questions.append(body.strip())
    time.sleep(0.7)  # stay under Reddit's rate limit

# 2. Curate via Mave
dedup = requests.post(f"{MV_BASE}/mave/chat", headers=MV_H, json={
    "message": f"Deduplicate these {len(questions)} AMA questions by theme. Return 8 best as numbered list.\n\n" +
               "\n".join(f"- {q[:200]}" for q in questions[:50])
}).json()
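# Assumes the reply text arrives in a top-level "content" field; adjust the key
# if the chat endpoint nests its response differently.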
curated = [l.strip().lstrip("0123456789.) ") for l in dedup.get("content","").split("\n")
           if l.strip() and any(c.isalpha() for c in l)][:8]
if not curated:
    curated = ["What was the turning point?", "Biggest mistake?", "How do you decide when to pivot?",
               "What does your typical day look like?", "How did you get first 100 customers?",
               "Best advice you've received?", "What would you do differently?", "How do you handle imposter syndrome?"]

# 3. Personas + Speak
INTERVIEWEES = [
    {"name": "Bootstrapped SaaS Founder", "desc": "$5M ARR, no funding, team of 12. Profitability over growth."},
    {"name": "VC-Backed CEO", "desc": "Series A ($8M). Team of 40. Scaling fast. Board dynamics."},
    {"name": "Solopreneur / Creator", "desc": "$300K/year digital products. No employees. Freedom over scale."},
]
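# One persona + one Speak session per archetype, all driven by the same curated questions.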
for p in INTERVIEWEES:
    persona = requests.post(f"{MV_BASE}/personas", headers=MV_H, json={
        "name": f"AMA: {p['name']}", "description": p["desc"],
    }).json()
    speak = requests.post(f"{MV_BASE}/speak", headers=MV_H, json={
        "persona_id": persona["id"],
        "input": [{"role": "system", "content": f"You are {p['name']} doing a Reddit AMA in /r/{SUB}. Answer candidly with examples."}]
                    + [{"role": "user", "content": q} for q in curated],
        "mode": "interview",
    }).json()
    print(f"\n{'='*60}\nAMA: {p['name']}\n{'='*60}")
    for ex in speak.get("exchanges", speak.get("messages", [])):
        if ex.get("role") == "user": print(f"\n  Q: {ex['content'][:150]}")
        elif ex.get("role") == "assistant": print(f"  A: {ex['content'][:350]}")
    time.sleep(0.3)
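
To keep full transcripts instead of the truncated console prints, a small variation (file naming here is arbitrary) can persist each session inside the same persona loop:

# Optional: after the speak call, persist the full exchange as markdown.
slug = p["name"].lower().replace(" ", "_").replace("/", "-")
with open(f"ama_{slug}.md", "w") as f:
    f.write(f"# AMA: {p['name']}\n\n")
    for ex in speak.get("exchanges", speak.get("messages", [])):
        if ex.get("role") not in ("user", "assistant"):
            continue  # skip the system prompt
        prefix = "Q:" if ex.get("role") == "user" else "A:"
        f.write(f"{prefix} {ex['content']}\n\n")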

Example Output

AMA: Bootstrapped SaaS Founder
  Q: Turning point?  A: Canceling freemium. Support dropped 70%, conversion tripled.
  Q: Biggest mistake?  A: VP Sales at $800K ARR. Burned $180K. Should've hired SDRs first.

AMA: Solopreneur / Creator
  Q: First 100 customers?  A: Reddit. 6 months answering niche sub questions.
     "Here's what I built" thread → 40 sales week 1. No ads.

Error Handling

If search returns few AMA threads, broaden the query to “ask anything” or “Q&A”.
For interviews with 8+ questions, split into two 4-question Speak sessions so answers stay focused (see the sketch below).
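
A minimal sketch of both fallbacks, reusing names from the job above; the 5-post threshold and 4-question chunk size are arbitrary choices:

# 1. Broaden the search query when the first pass comes back sparse.
if len(ama_posts) < 5:
    r = requests.get(f"{RD}/r/{SUB}/search", headers=RD_H,
        params={"q": '"ask anything" OR "Q&A"', "restrict_sr": "true",
                "sort": "top", "t": "year", "limit": 15, "raw_json": 1})
    r.raise_for_status()
    ama_posts = [p["data"] for p in r.json()["data"]["children"]]

# 2. One Speak session per 4-question chunk of the curated list.
for chunk in [curated[i:i + 4] for i in range(0, len(curated), 4)]:
    speak = requests.post(f"{MV_BASE}/speak", headers=MV_H, json={
        "persona_id": persona["id"],
        "input": [{"role": "system", "content": f"You are {p['name']} doing a Reddit AMA in /r/{SUB}. Answer candidly with examples."}]
                 + [{"role": "user", "content": q} for q in chunk],
        "mode": "interview",
    }).json()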
