Skip to main content

Documentation Index

Fetch the complete documentation index at: https://docs.mavera.io/llms.txt

Use this file to discover all available pages before exploring further.

Scenario

Your best threads contain insights worth thousands of words but vanish from attention after 48 hours. This job pulls a thread’s tweet chain and feeds it into Generate with a Blog Post template and brand voice, producing polished long-form content that preserves the thread’s energy.

Architecture

Code

"""Convert an X/Twitter thread into a long-form blog post via the Mavera API.

Pipeline: fetch the root tweet (and its conversation_id), page through the
author's tweets in that conversation via recent search, then create a brand
voice from the thread text and generate a blog post with it.
"""
import os
import time

import requests

X = os.environ["X_BEARER_TOKEN"]
MV = os.environ["MAVERA_API_KEY"]
X_BASE = "https://api.x.com/2"
MV_BASE = "https://app.mavera.io/api/v1"
X_H = {"Authorization": f"Bearer {X}"}
MV_H = {"Authorization": f"Bearer {MV}", "Content-Type": "application/json"}

TWEET_ID = "1234567890123456789"
TIMEOUT = 30  # seconds per HTTP request; prevents an indefinite hang


def _get_with_retry(url, params):
    """GET `url` with the X API headers, retrying once after a 429.

    Sleeps until the `x-rate-limit-reset` epoch timestamp (or 60s if the
    header is absent). Raises `requests.HTTPError` on any other failure.
    """
    r = requests.get(url, headers=X_H, params=params, timeout=TIMEOUT)
    if r.status_code == 429:
        reset = int(r.headers.get("x-rate-limit-reset", time.time() + 60))
        # The reset moment may already be in the past; a negative argument
        # to time.sleep() raises ValueError, so clamp at zero.
        time.sleep(max(0, reset - int(time.time())))
        r = requests.get(url, headers=X_H, params=params, timeout=TIMEOUT)
    r.raise_for_status()
    return r


# 1. Fetch first tweet + conversation_id
first = _get_with_retry(f"{X_BASE}/tweets/{TWEET_ID}", {
    "tweet.fields": "conversation_id,created_at,public_metrics,author_id",
    "expansions": "author_id", "user.fields": "name,username"})
fd = first.json()
tweet = fd["data"]
conv_id = tweet.get("conversation_id", TWEET_ID)
# includes.users may be present but empty — `or [{}]` guards IndexError
# (a dict.get default only applies when the key is missing entirely).
users = fd.get("includes", {}).get("users") or [{}]
uname = users[0].get("username", "unknown")

# 2. Fetch the rest of the thread (the author's tweets in the conversation)
chain = [tweet]
nt = None  # pagination token from the previous page, if any
for _ in range(5):  # cap at 5 pages x 100 tweets
    params = {"query": f"conversation_id:{conv_id} from:{uname}",
              "max_results": 100,
              "tweet.fields": "created_at,public_metrics",
              "sort_order": "recency"}
    if nt:
        params["next_token"] = nt
    body = _get_with_retry(f"{X_BASE}/tweets/search/recent", params).json()
    # Skip the root tweet; it is already in `chain`.
    chain.extend(t for t in body.get("data", []) if t["id"] != tweet["id"])
    nt = body.get("meta", {}).get("next_token")
    if not nt:
        break
    time.sleep(1)  # brief pause between pages to stay under rate limits

chain.sort(key=lambda t: t.get("created_at", ""))
thread_text = "\n\n".join(
    f"Tweet {i+1}/{len(chain)}:\n{t['text']}" for i, t in enumerate(chain))
likes = sum(t.get("public_metrics", {}).get("like_count", 0) for t in chain)
print(f"Thread: {len(chain)} tweets | {likes:,} likes by @{uname}")

# 3. Brand voice + Generate
bv_resp = requests.post(f"{MV_BASE}/brand-voices", headers=MV_H, json={
    "name": f"Thread Voice: @{uname}", "samples": [thread_text],
    "description": f"Voice from {len(chain)}-tweet thread, {likes:,} likes.",
}, timeout=TIMEOUT)
# Fail with a clear HTTP error rather than a KeyError on bv["id"] below.
bv_resp.raise_for_status()
bv = bv_resp.json()

blog_resp = requests.post(f"{MV_BASE}/generations", headers=MV_H, json={
    "brand_voice_id": bv["id"], "app_template": "blog_post",
    "prompt": f"Convert this thread into a blog post.\n\nTHREAD BY @{uname} ({len(chain)} tweets, {likes:,} likes):\n\n{thread_text}\n\n"
        "Title: SEO-friendly (<60 chars). Meta: <160 chars. Structure: intro → 4-6 H2 sections → conclusion + CTA. "
        "Length: 1,200-1,800 words. Preserve thread energy. Expand each tweet with examples and data. "
        "Include 2-3 placeholder internal links.",
}, timeout=TIMEOUT)
blog_resp.raise_for_status()
blog = blog_resp.json()
print(f"\n{'='*60}\nBLOG POST\n{'='*60}")
print(blog.get("output", blog.get("content", ""))[:2500])

Example Output

Thread: 11 tweets | 4,832 likes by @yourCEO | Voice: bv_thread_ceo_9k4m

title: "Why We Killed Our Free Plan (And Revenue Tripled)"

# Why We Killed Our Free Plan (And Revenue Tripled)
Two months ago, we eliminated our free plan. The thread got 4,800 likes.

## The Free Plan Was Eating Us Alive — 70% of tickets, 0% revenue...
## What Happened in 30 Days — Conversion tripled, support dropped 70%...
## The Counterargument — "You're killing top-of-funnel." Wrong because...
→ [See how we track conversion](/features/analytics)

Error Handling

Conversation search with from:{username} isolates the author’s tweets within the conversation. Results are sorted by created_at to preserve the thread’s original order.
Recent search covers 7 days only. For older threads, use full-archive (Pro tier) or pass saved text directly.
If output is shorter than 1,200 words, increase max_tokens or split generation into sections.

X / Twitter Integration

Brand Voice