

Scenario

Analyze what earns backlinks in your niche, then generate tailored outreach emails for each type of linking site. Pull a competitor's backlink overview and individual links from SEMrush, categorize the linking sites (blog, news, directory, resource), create an outreach brand voice in Mavera, then generate email variants for each category.

Architecture

SEMrush Backlinks API (overview + individual links) → heuristic categorization into blog, news, directory, and resource buckets → Mavera brand voice → per-category outreach email generation.

Code

import os, requests, csv, io, time

SR, MV = os.environ["SEMRUSH_API_KEY"], os.environ["MAVERA_API_KEY"]
MB = "https://app.mavera.io/api/v1"
MH = {"Authorization": f"Bearer {MV}", "Content-Type": "application/json"}
COMP = "competitor.com"

# Pull the competitor's backlink overview (SEMrush returns ';'-delimited text, not JSON).
ov = requests.get("https://api.semrush.com/analytics/v1/", params={
    "type": "backlinks_overview", "key": SR, "target": COMP,
    "target_type": "root_domain",
    "export_columns": "total,domains_num,urls_num,follows_num,nofollows_num",
}).text
ov_l = ov.strip().split("\n")
if len(ov_l) >= 2:
    print("Overview:", dict(zip(ov_l[0].split(";"), ov_l[1].split(";"))))

# Fetch the top 100 individual backlinks, sorted by page authority score.
lr = requests.get("https://api.semrush.com/analytics/v1/", params={
    "type": "backlinks", "key": SR, "target": COMP,
    "target_type": "root_domain", "display_limit": 100,
    "display_sort": "page_ascore_desc",
    "export_columns": "source_url,source_title,target_url,anchor,external_num,internal_num,page_ascore",
})
# Keep source URL, title, anchor text, and page authority score for each backlink.
reader = csv.reader(io.StringIO(lr.text), delimiter=";")
next(reader, None)  # skip the header row; the default avoids StopIteration on an empty response
links = [{"url": r[0], "title": r[1], "anchor": r[3], "auth": int(r[6] or 0)}
         for r in reader if len(r) >= 7]

# Bucket each linking page by rough site type using URL/title heuristics.
cats = {"blog": [], "news": [], "directory": [], "resource": [], "other": []}
for l in links:
    u, t = l["url"].lower(), l["title"].lower()
    if any(w in u for w in ["/blog","blog.","medium.com"]): cats["blog"].append(l)
    elif any(w in u+t for w in ["news","press","techcrunch"]): cats["news"].append(l)
    elif "directory" in u or "/list" in u: cats["directory"].append(l)
    elif any(w in t for w in ["resource","tools","guide"]): cats["resource"].append(l)
    else: cats["other"].append(l)

# Create a reusable brand voice so all outreach copy stays consistent.
bv = requests.post(f"{MB}/brand-voices", headers=MH, json={
    "name": "Outreach Voice",
    "samples": ["Hi [Name], saw your post on [Topic] — we have a study that adds data. Open to a look?"],
    "description": "Casual, helpful, concise. No hard sells.",
}).json()

# Generate two email variants per non-empty category, grounded in real linking pages.
for cat, cl in cats.items():
    if not cl: continue
    block = "\n".join(f"- {l['title'][:50]} ({l['url'][:60]}) — \"{l['anchor']}\""
                      for l in cl[:5])
    gen = requests.post(f"{MB}/generations", headers=MH, json={
        "prompt": f"2 outreach emails for {cat} sites linking to competitor.\n\n"
                  f"SITES:\n{block}\n\n<150 words each, reference site, offer value, soft CTA.",
        "brand_voice_id": bv["id"],
    }).json()
    print(f"\n=== {cat} ({len(cl)} links) ===")
    print(gen.get("output", gen.get("content", ""))[:500])
    time.sleep(0.5)

Example Output

Overview: {'total': '24853', 'domains_num': '1247', 'follows_num': '19201', 'nofollows_num': '5652'}

=== blog (34 links) ===
Variant 1: Hi Sarah, your post on "Why Content Teams Need Better Feedback
Loops" resonated. We built synthetic focus groups for drafts. Worth a demo?
Variant 2: Your Roundup included AI writing tools but nothing on audience
testing. Our case study: pre-publish focus groups cut revisions by 60%.

Error Handling

Backlink calls consume 20-70 API units per request, so monitor your remaining balance before running a full audit.
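A quick pre-flight check keeps a large pull from failing halfway through. A minimal sketch, assuming the SEMrush countapiunits endpoint (which returns your remaining unit balance as plain text) is available on your plan; the MIN_UNITS threshold is an arbitrary example:

import os, requests

SR = os.environ["SEMRUSH_API_KEY"]
MIN_UNITS = 5000  # example budget for one audit; tune to your plan

# countapiunits returns the remaining API unit balance as a plain-text integer.
resp = requests.get("https://www.semrush.com/users/countapiunits.html",
                    params={"key": SR}, timeout=10)
try:
    balance = int(resp.text.strip())
except ValueError:
    raise SystemExit(f"Unexpected balance response: {resp.text[:100]!r}")

if balance < MIN_UNITS:
    raise SystemExit(f"Only {balance} API units left; skipping the full audit.")
print(f"{balance} API units available, proceeding.")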
URL/title-based categorization is a heuristic and won't catch every case. For production use, supplement it with domain-level metadata or route low-confidence links to manual review.
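For the manual-review path, one low-effort option is to export everything the heuristics could not place so a person can label it by hand. A minimal sketch, assuming the cats dictionary from the code above (the review.csv filename is just an example):

import csv

# Dump the uncategorized links, plus their signals, for hand labeling.
with open("review.csv", "w", newline="") as f:
    w = csv.writer(f)
    w.writerow(["source_url", "title", "anchor", "page_ascore", "category"])
    for l in cats["other"]:
        w.writerow([l["url"], l["title"], l["anchor"], l["auth"], ""])

print(f"{len(cats['other'])} links exported to review.csv for manual categorization")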