Skip to main content

Documentation Index

Fetch the complete documentation index at: https://docs.mavera.io/llms.txt

Use this file to discover all available pages before exploring further.

Scenario

Your Greenhouse job postings go live without external validation. Are they clear? Appealing? Authentic? You pull active job postings from Greenhouse, extract the description and requirements, then run a Focus Group asking synthetic candidate personas to rate each posting on clarity, appeal, and authenticity using a Likert scale plus open-ended feedback. The output tells you which postings need rewriting before they cost you top candidates. Flow: Greenhouse GET /jobs → Active postings → Mavera POST /personas (candidate archetypes) → POST /focus-groups (Likert + open-ended) → Posting quality scores

Architecture

Code

import os, requests, time, base64

# Required environment variables — fail fast at startup if either is missing.
GH_KEY = os.environ["GREENHOUSE_API_KEY"]  # Greenhouse Harvest API key
MV = os.environ["MAVERA_API_KEY"]          # Mavera API key
GH_BASE = "https://harvest.greenhouse.io/v1"
MV_BASE = "https://app.mavera.io/api/v1"
# Mavera uses Bearer-token auth with JSON request bodies.
MV_H = {"Authorization": f"Bearer {MV}", "Content-Type": "application/json"}

# Greenhouse Harvest uses HTTP Basic auth with the API key as the username and
# an empty password — hence the trailing ":" before base64-encoding.
gh_auth = base64.b64encode(f"{GH_KEY}:".encode()).decode()
GH_H = {"Authorization": f"Basic {gh_auth}"}

# 1. Pull open jobs (first page, up to 50).
# raise_for_status() surfaces auth/permission errors immediately; without it a
# 4xx JSON error payload would propagate and fail confusingly further down.
# timeout prevents the script from hanging forever on a stalled connection.
jobs_resp = requests.get(
    f"{GH_BASE}/jobs",
    headers=GH_H,
    params={"status": "open", "per_page": 50},
    timeout=30,
)
jobs_resp.raise_for_status()
jobs = jobs_resp.json()

# 2. Create target candidate personas — one Mavera persona per archetype.
CANDIDATE_ARCHETYPES = [
    {"name": "Passive Senior Engineer", "desc": "Employed, not actively looking. 8+ years exp. Cares about impact, not perks."},
    {"name": "Active Mid-Level IC", "desc": "3-5 years exp, actively interviewing. Compares 5+ postings. Values clarity and growth."},
    {"name": "Career Changer", "desc": "Switching from adjacent field. Needs to understand if they qualify. Values inclusive language."},
]

persona_ids = []
for arch in CANDIDATE_ARCHETYPES:
    resp = requests.post(f"{MV_BASE}/personas", headers=MV_H, json={
        "name": f"GH Candidate: {arch['name']}",
        "description": arch["desc"],
        # First word of the archetype name, e.g. "passive" / "active" / "career".
        "psychographic": {"job_seeking_status": arch["name"].split()[0].lower()},
    }, timeout=30)
    # Fail fast: an unchecked error response would otherwise surface later as
    # an opaque KeyError("id").
    resp.raise_for_status()
    persona_ids.append(resp.json()["id"])
    time.sleep(0.2)  # light pacing to stay under API rate limits

# 3. Rate each posting via a Mavera Focus Group (first 10 open jobs).

def _mean(values):
    """Arithmetic mean of a list of numbers; 0 for an empty list."""
    return sum(values) / len(values) if values else 0

results = []
for job in jobs[:10]:
    title = job.get("name", "Untitled")
    # departments/offices may be missing, empty, or contain None entries.
    dept = (job.get("departments", [{}])[0] or {}).get("name", "N/A")
    office = (job.get("offices", [{}])[0] or {}).get("name", "Remote")

    # Fetch the full job record. Greenhouse stores descriptions either in
    # content.sections (array of {title, body}) or as flat `notes`; check both.
    posting_detail = requests.get(
        f"{GH_BASE}/jobs/{job['id']}", headers=GH_H, timeout=30
    ).json()
    description = ""
    for sec in posting_detail.get("content", {}).get("sections", []):
        description += f"\n{sec.get('title','')}\n{sec.get('body','')}"
    if not description.strip():
        description = posting_detail.get("notes", "") or ""
    if not description.strip():
        # Nothing for the personas to evaluate — skip empty postings
        # (running a Focus Group on just a title would waste credits).
        print(f"{title}: skipped (no description)")
        continue

    fg_resp = requests.post(f"{MV_BASE}/focus-groups", headers=MV_H, json={
        "name": f"Job Posting Review: {title}",
        "persona_ids": persona_ids,
        "questions": [
            {"type": "likert", "text": "Rate the CLARITY of this job posting (1=confusing, 5=crystal clear)", "scale": 5},
            {"type": "likert", "text": "Rate the APPEAL of this posting (1=would skip, 5=would apply immediately)", "scale": 5},
            {"type": "likert", "text": "Rate the AUTHENTICITY (1=corporate fluff, 5=genuine and believable)", "scale": 5},
            {"type": "open_ended", "text": "What about this posting would make you NOT apply?"},
            {"type": "open_ended", "text": "Rewrite the first sentence to make it more compelling."},
        ],
        # Truncate long descriptions to keep the context payload bounded.
        "context": f"JOB POSTING: {title}\nDepartment: {dept} | Location: {office}\n\n{description[:2000]}",
        "responses_per_persona": 2,
    }, timeout=30)
    fg_resp.raise_for_status()  # fail fast before reading fg["id"] below
    fg = fg_resp.json()

    # Poll for completion: up to 15 x 5 s = 75 s. If the group never reaches
    # "completed", proceed with whatever responses exist — scores are partial.
    fg_data = {}
    for _ in range(15):
        time.sleep(5)
        fg_data = requests.get(
            f"{MV_BASE}/focus-groups/{fg['id']}", headers=MV_H, timeout=30
        ).json()
        if fg_data.get("status") == "completed":
            break

    # Bucket numeric ratings by matching the metric keyword in the question text.
    scores = {"clarity": [], "appeal": [], "authenticity": []}
    for resp in fg_data.get("responses", []):
        q = resp.get("question", "").lower()
        val = resp.get("rating") or resp.get("score")
        if not val:
            continue  # open-ended answers carry no numeric rating
        for metric in scores:
            if metric in q:
                scores[metric].append(val)
                break

    result = {
        "job": title, "dept": dept, "fg_id": fg["id"],
        "clarity": round(_mean(scores["clarity"]), 1),
        "appeal": round(_mean(scores["appeal"]), 1),
        "authenticity": round(_mean(scores["authenticity"]), 1),
    }
    results.append(result)
    print(f"{title}: clarity={result['clarity']} appeal={result['appeal']} auth={result['authenticity']}")
    time.sleep(1)  # pacing between Focus Group launches

# 4. Flag underperformers: list postings from least to most appealing and
# mark anything averaging below 3/5 on appeal as a rewrite candidate.
ranked = sorted(results, key=lambda row: row["appeal"])
for row in ranked:
    needs_rewrite = row["appeal"] < 3.0
    flag = "⚠ REWRITE" if needs_rewrite else "✓ OK"
    line = (
        f"  [{flag}] {row['job']}: "
        f"C={row['clarity']} A={row['appeal']} Au={row['authenticity']}"
    )
    print(line)

Example Output

{
  "postings_reviewed": 10,
  "results": [
    { "job": "Senior Backend Engineer", "clarity": 4.2, "appeal": 4.5, "authenticity": 3.8 },
    { "job": "Product Manager, Growth", "clarity": 2.8, "appeal": 2.3, "authenticity": 2.1 },
    { "job": "Data Scientist", "clarity": 4.0, "appeal": 3.9, "authenticity": 4.1 },
    { "job": "DevOps Engineer", "clarity": 3.1, "appeal": 2.7, "authenticity": 3.5 }
  ],
  "rewrite_candidates": [
    {
      "job": "Product Manager, Growth",
      "issue": "Passive Senior Engineer: 'This reads like a requirements dump. No vision for what the PM will actually own.'",
      "suggestion": "Career Changer: 'I can't tell if my marketing analytics background qualifies. List transferable skills.'"
    }
  ]
}

Error Handling

Greenhouse stores job descriptions in content.sections (array of {title, body}) or as flat notes. The code checks both. Some jobs have empty descriptions — skip these.
Job body often contains HTML. For better Mavera results, strip tags with a library like beautifulsoup4 (Python) or sanitize-html (Node) before sending.
Running a Focus Group per posting consumes credits. For 50+ postings, batch into groups of 5 and include all in a single Focus Group context.