import base64
import os
import time

import requests

# Both keys are required; os.environ[...] fails fast with KeyError if either is missing.
GH_KEY = os.environ["GREENHOUSE_API_KEY"]
MV = os.environ["MAVERA_API_KEY"]
GH_BASE = "https://harvest.greenhouse.io/v1"
MV_BASE = "https://app.mavera.io/api/v1"
MV_H = {"Authorization": f"Bearer {MV}", "Content-Type": "application/json"}
# Greenhouse Harvest uses HTTP Basic auth with the API key as the username and
# an empty password -- hence the trailing ":" before base64-encoding.
gh_auth = base64.b64encode(f"{GH_KEY}:".encode()).decode()
GH_H = {"Authorization": f"Basic {gh_auth}"}
# 1. Pull open jobs from Greenhouse (Harvest returns a JSON array)
jobs_resp = requests.get(f"{GH_BASE}/jobs", headers=GH_H,
                         params={"status": "open", "per_page": 50})
jobs_resp.raise_for_status()
jobs = jobs_resp.json()
# 2. Create target candidate personas
CANDIDATE_ARCHETYPES = [
    {"name": "Passive Senior Engineer", "desc": "Employed, not actively looking. 8+ years exp. Cares about impact, not perks."},
    {"name": "Active Mid-Level IC", "desc": "3-5 years exp, actively interviewing. Compares 5+ postings. Values clarity and growth."},
    {"name": "Career Changer", "desc": "Switching from adjacent field. Needs to understand if they qualify. Values inclusive language."},
]
persona_ids = []
for arch in CANDIDATE_ARCHETYPES:
    p = requests.post(f"{MV_BASE}/personas", headers=MV_H, json={
        "name": f"GH Candidate: {arch['name']}",
        "description": arch["desc"],
        # First word of the archetype name doubles as the seeking status
        # ("passive", "active", "career").
        "psychographic": {"job_seeking_status": arch["name"].split()[0].lower()},
    }).json()
    persona_ids.append(p["id"])
    time.sleep(0.2)  # light rate limiting between creates
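
# NOTE: the above recreates personas on every run. If Mavera's personas
# endpoint also supports GET for listing (an assumption -- not shown in this
# script), prior personas could be reused by name instead, e.g.:
#
#   existing = {p["name"]: p["id"]
#               for p in requests.get(f"{MV_BASE}/personas", headers=MV_H).json()}
#   persona_ids = [existing[f"GH Candidate: {a['name']}"]
#                  for a in CANDIDATE_ARCHETYPES
#                  if f"GH Candidate: {a['name']}" in existing]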
# 3. Rate each posting via Focus Group
results = []
for job in jobs[:10]:
    title = job.get("name", "Untitled")
    # departments/offices may be absent, empty, or contain nulls; guard all three.
    dept = ((job.get("departments") or [{}])[0] or {}).get("name", "N/A")
    office = ((job.get("offices") or [{}])[0] or {}).get("name", "Remote")
    # Last-resort copy if the posting has no content: job notes, else the
    # labels of any custom application questions.
    fallback = job.get("notes", "") or "\n".join(
        q.get("label", "") for q in job.get("questions", []))
    posting_detail = requests.get(f"{GH_BASE}/jobs/{job['id']}", headers=GH_H).json()
    description = ""
    for sec in posting_detail.get("content", {}).get("sections", []):
        description += f"\n{sec.get('title', '')}\n{sec.get('body', '')}"
    if not description:
        description = fallback or title
    fg = requests.post(f"{MV_BASE}/focus-groups", headers=MV_H, json={
        "name": f"Job Posting Review: {title}",
        "persona_ids": persona_ids,
        "questions": [
            {"type": "likert", "text": "Rate the CLARITY of this job posting (1=confusing, 5=crystal clear)", "scale": 5},
            {"type": "likert", "text": "Rate the APPEAL of this posting (1=would skip, 5=would apply immediately)", "scale": 5},
            {"type": "likert", "text": "Rate the AUTHENTICITY (1=corporate fluff, 5=genuine and believable)", "scale": 5},
            {"type": "open_ended", "text": "What about this posting would make you NOT apply?"},
            {"type": "open_ended", "text": "Rewrite the first sentence to make it more compelling."},
        ],
        # Truncate long postings so the context stays within reasonable bounds.
        "context": f"JOB POSTING: {title}\nDepartment: {dept} | Location: {office}\n\n{description[:2000]}",
        "responses_per_persona": 2,  # 3 personas x 2 = 6 answers per question
    }).json()
    # Poll for completion: up to 15 tries, 5 s apart (~75 s ceiling per job).
    for _ in range(15):
        time.sleep(5)
        fg_data = requests.get(f"{MV_BASE}/focus-groups/{fg['id']}", headers=MV_H).json()
        if fg_data.get("status") == "completed":
            break
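    else:
        # for/else: reached only when the loop exhausts without `break`, i.e.
        # the group never reported "completed". Score whatever came back.
        print(f"  focus group {fg['id']} did not finish in time; using partial responses")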
scores = {"clarity": [], "appeal": [], "authenticity": []}
for resp in fg_data.get("responses", []):
q = resp.get("question", "").lower()
val = resp.get("rating") or resp.get("score")
if val:
if "clarity" in q: scores["clarity"].append(val)
elif "appeal" in q: scores["appeal"].append(val)
elif "authenticity" in q: scores["authenticity"].append(val)
avg = lambda lst: sum(lst)/len(lst) if lst else 0
result = {
"job": title, "dept": dept, "fg_id": fg["id"],
"clarity": round(avg(scores["clarity"]), 1),
"appeal": round(avg(scores["appeal"]), 1),
"authenticity": round(avg(scores["authenticity"]), 1),
}
results.append(result)
print(f"{title}: clarity={result['clarity']} appeal={result['appeal']} auth={result['authenticity']}")
time.sleep(1)
# 4. Flag underperformers
for r in sorted(results, key=lambda x: x["appeal"]):
    flag = "⚠ REWRITE" if r["appeal"] < 3.0 else "✓ OK"
    print(f" [{flag}] {r['job']}: C={r['clarity']} A={r['appeal']} Au={r['authenticity']}")