import os, requests, time, base64
from collections import defaultdict
# --- Configuration (read from the environment; fails fast with KeyError) ---
LV_KEY = os.environ["LEVER_API_KEY"]   # Lever API key (used as Basic-auth username)
MV = os.environ["MAVERA_API_KEY"]      # Mavera bearer token
LV_BASE = "https://api.lever.co/v1"
MV_BASE = "https://app.mavera.io/api/v1"
# Mavera: standard Bearer-token auth with JSON request bodies.
MV_H = {"Authorization": f"Bearer {MV}", "Content-Type": "application/json"}
# Lever: HTTP Basic auth — API key as username, blank password (hence the trailing ':').
lv_auth = base64.b64encode(f"{LV_KEY}:".encode()).decode()
LV_H = {"Authorization": f"Basic {lv_auth}"}
def lv_get(path, params=None):
    """GET a Lever API endpoint and return the decoded JSON body.

    Retries a bounded number of times on HTTP 429 (rate limit), honoring
    the Retry-After header when the server sends one and otherwise backing
    off exponentially. Raises requests.HTTPError for other non-2xx
    responses and RuntimeError if rate limiting persists.

    The original version recursed on 429 with a fixed 1s sleep and no
    timeout — unbounded recursion under sustained rate limiting, and a
    request that could hang forever.
    """
    for attempt in range(5):
        r = requests.get(
            f"{LV_BASE}{path}",
            headers=LV_H,
            params=params or {},
            timeout=30,  # never hang indefinitely on a stuck connection
        )
        if r.status_code != 429:
            r.raise_for_status()
            return r.json()
        # Rate limited: prefer the server's Retry-After hint; otherwise
        # back off exponentially (1s, 2s, 4s, ...).
        try:
            delay = float(r.headers.get("Retry-After", 2 ** attempt))
        except ValueError:
            delay = float(2 ** attempt)
        time.sleep(delay)
    raise RuntimeError(f"Lever API still rate-limiting after retries: {path}")
# 1. Fetch the pipeline stage definitions and index display names by id.
stages_resp = lv_get("/stages")
stage_map = {}
for stage in stages_resp.get("data", []):
    stage_map[stage["id"]] = stage["text"]
# 2. Page through /opportunities (with the stage expanded), capped at ~500
# records. Lever's cursor comes back in the response's "next" field.
opportunities = []
offset = None
while len(opportunities) < 500:
    page_params = {"limit": 100, "expand": "stage"}
    if offset:
        page_params["offset"] = offset
    page = lv_get("/opportunities", page_params)
    opportunities += page.get("data", [])
    offset = page.get("next")
    if not offset:
        break  # no more pages
    time.sleep(0.15)  # small pause between pages to stay polite
# 3. Group opportunities by the name of their current pipeline stage.
#
# BUG FIX: the request above uses expand=stage, so Lever returns the
# "stage" field as an embedded object ({"id": ..., "text": ...}) rather
# than a bare id string. The original code passed that dict straight to
# stage_map.get(), which raises TypeError (dicts are unhashable). Handle
# both the expanded-object and bare-id shapes.
def _stage_label(opp):
    """Return the human-readable stage name for one opportunity record."""
    stage = opp.get("stage")
    if isinstance(stage, dict):
        # Expanded object: prefer its own display text, fall back to lookup.
        return stage.get("text") or stage_map.get(stage.get("id"), "Unknown")
    return stage_map.get(stage, "Unknown")

stage_groups = defaultdict(list)
for opp in opportunities:
    stage_groups[_stage_label(opp)].append({
        "name": opp.get("name", ""),
        "headline": opp.get("headline", ""),
        "origin": opp.get("origin", ""),
        "sources": opp.get("sources", []),
    })
# 4. Create a Mavera persona for each pipeline stage with enough candidates.
#
# Fixes over the original: check the HTTP status before indexing the JSON
# (a 4xx/5xx previously surfaced as a confusing KeyError on "id"), add a
# request timeout, and sort the deduplicated samples so persona content is
# deterministic across runs (set iteration order is not).
persona_ids = []
for stage_name, opps in stage_groups.items():
    if len(opps) < 3:
        # Too few candidates to describe a meaningful persona.
        continue
    headlines = sorted({o["headline"] for o in opps if o["headline"]})[:5]
    origins = sorted({o["origin"] for o in opps if o["origin"]})[:3]
    # Rough engagement mindset keyed off the stage name.
    lowered = stage_name.lower()
    if "new" in lowered:
        mindset = "just applied, low investment"
    elif "onsite" in lowered or "offer" in lowered:
        mindset = "deeply invested, high expectations"
    else:
        mindset = "moderate engagement"
    resp = requests.post(f"{MV_BASE}/personas", headers=MV_H, timeout=30, json={
        "name": f"Lever: {stage_name} Candidate",
        "description": (
            f"Candidate currently in '{stage_name}' stage. N={len(opps)}. "
            f"Headlines: {', '.join(headlines[:3])}. Origins: {', '.join(origins)}."
        ),
        "demographic": {"job_titles": headlines},
        "psychographic": {
            "pipeline_stage": stage_name,
            "mindset": f"Candidate at {stage_name} — {mindset}",
        },
    })
    resp.raise_for_status()  # fail loudly instead of KeyError below
    p = resp.json()
    persona_ids.append({"id": p["id"], "stage": stage_name, "n": len(opps)})
    print(f"Persona: {p['id']} — {stage_name} ({len(opps)} candidates)")
    time.sleep(0.3)  # light throttle between persona creations
# 5. Run a candidate-experience focus group across all stage personas.
PROCESS_DESCRIPTION = """Our interview process:
1. Application review (3-5 business days)
2. 30-min recruiter screen
3. 45-min hiring manager phone interview
4. 4-hour onsite (3 technical + 1 culture)
5. Offer within 48 hours of onsite decision"""
# Fixes over the original: verify the HTTP status before decoding (a
# failed POST previously produced an opaque KeyError when fg["id"] was
# read later) and add a request timeout.
fg_resp = requests.post(f"{MV_BASE}/focus-groups", headers=MV_H, timeout=30, json={
    "name": "Candidate Experience by Pipeline Stage",
    "persona_ids": [p["id"] for p in persona_ids],
    "questions": [
        # First question is structured (likert); the rest are free-text.
        {"type": "likert", "text": "Rate your overall experience at your current stage (1-5)", "scale": 5},
        "What has been the most frustrating part of the process so far?",
        "What communication would improve your experience right now?",
        "Would you recommend this company to a friend based on your experience so far?",
        "What's the #1 thing that would make you drop out of this process?",
    ],
    "context": PROCESS_DESCRIPTION,
    "responses_per_persona": 3,
})
fg_resp.raise_for_status()
fg = fg_resp.json()
# 6. Poll for completion (up to ~100s: 20 polls x 5s), then print each
# response grouped by the pipeline stage of its persona.
#
# Fixes over the original: request timeout, HTTP status check on each
# poll, and an explicit warning when the focus group never reaches
# "completed" (previously the script fell through silently and printed
# whatever partial data the last poll returned).
data = {}
for _ in range(20):
    time.sleep(5)
    poll = requests.get(f"{MV_BASE}/focus-groups/{fg['id']}", headers=MV_H, timeout=30)
    poll.raise_for_status()
    data = poll.json()
    if data.get("status") == "completed":
        break
else:
    # for/else: runs only if the loop exhausted without a break.
    print(
        f"Warning: focus group {fg['id']} did not complete while polling "
        f"(last status: {data.get('status')!r}); printing partial results."
    )
print(f"\nFocus Group: {fg['id']}")
for resp in data.get("responses", []):
    # Map the response's persona id back to its pipeline stage.
    stage = next((p["stage"] for p in persona_ids if p["id"] == resp.get("persona_id")), "?")
    print(f"\n[{stage}] {resp.get('question','')[:60]}")
    print(f" → {resp.get('answer','')[:250]}")