
Bots and AI

This guide covers how to build automated bots and AI agents that interact with the Thrive Power List API. Use these patterns for automated scoring, data collection, and signal entry submission.

Getting Your API Key

To authenticate with the TPL API, you need an API key:

  1. Visit staging.power.thrive.xyz
  2. Click on your profile in the top right
  3. Navigate to the Guardians tab
  4. Copy your API Key

Keep this key secure. Do not commit it to version control or share it publicly.


Base Configuration

API Endpoints

Environment | Base URL
Staging     | https://staging.core.api.thrive.xyz
Production  | https://core.api.thrive.xyz

Authentication

All requests require the X-API-KEY header:

X-API-KEY: YOUR_API_KEY
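
For example, a quick connectivity check with Python's requests library might look like this (a minimal sketch; /tpl/signals is the same endpoint the bot below uses):

python
import requests

headers = {"X-API-KEY": "YOUR_API_KEY"}
response = requests.get(
    "https://staging.core.api.thrive.xyz/tpl/signals",
    headers=headers,
)
response.raise_for_status()
print(response.json())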

Python Bot Example

Setup

bash
pip install requests python-dotenv

Environment Configuration

Create a .env file:

env
TPL_API_KEY=your_api_key_here
TPL_BASE_URL=https://staging.core.api.thrive.xyz

Basic Bot Implementation

python
import os
import requests
import json
import time
from typing import Optional
from dotenv import load_dotenv

load_dotenv()

class TPLBot:
    def __init__(self):
        self.api_key = os.getenv("TPL_API_KEY")
        self.base_url = os.getenv("TPL_BASE_URL", "https://staging.core.api.thrive.xyz")

        if not self.api_key:
            raise ValueError("TPL_API_KEY environment variable is required")

        self.headers = {
            "X-API-KEY": self.api_key,
            "Content-Type": "application/json"
        }

    def get_signals(self) -> dict:
        """Fetch all available signals with their rubrics."""
        response = requests.get(
            f"{self.base_url}/tpl/signals",
            headers=self.headers
        )
        response.raise_for_status()
        return response.json()

    def get_missing_entries(self, signal_id: int) -> list:
        """Get projects that need scoring for a specific signal."""
        response = requests.get(
            f"{self.base_url}/tpl/signals/{signal_id}/signal_entries/missing",
            headers=self.headers
        )
        response.raise_for_status()
        return response.json()

    def get_project_details(self, project_id: int) -> dict:
        """Fetch full project information including team members."""
        response = requests.get(
            f"{self.base_url}/tpl/projects/{project_id}/details",
            headers=self.headers
        )
        response.raise_for_status()
        return response.json()

    def submit_signal_entry(
        self,
        signal_id: int,
        project_id: int,
        score: int,
        notes: str,
        subject_id: Optional[int] = None,
        subject_type: Optional[str] = None
    ) -> dict:
        """Submit a score for a project or subject."""
        payload = {
            "signal_entry": {
                "project_id": project_id,
                "score": score,
                "notes": notes
            }
        }

        if subject_id is not None:
            payload["signal_entry"]["subject_id"] = subject_id
            payload["signal_entry"]["subject_type"] = subject_type or "UserProject"

        response = requests.post(
            f"{self.base_url}/tpl/signals/{signal_id}/signal_entries",
            headers=self.headers,
            json=payload
        )
        response.raise_for_status()
        return response.json()

    def process_signal_queue(
        self,
        signal_id: int,
        scoring_function,
        rate_limit: float = 1.0
    ):
        """
        Process all missing entries for a signal.

        Args:
            signal_id: The signal to process
            scoring_function: A function that takes project details and returns (score, notes)
            rate_limit: Seconds to wait between requests
        """
        missing = self.get_missing_entries(signal_id)
        print(f"Found {len(missing)} projects to score")

        for entry in missing:
            project_id = entry["project_id"]
            subject_id = entry.get("subject_id")
            subject_type = entry.get("subject_type")

            try:
                details = self.get_project_details(project_id)
                score, notes = scoring_function(details)

                self.submit_signal_entry(
                    signal_id=signal_id,
                    project_id=project_id,
                    score=score,
                    notes=notes,
                    subject_id=subject_id,
                    subject_type=subject_type
                )

                print(f"Scored project {project_id}: {score}")

            except Exception as e:
                print(f"Error processing project {project_id}: {e}")

            time.sleep(rate_limit)


# Example usage
if __name__ == "__main__":
    bot = TPLBot()

    # Fetch available signals
    signals = bot.get_signals()
    print("Available signals:")
    for signal in signals["data"]["signals"]:
        print(f"  - {signal['id']}: {signal['key]}")

    # Example scoring function
    def example_scorer(project_details: dict) -> tuple[int, str]:
        project = project_details["data"]["project"]
        # Implement your scoring logic here
        return 3, f"Evaluated {project['name]}"

    # Process a signal queue
    # bot.process_signal_queue(signal_id=1, scoring_function=example_scorer)

TypeScript Bot Example

Setup

bash
npm install axios dotenv
npm install -D typescript @types/node

Environment Configuration

Create a .env file:

env
TPL_API_KEY=your_api_key_here
TPL_BASE_URL=https://staging.core.api.thrive.xyz

Basic Bot Implementation

typescript
import axios, { AxiosInstance } from 'axios';
import dotenv from 'dotenv';

dotenv.config();

interface Signal {
  id: number;
  key: string;
  rubric_mappings: Record<string, unknown>;
  review_instructions: string;
}

interface MissingEntry {
  project_id: number;
  subject_type: string | null;
  subject_id: number | null;
}

interface ProjectDetails {
  data: {
    project: {
      id: number;
      name: string;
      user_projects: Array<{
        subject_id: number;
        first_name: string;
        linkedin_url: string;
      }>;
      project_github_urls: string[];
      project_smart_contracts: string[];
    };
  };
}

interface SignalEntryPayload {
  signal_entry: {
    project_id: number;
    score: number;
    notes: string;
    subject_id?: number | null;
    subject_type?: string | null;
  };
}

class TPLBot {
  private client: AxiosInstance;

  constructor() {
    const apiKey = process.env.TPL_API_KEY;
    const baseURL = process.env.TPL_BASE_URL || 'https://staging.core.api.thrive.xyz';

    if (!apiKey) {
      throw new Error('TPL_API_KEY environment variable is required');
    }

    this.client = axios.create({
      baseURL,
      headers: {
        'X-API-KEY': apiKey,
        'Content-Type': 'application/json',
      },
    });
  }

  async getSignals(): Promise<{ data: { signals: Signal[] } }> {
    const response = await this.client.get('/tpl/signals');
    return response.data;
  }

  async getMissingEntries(signalId: number): Promise<MissingEntry[]> {
    const response = await this.client.get(
      `/tpl/signals/${signalId}/signal_entries/missing`
    );
    return response.data;
  }

  async getProjectDetails(projectId: number): Promise<ProjectDetails> {
    const response = await this.client.get(`/tpl/projects/${projectId}/details`);
    return response.data;
  }

  async submitSignalEntry(
    signalId: number,
    projectId: number,
    score: number,
    notes: string,
    subjectId?: number | null,
    subjectType?: string | null
  ): Promise<unknown> {
    const payload: SignalEntryPayload = {
      signal_entry: {
        project_id: projectId,
        score,
        notes,
      },
    };

    if (subjectId !== undefined && subjectId !== null) {
      payload.signal_entry.subject_id = subjectId;
      payload.signal_entry.subject_type = subjectType || 'UserProject';
    }

    const response = await this.client.post(
      `/tpl/signals/${signalId}/signal_entries`,
      payload
    );
    return response.data;
  }

  async processSignalQueue(
    signalId: number,
    scoringFunction: (details: ProjectDetails) => Promise<{ score: number; notes: string }>,
    rateLimitMs: number = 1000
  ): Promise<void> {
    const missing = await this.getMissingEntries(signalId);
    console.log(`Found ${missing.length} projects to score`);

    for (const entry of missing) {
      const { project_id, subject_id, subject_type } = entry;

      try {
        const details = await this.getProjectDetails(project_id);
        const { score, notes } = await scoringFunction(details);

        await this.submitSignalEntry(
          signalId,
          project_id,
          score,
          notes,
          subject_id,
          subject_type
        );

        console.log(`Scored project ${project_id}: ${score}`);
      } catch (error) {
        console.error(`Error processing project ${project_id}:`, error);
      }

      await this.sleep(rateLimitMs);
    }
  }

  private sleep(ms: number): Promise<void> {
    return new Promise((resolve) => setTimeout(resolve, ms));
  }
}

// Example usage
async function main() {
  const bot = new TPLBot();

  // Fetch available signals
  const signals = await bot.getSignals();
  console.log('Available signals:');
  for (const signal of signals.data.signals) {
    console.log(`  - ${signal.id}: ${signal.key}`);
  }

  // Example scoring function
  async function exampleScorer(
    projectDetails: ProjectDetails
  ): Promise<{ score: number; notes: string }> {
    const project = projectDetails.data.project;
    // Implement your scoring logic here
    return {
      score: 3,
      notes: `Evaluated ${project.name}`,
    };
  }

  // Process a signal queue
  // await bot.processSignalQueue(1, exampleScorer);
}

main().catch(console.error);

AI Integration Patterns

Using LLMs for Scoring

You can integrate language models to assist with scoring decisions:

python
import json
import openai

def ai_assisted_scorer(project_details: dict, signal_rubric: dict) -> tuple[int, str]:
    """Use an LLM to help score based on project evidence."""
    project = project_details["data"]["project"]

    prompt = f"""
    Score this project based on the following rubric:
    {json.dumps(signal_rubric, indent=2)}

    Project details:
    - Name: {project['name']}
    - GitHub repos: {project.get('project_github_urls', [])}
    - Team members: {len(project.get('user_projects', []))}

    Provide a score from 1-5 and a brief justification.
    Format: SCORE: [number]
    NOTES: [justification]
    """

    response = openai.chat.completions.create(
        model="gpt-4",
        messages=[{"role": "user", "content": prompt}]
    )

    # Parse the response
    text = response.choices[0].message.content
    score = int(text.split("SCORE:")[1].split()[0])
    notes = text.split("NOTES:")[1].strip()

    return score, notes
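
To plug this into the queue processor, bind the rubric for the signal you are scoring before handing the function to process_signal_queue. This is a sketch; it assumes the rubric_mappings field returned by /tpl/signals (shown in the TypeScript Signal interface above), and signal ID 1 is a placeholder:

python
from functools import partial

bot = TPLBot()
signals = bot.get_signals()["data"]["signals"]
target = next(s for s in signals if s["id"] == 1)  # pick the signal you want to score

# Bind the rubric so the scorer matches the (details) -> (score, notes) signature
scorer = partial(ai_assisted_scorer, signal_rubric=target["rubric_mappings"])
bot.process_signal_queue(signal_id=target["id"], scoring_function=scorer)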

Batch Processing

For large-scale scoring operations:

python
import asyncio

async def process_single_signal(bot: TPLBot, signal_id: int, scoring_function):
    """Run the synchronous queue processor in a worker thread."""
    await asyncio.to_thread(bot.process_signal_queue, signal_id, scoring_function)

async def batch_process_signals(bot: TPLBot, signal_ids: list[int], scoring_function):
    """Process multiple signals concurrently."""
    tasks = [
        asyncio.create_task(process_single_signal(bot, signal_id, scoring_function))
        for signal_id in signal_ids
    ]

    results = await asyncio.gather(*tasks, return_exceptions=True)
    return results
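
Run it from a synchronous entry point, reusing the example scorer defined earlier (the signal IDs here are placeholders):

python
bot = TPLBot()
asyncio.run(batch_process_signals(bot, [1, 2], example_scorer))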

Rate Limiting and Best Practices

Request Limits

Limit Type             | Recommendation
POST requests          | 30-120 per minute
GET requests           | 60-240 per minute
Concurrent connections | 5-10 max
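
Beyond the fixed rate_limit delay in process_signal_queue, a small reusable throttle can keep you inside these recommendations. This is a sketch; the 30-per-minute figure mirrors the POST row above and can be tuned:

python
import time

class Throttle:
    """Enforce a minimum interval between calls."""

    def __init__(self, max_per_minute: int):
        self.interval = 60.0 / max_per_minute
        self.last_call = 0.0

    def wait(self):
        elapsed = time.monotonic() - self.last_call
        if elapsed < self.interval:
            time.sleep(self.interval - elapsed)
        self.last_call = time.monotonic()

# Example: cap POST traffic at 30 requests per minute
post_throttle = Throttle(max_per_minute=30)
post_throttle.wait()  # call before each submit_signal_entry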

Error Handling

python
from requests.exceptions import HTTPError
import time

def retry_with_backoff(func, max_retries: int = 3):
    """Retry a function with exponential backoff."""
    for attempt in range(max_retries):
        try:
            return func()
        except HTTPError as e:
            if e.response.status_code >= 500:
                wait_time = (2 ** attempt)
                print(f"Server error, retrying in {wait_time}s...")
                time.sleep(wait_time)
            else:
                raise
    raise Exception("Max retries exceeded")

Audit Logging

Always maintain logs of your bot activity:

python
import csv
from datetime import datetime

def log_submission(project_id: int, score: int, success: bool, error: str | None = None):
    """Log each submission for auditing."""
    with open("submissions.csv", "a", newline="") as f:
        writer = csv.writer(f)
        writer.writerow([
            datetime.utcnow().isoformat(),
            project_id,
            score,
            success,
            error or ""
        ])
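
One way to wire this into the submission path is a thin wrapper around submit_signal_entry (a sketch; submit_and_log is a hypothetical helper, not part of the API):

python
def submit_and_log(bot: TPLBot, signal_id: int, project_id: int, score: int, notes: str):
    """Submit a score and record the outcome in submissions.csv."""
    try:
        bot.submit_signal_entry(signal_id, project_id, score, notes)
        log_submission(project_id, score, success=True)
    except Exception as e:
        log_submission(project_id, score, success=False, error=str(e))
        raise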

Security Considerations

  • Store API keys in environment variables, never in code
  • Use separate API keys for development and production
  • Implement rate limiting to avoid overloading the API
  • Log all submissions for audit purposes
  • Validate AI-generated scores before submission (see the sketch after this list)
  • Monitor for unusual patterns that might indicate errors
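
For the validation point above, a minimal sketch (the 1-5 range matches the scale used in the LLM prompt earlier; adjust it to the signal's actual rubric):

python
def validate_entry(score: int, notes: str) -> bool:
    """Reject out-of-range scores and empty justifications before submitting."""
    if not isinstance(score, int) or not 1 <= score <= 5:
        return False
    if not notes.strip():
        return False
    return True

Call it on the (score, notes) pair returned by your scoring function and skip or flag the entry when it returns False.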
