import json

from openai import OpenAI

from flashlearn.skills.general_skill import GeneralSkill
from flashlearn.skills.learn_skill import LearnSkill
# Instantiate your pipeline "estimator" or "transformer" learner.
learner = LearnSkill(model_name="gpt-4o-mini", client=OpenAI())

# Define your custom task instructions and sample data (if available).
skill = learner.learn_skill(
    df=[],  # You can pass an initial dataset here if needed
    task=(
        "Evaluate how likely the user is to buy my product based on the sentiment in their comment, "
        "return an integer 1-100 on key 'likely_to_buy', and a short explanation on key 'reason'."
    ),
)

# Save the learned skill definition for future use.
skill.save("evaluate_buy_comments_skill.json")

# Example input: list of dictionaries representing user comments.
user_inputs = [
    {"comment_text": "I love this product, it's everything I wanted!"},
    {"comment_text": "Not impressed... wouldn't consider buying this."},
]

# To reuse the saved skill, load its JSON definition and rebuild the skill object.
with open("evaluate_buy_comments_skill.json", "r", encoding="utf-8") as file:
    definition = json.load(file)
skill = GeneralSkill.load_skill(definition)

# Create tasks from the list of user inputs, then run them in parallel
# and collect the structured results.
tasks = skill.create_tasks(user_inputs)
results = skill.run_tasks_in_parallel(tasks)
print("Evaluation Results:")
print(results)

# Iterate the structured LLM outputs, keyed by task index.
# (Fix: the original looped over an undefined name 'flash_results';
# the results dictionary is bound to 'results' above.)
for idx, result in results.items():
    desired_score = result["likely_to_buy"]
    reason_text = result["reason"]
    # Now do something with the score and reason, e.g., store in DB or pass to next step.
    print(f"Comment #{idx} => Score: {desired_score}, Reason: {reason_text}")
Modular APIs that connect smoothly with your existing enterprise systems.
Provides uniform JSON outputs for seamless downstream integration.
Handles 1,000 calls per second for large-scale operations.
Works with on-prem models and popular providers like OpenAI, LiteLLM, and Ollama.