Skip to content

Commit 4b2dc59

Browse files
authored
Merge pull request #2 from plexe-ai/update
feat: Update lib to match Plexe endpoints
2 parents 60bfed3 + f07106f commit 4b2dc59

5 files changed

Lines changed: 204 additions & 109 deletions

File tree

README.md

Lines changed: 103 additions & 28 deletions
Original file line numberDiff line numberDiff line change
@@ -1,72 +1,147 @@
1+
<div align="center">
2+
13
# PlexeAI
24

3-
Create ML models from natural language descriptions. Upload your data and describe your ML problem - PlexeAI handles the rest.
5+
<img src="docs/img/plexe-logo.svg" alt="PlexeAI Logo" width="100" height="100"/>
6+
7+
### Create ML models from natural language descriptions
8+
9+
[![PyPI version](https://badge.fury.io/py/plexeai.svg)](https://badge.fury.io/py/plexeai)
10+
[![Python Versions](https://img.shields.io/pypi/pyversions/plexeai.svg)](https://pypi.org/project/plexeai/)
11+
[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
12+
13+
</div>
14+
15+
---
416

5-
## Install
17+
## 🚀 Features
18+
19+
- 🤖 **AI-Powered Model Creation** - Build ML models using natural language descriptions
20+
- 📊 **Automated Training** - Upload your data and let PlexeAI handle the rest
21+
- **Async Support** - Built-in async interfaces for high-performance applications
22+
- 🔄 **Batch Processing** - Efficient batch prediction capabilities
23+
- 🛠️ **Simple API** - Intuitive interface for both beginners and experts
24+
25+
## 📦 Installation
626

727
```bash
8-
pip install plexeai
28+
pip install plexe
929
```
1030

11-
## Usage
31+
## 🏃‍♂️ Quickstart
1232

1333
```python
14-
import plexeai
34+
import plexe
1535

16-
# Set via environment or pass to functions
17-
# export PLEXE_API_KEY=your_api_key_here
18-
19-
# Build a model
20-
experiment_id = plexeai.build(
36+
# Create a model in seconds
37+
model_version = plexe.build(
2138
goal="predict customer churn based on usage patterns",
22-
data_files="customer_data.csv", # Single file or list of files
23-
steps=3 # Number of improvement iterations
39+
model_name="churn-predictor",
40+
data_files="customer_data.csv"
2441
)
2542

26-
# Check build status
27-
status = plexeai.get_status(experiment_id)
28-
# {"status": "completed", "result": {...}}
29-
3043
# Make predictions
31-
result = plexeai.infer(
32-
experiment_id=experiment_id,
44+
result = plexe.infer(
45+
model_name="churn-predictor",
46+
model_version=model_version,
3347
input_data={
3448
"usage": 100,
3549
"tenure": 12,
3650
"plan_type": "premium"
3751
}
3852
)
53+
```
54+
55+
## 🎯 Example Use Cases
56+
57+
- 📈 **Churn Prediction**: Predict customer churn using historical data
58+
- 🏷️ **Classification**: Categorize text, images, or any structured data
59+
- 📊 **Regression**: Predict numerical values like sales or pricing
60+
- 🔄 **Time Series**: Forecast trends and patterns in sequential data
61+
62+
## 🔥 Advanced Usage
63+
64+
### Batch Predictions
3965

40-
# Batch predictions
41-
results = plexeai.batch_infer(
42-
experiment_id=experiment_id,
66+
```python
67+
results = plexe.batch_infer(
68+
model_name="churn-predictor",
69+
model_version=model_version,
4370
inputs=[
4471
{"usage": 100, "tenure": 12, "plan_type": "premium"},
4572
{"usage": 50, "tenure": 6, "plan_type": "basic"}
4673
]
4774
)
75+
```
4876

49-
# Async support
77+
### Async Support
78+
79+
```python
5080
async def main():
51-
experiment_id = await plexeai.abuild(
81+
model_version = await plexe.abuild(
5282
goal="predict customer churn",
83+
model_name="churn-predictor",
5384
data_files="customer_data.csv"
5485
)
55-
result = await plexeai.ainfer(
56-
experiment_id=experiment_id,
86+
87+
result = await plexe.ainfer(
88+
model_name="churn-predictor",
89+
model_version=model_version,
5790
input_data={"usage": 100, "tenure": 12}
5891
)
5992
```
6093

61-
## Local Development
94+
### Direct Client Usage
95+
96+
```python
97+
from plexe import PlexeAI
98+
99+
with PlexeAI(api_key="your_api_key_here") as client:
100+
# Upload data
101+
upload_id = client.upload_files("customer_data.csv")
102+
103+
# Create and use model
104+
model_version = client.build(
105+
goal="predict customer churn",
106+
model_name="churn-predictor",
107+
upload_id=upload_id
108+
)
109+
```
110+
111+
## 📚 Documentation
112+
113+
Check out our [comprehensive documentation](https://docs.plexe.ai) for:
114+
- Detailed API reference
115+
- Advanced usage examples
116+
- Best practices
117+
- Tutorials and guides
118+
119+
## 🛠️ Development
62120

63121
```bash
122+
# Clone the repository
64123
git clone https://github.com/plexe-ai/plexe
65124
cd plexe
125+
126+
# Install development dependencies
66127
pip install -e ".[dev]"
128+
129+
# Run tests
67130
pytest
68131
```
69132

70-
## License
133+
## 🤝 Contributing
134+
135+
We welcome contributions! Please see our [Contributing Guide](CONTRIBUTING.md) for details.
136+
137+
## 📄 License
138+
139+
This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details.
140+
141+
---
142+
143+
<div align="center">
144+
145+
Made with ❤️ by [Plexe AI](https://plexe.ai)
71146

72-
MIT License
147+
</div>

docs/img/plexe-logo.svg

Lines changed: 10 additions & 0 deletions
Loading

plexe/__init__.py

Lines changed: 39 additions & 26 deletions
Original file line numberDiff line numberDiff line change
@@ -3,63 +3,76 @@
33
from .client import PlexeAI
44

55
def build(goal: str,
6+
model_name: str,
67
data_files: Optional[Union[str, Path, List[Union[str, Path]]]] = None,
7-
data_dir: Optional[str] = None,
8-
api_key: str = "",
9-
steps: int = 5,
8+
upload_id: Optional[str] = None,
9+
api_key: str = "",
1010
eval_criteria: Optional[str] = None) -> str:
1111
"""Build a new ML model.
1212
1313
Args:
1414
goal: Description of what the model should do
15+
model_name: Name for the model
1516
data_files: Optional path(s) to data file(s) to upload
16-
data_dir: Optional data directory (if files already uploaded)
17+
upload_id: Optional upload_id if files were already uploaded
1718
api_key: API key for authentication
18-
steps: Number of improvement iterations
1919
eval_criteria: Optional evaluation criteria
2020
2121
Returns:
22-
experiment_id: ID of the created experiment
22+
model_version: Version ID of the created model
2323
"""
2424
client = PlexeAI(api_key=api_key)
25-
return client.build(goal=goal, data_files=data_files, data_dir=data_dir,
26-
steps=steps, eval_criteria=eval_criteria)
25+
return client.build(goal=goal, model_name=model_name,
26+
data_files=data_files, upload_id=upload_id,
27+
eval_criteria=eval_criteria)
2728

2829
async def abuild(goal: str,
30+
model_name: str,
2931
data_files: Optional[Union[str, Path, List[Union[str, Path]]]] = None,
30-
data_dir: Optional[str] = None,
31-
api_key: str = "",
32-
steps: int = 5,
32+
upload_id: Optional[str] = None,
33+
api_key: str = "",
3334
eval_criteria: Optional[str] = None) -> str:
3435
"""Build a new ML model asynchronously."""
3536
client = PlexeAI(api_key=api_key)
36-
return await client.abuild(goal=goal, data_files=data_files, data_dir=data_dir,
37-
steps=steps, eval_criteria=eval_criteria)
37+
return await client.abuild(goal=goal, model_name=model_name,
38+
data_files=data_files, upload_id=upload_id,
39+
eval_criteria=eval_criteria)
3840

39-
def infer(experiment_id: str, input_data: dict, api_key: str = "") -> dict:
41+
def infer(model_name: str, model_version: str, input_data: dict, api_key: str = "") -> dict:
4042
"""Run inference using a built model."""
4143
client = PlexeAI(api_key=api_key)
42-
return client.infer(experiment_id=experiment_id, input_data=input_data)
44+
return client.infer(model_name=model_name, model_version=model_version, input_data=input_data)
4345

44-
async def ainfer(experiment_id: str, input_data: dict, api_key: str = "") -> dict:
46+
async def ainfer(model_name: str, model_version: str, input_data: dict, api_key: str = "") -> dict:
4547
"""Run inference using a model asynchronously."""
4648
client = PlexeAI(api_key=api_key)
47-
return await client.ainfer(experiment_id=experiment_id, input_data=input_data)
49+
return await client.ainfer(model_name=model_name, model_version=model_version, input_data=input_data)
4850

49-
def batch_infer(experiment_id: str, inputs: List[dict], api_key: str = "") -> List[dict]:
51+
def batch_infer(model_name: str, model_version: str, inputs: List[dict], api_key: str = "") -> List[dict]:
5052
"""Run batch predictions."""
5153
client = PlexeAI(api_key=api_key)
52-
return client.batch_infer(experiment_id=experiment_id, inputs=inputs)
54+
return client.batch_infer(model_name=model_name, model_version=model_version, inputs=inputs)
5355

54-
def get_status(experiment_id: str, api_key: str = "") -> dict:
55-
"""Get status of an experiment build."""
56+
def get_status(model_name: str, model_version: str, api_key: str = "") -> dict:
57+
"""Get status of a model build."""
5658
client = PlexeAI(api_key=api_key)
57-
return client.get_status(experiment_id=experiment_id)
59+
return client.get_status(model_name=model_name, model_version=model_version)
5860

59-
async def aget_status(experiment_id: str, api_key: str = "") -> dict:
60-
"""Get status of an experiment build asynchronously."""
61+
async def aget_status(model_name: str, model_version: str, api_key: str = "") -> dict:
62+
"""Get status of a model build asynchronously."""
6163
client = PlexeAI(api_key=api_key)
62-
return await client.aget_status(experiment_id=experiment_id)
64+
return await client.aget_status(model_name=model_name, model_version=model_version)
65+
66+
def cleanup_upload(upload_id: str, api_key: str = "") -> dict:
67+
"""Clean up uploaded files."""
68+
client = PlexeAI(api_key=api_key)
69+
return client.cleanup_upload(upload_id=upload_id)
70+
71+
async def acleanup_upload(upload_id: str, api_key: str = "") -> dict:
72+
"""Clean up uploaded files asynchronously."""
73+
client = PlexeAI(api_key=api_key)
74+
return await client.acleanup_upload(upload_id=upload_id)
6375

6476
__all__ = ['PlexeAI', 'build', 'abuild', 'infer', 'ainfer',
65-
'batch_infer', 'get_status', 'aget_status']
77+
'batch_infer', 'get_status', 'aget_status',
78+
'cleanup_upload', 'acleanup_upload']

0 commit comments

Comments
 (0)