Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
45 changes: 45 additions & 0 deletions .github/workflows/ci.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,45 @@
name: CI

on:
push:
branches: [main]
pull_request:
branches: [main]

jobs:
lint:
runs-on: ubuntu-latest
defaults:
run:
working-directory: Project
steps:
- uses: actions/checkout@v4
- uses: actions/setup-python@v5
with:
python-version: "3.12"
- run: pip install ruff
- run: ruff check .
- run: ruff format --check .

test:
runs-on: ubuntu-latest
defaults:
run:
working-directory: Project
steps:
- uses: actions/checkout@v4
- uses: actions/setup-python@v5
with:
python-version: "3.12"
cache: pip
- run: |
pip install -r requirements.txt
pip install pytest pytest-cov httpx
- run: pytest tests/ -v --tb=short

docker:
runs-on: ubuntu-latest
needs: [lint, test]
steps:
- uses: actions/checkout@v4
- run: docker compose -f Project/docker-compose.yml build
16 changes: 14 additions & 2 deletions Project/.gitignore
Original file line number Diff line number Diff line change
@@ -1,5 +1,15 @@
# Data
/data/
data/

# MLflow
mlruns/

# Models (track via MLflow artifacts, not git)
*.pkl

# Reports
reports/*.html

# Mac OS-specific storage files
.DS_Store
Expand Down Expand Up @@ -185,7 +195,9 @@ cython_debug/
# PyPI configuration file
.pypirc

# Pixi env
# Pixi env
*.lock
pixi.toml

# Docker
docker-compose.override.yml

118 changes: 59 additions & 59 deletions Project/Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -3,100 +3,100 @@
#################################################################################

PROJECT_NAME = LeanAI
PYTHON_VERSION = 3.12.9
PYTHON_VERSION = 3.12
PYTHON_INTERPRETER = python

#################################################################################
# COMMANDS #
#################################################################################


## Install Python dependencies
.PHONY: requirements
requirements:
uv pip install -r requirements.txt


pip install -r requirements.txt

## Install dev dependencies
.PHONY: dev
dev:
pip install -r requirements.txt
pip install pytest pytest-cov ruff httpx

## Delete all compiled Python files
.PHONY: clean
clean:
find . -type f -name "*.py[co]" -delete
find . -type d -name "__pycache__" -delete
## Run tests
.PHONY: test
test:
pytest tests/ -v --tb=short

## Run tests with coverage
.PHONY: coverage
coverage:
pytest tests/ -v --cov=. --cov-report=term-missing

## Lint using ruff (use `make format` to do formatting)
## Lint using ruff
.PHONY: lint
lint:
ruff format --check
ruff check
ruff check .
ruff format --check .

## Format source code with ruff
.PHONY: format
format:
ruff check --fix
ruff format


ruff check --fix .
ruff format .

## Delete all compiled Python files
.PHONY: clean
clean:
find . -type f -name "*.py[co]" -delete
find . -type d -name "__pycache__" -delete

## Process dataset
.PHONY: data
data:
$(PYTHON_INTERPRETER) dataset.py

## Set up Python interpreter environment
.PHONY: create_environment
create_environment:
uv venv --python $(PYTHON_VERSION)
@echo ">>> New uv virtual environment created. Activate with:"
@echo ">>> Windows: .\\\\.venv\\\\Scripts\\\\activate"
@echo ">>> Unix/macOS: source ./.venv/bin/activate"

## Generate plots
.PHONY: plots
plots:
$(PYTHON_INTERPRETER) plots.py

## Train models
.PHONY: train
train:
$(PYTHON_INTERPRETER) -m modeling.train

## Run API locally
.PHONY: api
api:
uvicorn api.main:app --host 0.0.0.0 --port 8000 --reload

#################################################################################
# PROJECT RULES #
# DOCKER #
#################################################################################

# Declare every docker-* command target as phony so a file with the same
# name can never shadow it; docker-api and docker-train were missing.
.PHONY: docker-build docker-up docker-down docker-api docker-train

## Make dataset
.PHONY: data
data: requirements
$(PYTHON_INTERPRETER) leanai/dataset.py

## Train model

.PHONY: build up down api jupyter train clean
## Build all Docker containers
docker-build:
docker compose build

# Build all containers
build:
docker-compose build
## Start all containers
docker-up:
docker compose up -d

# Run all containers
up:
docker-compose up -d
## Stop all containers
docker-down:
docker compose down

# Down all containers
down:
docker-compose down
## Run API container
docker-api:
docker compose up -d backend

# Run API
api:
docker-compose up -d backend

# Run Jupyter
jupyter:
docker-compose up -d jupyter

# Fit model
train:
docker-compose run --rm train

# clean models
clean:
rm -rf models/*
## Train via Docker
# `--profile` is a top-level `docker compose` flag, not a flag of the `run`
# subcommand — `docker compose run --profile …` fails with an unknown-flag
# error. Activate the profile before the subcommand instead.
docker-train:
	docker compose --profile train run --rm train

#################################################################################
# Self Documenting Commands #
# HELP #
#################################################################################

.DEFAULT_GOAL := help
Expand Down
1 change: 0 additions & 1 deletion Project/__init__.py
Original file line number Diff line number Diff line change
@@ -1 +0,0 @@
from leanai import config # noqa: F401
74 changes: 40 additions & 34 deletions Project/api/main.py
Original file line number Diff line number Diff line change
@@ -1,45 +1,60 @@
from fastapi import FastAPI, Request, Form
import logging
from os import getenv
from pathlib import Path

from fastapi import FastAPI, Form, Request
from fastapi.middleware.cors import CORSMiddleware
from fastapi.templating import Jinja2Templates
import pickle
import numpy as np
from pathlib import Path
from os import getenv
import joblib
import numpy as np

logger = logging.getLogger(__name__)

# Create FastAPI instance
app = FastAPI(
title="Body Fat Prediction API",
description="This API predicts body fat percentage based on body measurements.",
version="1.0.0",
description="Predicts body fat percentage based on body measurements.",
version="2.0.0",
)

ALLOWED_ORIGINS = getenv("ALLOWED_ORIGINS", "http://localhost:3000,http://localhost:8000").split(
","
)

# Enable CORS for external requests
app.add_middleware(
CORSMiddleware,
allow_origins=["*"], # Allow all origins
allow_origins=ALLOWED_ORIGINS,
allow_credentials=True,
allow_methods=["*"], # Allow all HTTP methods (GET, POST, etc.)
allow_headers=["*"], # Allow all headers
allow_methods=["GET", "POST"],
allow_headers=["Content-Type", "Accept"],
)

# Load Jinja2 templates
templates = Jinja2Templates(directory="api/templates")

# Load model
MODEL_PATH = Path(getenv("MODEL_PATH", "/app/models/model.pkl"))

if MODEL_PATH.exists():
model = joblib.load(MODEL_PATH)
logger.info("Model loaded from %s", MODEL_PATH)
else:
model = None
logger.warning("Model not found at %s", MODEL_PATH)


# Define routes
@app.get("/")
def form_page(request: Request):
return templates.TemplateResponse("form.html", {"request": request})
return templates.TemplateResponse(request, "form.html")


@app.get("/health")
def health_check():
    """Liveness probe: always reports healthy, plus whether the model artifact was loaded at startup."""
    loaded = model is not None
    return {"status": "healthy", "model_loaded": loaded}

@app.post("/predict/", summary="Predict body fat percentage", description="Send body measurements to get a body fat percentage prediction.")

@app.post(
"/predict/",
summary="Predict body fat percentage",
description="Send body measurements to get a body fat percentage prediction.",
)
async def predict(
request: Request,
abdomen: float = Form(..., description="Abdomen circumference (cm)"),
Expand All @@ -51,27 +66,18 @@ async def predict(
neck: float = Form(..., description="Neck circumference (cm)"),
):
if model is None:
return {"error": "Model not found"}
return {"error": "Model not loaded. Check MODEL_PATH configuration."}

# Create array with features
features = np.array([[abdomen, hip, weight, thigh, knee, biceps, neck]])
print(features)

# Ensure model has a predict function
if not hasattr(model, "predict"):
return {"error": f"Invalid model type: {type(model)}"}
return {"error": f"Invalid model type: {type(model).__name__}"}

# Get prediction
features = np.array([[abdomen, hip, weight, thigh, knee, biceps, neck]])
prediction = model.predict(features)[0]

# Detect if the request wants JSON or HTML
accept_type = request.headers.get("accept", "")
if "application/json" in accept_type:
return {"prediction": round(float(prediction), 2)}

if "application/json" in accept_type: # API request
return {"prediction": float(prediction)}

# Otherwise, return the HTML page with the prediction result
return templates.TemplateResponse("form.html", {
"request": request,
"prediction": prediction
})
return templates.TemplateResponse(
request, "form.html", {"prediction": round(float(prediction), 2)}
)
Loading
Loading