Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
232 changes: 186 additions & 46 deletions .env.example
Original file line number Diff line number Diff line change
@@ -1,62 +1,202 @@
# For a full list of backend/worker settings and their descriptions, see common/settings.py
# Example environment configuration file
# Copy this to .env and fill in the appropriate values

DOCKER_BUILDER_CONTAINER=minute
APP_NAME=minute
ENVIRONMENT=local
APP_URL=http://localhost:3000
BACKEND_HOST=http://localhost:8080
# ==================================================
# AWS
# ==================================================

# === AWS ===
AWS_ACCOUNT_ID=<account_id>
AWS_REGION=eu-west-2
DATA_S3_BUCKET=i-dot-ai-dev-minute-data
# AWS account ID
# AWS_ACCOUNT_ID=<please specify a value>

# We get these using aws-vault, docker compose picks them up from the environment variables
AWS_ACCESS_KEY_ID=<>
AWS_SECRET_ACCESS_KEY=<>
AWS_SESSION_TOKEN=<>
# AWS region
# AWS_REGION=<please specify a value>

# === Azure LLM config ===
AZURE_OPENAI_API_KEY=<>
AZURE_OPENAI_ENDPOINT=<>
AZURE_DEPLOYMENT=<>
AZURE_API_VERSION=<>
# ==================================================
# APPLICATION
# ==================================================

# === select transcription services ===
TRANSCRIPTION_SERVICES=["azure_stt_synchronous", "azure_stt_batch"]
# used for CORS origin validation
# APP_URL=<please specify a value>

# === Azure transcription config ===
AZURE_SPEECH_KEY=<api key to access azure speech>
AZURE_SPEECH_REGION=<region of azure speech>
AZURE_BLOB_CONNECTION_STRING=<used by azure batch transcription to store results>
AZURE_CONTAINER_NAME=<name of container to store azure transcription results>
# use "local" for local development, or "dev", "preprod", or "prod" as appropriate
# ENVIRONMENT=local

# === Google cloud ===
GOOGLE_CLOUD_PROJECT=<>
GOOGLE_CLOUD_LOCATION=<>
GOOGLE_APPLICATION_CREDENTIALS=/app/config/google-credentials.json
GOOGLE_APPLICATION_CREDENTIALS_BASE64=<base64 encoded json of the above. only used in docker container>
# Sentry DSN if using Sentry for telemetry
# SENTRY_DSN=<please specify a value>

# ==================================================
# AZURE OPENAI
# ==================================================

# === postgres ===
POSTGRES_HOST=localhost
POSTGRES_PORT=5432
POSTGRES_DB=minute_db
POSTGRES_USER=postgres
POSTGRES_PASSWORD=insecure
# Azure deployment for openAI
# AZURE_DEPLOYMENT=<please specify a value>

# === queue service ===
QUEUE_NAME=minute-transcription-queue
DEADLETTER_QUEUE_NAME=minute-transcription-queue-deadletter
# Azure API key for openAI
# AZURE_OPENAI_API_KEY=<please specify a value>

# === required to authorise user in local development ===
REPO=minute
# Azure OpenAI API version
# AZURE_OPENAI_API_VERSION=<please specify a value>

# === other ===
SENTRY_DSN=placeholder
POSTHOG_API_KEY=<>
# Azure OpenAI service endpoint URL
# AZURE_OPENAI_ENDPOINT=<please specify a value>

# ==================================================
# AZURE SPEECH
# ==================================================

DISABLE_AUTH_SIGNATURE_VERIFICATION=true # Only ever set to true in local environments
# Azure STT speech key for API
# AZURE_SPEECH_KEY=<please specify a value>

# Region for Azure STT
# AZURE_SPEECH_REGION=<please specify a value>

# ==================================================
# CONTENT FILTERING
# ==================================================

# Transcript must have at least this many words to be passed to the complex summary stage. Note: this is disabled by default, as it is lower than MIN_WORD_COUNT_FOR_SUMMARY
# MIN_WORD_COUNT_FOR_FULL_SUMMARY=199

# Transcript must have at least this many words to be passed to summary stage
# MIN_WORD_COUNT_FOR_SUMMARY=200

# ==================================================
# FEATURES
# ==================================================

# List of template names available in beta. These are currently made available via a Posthog feature flag
# BETA_TEMPLATE_NAMES='[]'

# Should the LLM check for hallucinations? Note that the results of this are currently not surfaced in the UI
# HALLUCINATION_CHECK=False

# ==================================================
# GOOGLE CLOUD
# ==================================================

# Path to Google Cloud service account credentials JSON file
# GOOGLE_APPLICATION_CREDENTIALS=<please specify a value>

# Google Cloud region/location
# GOOGLE_CLOUD_LOCATION=<please specify a value>

# Google Cloud project ID
# GOOGLE_CLOUD_PROJECT=<please specify a value>

# ==================================================
# LLM CONFIGURATION
# ==================================================

# Best LLM model name to use. Note that this should be used for higher complexity LLM tasks, like initial minute generation.
# BEST_LLM_MODEL_NAME=gemini-2.5-flash

# Best LLM provider to use. Currently 'openai' or 'gemini' are supported. Note that this should be used for higher complexity LLM tasks, like initial minute generation.
# BEST_LLM_PROVIDER=gemini

# Fast LLM model name to use. Note that this should be used for low complexity LLM tasks
# FAST_LLM_MODEL_NAME=gemini-2.5-flash-lite

# Fast LLM provider to use. Currently 'openai' or 'gemini' are supported. Note that this should be used for low complexity LLM tasks, like AI edits
# FAST_LLM_PROVIDER=gemini

# ==================================================
# LOCALSTACK
# ==================================================

# LocalStack service URL for local AWS services emulation
# LOCALSTACK_URL=http://localhost:4566

# Use LocalStack for local AWS services emulation in dev
# USE_LOCALSTACK=True

# ==================================================
# POSTHOG
# ==================================================

# PostHog API key for analytics
# POSTHOG_API_KEY=<please specify a value>

# PostHog service host URL
# POSTHOG_HOST=https://eu.i.posthog.com

# ==================================================
# QUEUE SERVICES
# ==================================================

# Azure service bus connection string
# AZURE_SB_CONNECTION_STRING=<please specify a value>

# ==================================================
# QUEUE NAMES
# ==================================================

# deadletter queue name to use for SQS. Ignored if using Azure Service Bus.
# DEADLETTER_QUEUE_NAME=<please specify a value>

# queue name to use for SQS/Azure Service Bus queues
# QUEUE_NAME=<please specify a value>

# Queue service type to communicate with worker. Currently supported are: sqs, azure-service-bus
# QUEUE_SERVICE_NAME=sqs

# ==================================================
# RAY
# ==================================================

# Ray dashboard host IP address. Use '0.0.0.0' if running inside docker
# RAY_DASHBOARD_HOST=127.0.0.1

# ==================================================
# STORAGE
# ==================================================

# Azure Blob Storage connection string
# AZURE_BLOB_CONNECTION_STRING=<please specify a value>

# Azure container name for transcription result files. Note that Azure Batch transcription requires this.
# AZURE_TRANSCRIPTION_CONTAINER_NAME=<please specify a value>

# Azure container name for uploaded files
# AZURE_UPLOADS_CONTAINER_NAME=<please specify a value>

# S3 bucket name for data storage
# DATA_S3_BUCKET=<please specify a value>

# Storage service type to use for file uploads. Currently supported are: s3, azure-blob
# STORAGE_SERVICE_NAME=s3

# ==================================================
# TRANSCRIPTION
# ==================================================

# List of service names to use for transcription. See backend/services/transcription_services
# TRANSCRIPTION_SERVICES='["azure_stt_synchronous", "azure_stt_batch"]'

# ==================================================
# WORKER CONFIGURATION
# ==================================================

# the number of LLM workers per node
# MAX_LLM_PROCESSES=1

# the number of transcription workers per node
# MAX_TRANSCRIPTION_PROCESSES=1

# ==================================================
# DATABASE
# ==================================================

# PostgreSQL database name
# POSTGRES_DB=<please specify a value>

# PostgreSQL database host
# POSTGRES_HOST=<please specify a value>

# PostgreSQL database password
# POSTGRES_PASSWORD=<please specify a value>

# PostgreSQL database port
# POSTGRES_PORT=<please specify a value>

# PostgreSQL database user
# POSTGRES_USER=<please specify a value>
46 changes: 46 additions & 0 deletions alembic/versions/d3dc5dd94d97_add_user_template.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,46 @@
"""Add user template

Revision ID: d3dc5dd94d97
Revises: 0225e42255a1
Create Date: 2025-09-24 10:01:55.374936

"""

from collections.abc import Sequence

import sqlalchemy as sa
import sqlmodel

from alembic import op

# revision identifiers, used by Alembic.
revision: str = "d3dc5dd94d97"  # this migration's unique ID
down_revision: str | None = "0225e42255a1"  # parent revision in the migration chain
branch_labels: str | Sequence[str] | None = None
depends_on: str | Sequence[str] | None = None


def upgrade() -> None:
    """Create the ``user_template`` table and link ``minute`` rows to it.

    Adds a nullable ``minute.user_template_id`` FK so existing minutes are
    unaffected; deleting a template sets the reference to NULL rather than
    cascading to the minute.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table(
        "user_template",
        sa.Column("id", sa.Uuid(), server_default=sa.text("gen_random_uuid()"), nullable=False),
        sa.Column("created_datetime", sa.TIMESTAMP(timezone=True), server_default=sa.text("now()"), nullable=False),
        sa.Column("updated_datetime", sa.TIMESTAMP(timezone=True), server_default=sa.text("now()"), nullable=False),
        sa.Column("name", sqlmodel.sql.sqltypes.AutoString(), nullable=False),
        sa.Column("content", sqlmodel.sql.sqltypes.AutoString(), nullable=False),
        sa.Column("user_id", sa.Uuid(), nullable=True),
        sa.ForeignKeyConstraint(["user_id"], ["user.id"]),
        sa.PrimaryKeyConstraint("id"),
    )
    op.add_column("minute", sa.Column("user_template_id", sa.Uuid(), nullable=True))
    # Name the constraint explicitly so downgrade() can drop it by name on any
    # backend. Passing None here relies on the database's default naming
    # (PostgreSQL: "<table>_<column>_fkey"), which downgrade() hard-codes.
    op.create_foreign_key(
        "minute_user_template_id_fkey",
        "minute",
        "user_template",
        ["user_template_id"],
        ["id"],
        ondelete="SET NULL",
    )
    # ### end Alembic commands ###


def downgrade() -> None:
    """Reverse the user_template migration: unlink minutes, then drop the table."""
    # ### commands auto generated by Alembic - please adjust! ###
    # NOTE(review): upgrade() creates this FK with name=None, so the hard-coded
    # name below assumes PostgreSQL's default "<table>_<column>_fkey" — confirm
    # before running this downgrade on a non-PostgreSQL backend.
    op.drop_constraint("minute_user_template_id_fkey", "minute", type_="foreignkey")
    op.drop_column("minute", "user_template_id")
    # Drop the table last, once nothing references it.
    op.drop_table("user_template")
    # ### end Alembic commands ###
Original file line number Diff line number Diff line change
@@ -0,0 +1,35 @@
"""Add description to user template

Revision ID: fe0e69c8d4db
Revises: d3dc5dd94d97
Create Date: 2025-09-29 15:07:05.141384

"""

from collections.abc import Sequence

import sqlalchemy as sa
import sqlmodel

from alembic import op

# revision identifiers, used by Alembic.
revision: str = "fe0e69c8d4db"  # this migration's unique ID
down_revision: str | None = "d3dc5dd94d97"  # parent revision in the migration chain
branch_labels: str | Sequence[str] | None = None
depends_on: str | Sequence[str] | None = None


def upgrade() -> None:
    """Add a non-null ``description`` column to the ``user_template`` table."""
    # server_default="" lets rows that already exist satisfy NOT NULL
    # without a separate data backfill step.
    description_column = sa.Column(
        "description",
        sqlmodel.sql.sqltypes.AutoString(),
        nullable=False,
        server_default="",
    )
    op.add_column("user_template", description_column)


def downgrade() -> None:
    """Remove the ``description`` column added by this revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column("user_template", "description")
    # ### end Alembic commands ###
7 changes: 6 additions & 1 deletion backend/api/routes/minutes.py
Original file line number Diff line number Diff line change
Expand Up @@ -59,7 +59,12 @@ async def create_minute(
transcription = await session.get(Transcription, transcription_id)
if not transcription or transcription.user_id != user.id:
raise HTTPException(404, "Not found")
minute = Minute(transcription_id=transcription_id, template_name=request.template_name, agenda=request.agenda)
minute = Minute(
transcription_id=transcription_id,
template_name=request.template_name,
agenda=request.agenda,
user_template_id=request.template_id,
)
session.add(minute)
minute_version = MinuteVersion(id=uuid.uuid4(), minute_id=minute.id)
session.add(minute_version)
Expand Down
Loading
Loading