Compare commits
19 Commits
e049e4e104
...
8f99d47af9
| Author | SHA1 | Date | |
|---|---|---|---|
| 8f99d47af9 | |||
| 1d36f196ca | |||
| 9f40ef93b7 | |||
| fda3e86a71 | |||
| d5303ad201 | |||
| 245be63f07 | |||
| 068df7715e | |||
| 95498180a2 | |||
| 7b6ba79417 | |||
| a228325d74 | |||
| e59ae66b85 | |||
| e524be15e5 | |||
| 2d90a4caac | |||
| 4e8c2b4f5d | |||
| 0ddeb91755 | |||
| cb3b98db10 | |||
| 487562042f | |||
| ac46518cf5 | |||
| c03fb17a5c |
30
README.md
30
README.md
@@ -2,6 +2,10 @@
|
||||
|
||||
这是一个基于FastAPI的聊天API服务,使用OpenAI格式的请求来调用pipeline.invoke方法进行聊天。
|
||||
|
||||
## Docker Installation
|
||||
|
||||
For production deployment using Docker, see the [Installation Guide](README_INSTALL.md).
|
||||
|
||||
## 安装依赖
|
||||
|
||||
```bash
|
||||
@@ -140,15 +144,35 @@ npm install
|
||||
|
||||
### Start the `front_apis` server
|
||||
|
||||
The frontend talks to the `front_apis` FastAPI service, which by default listens on `http://127.0.0.1:8001`.
|
||||
The frontend talks to the `front_apis` FastAPI service, which by default listens on `http://127.0.0.1:8500`.
|
||||
|
||||
From the project root:
|
||||
|
||||
```bash
|
||||
uvicorn fastapi_server.front_apis:app --reload --host 0.0.0.0 --port 8001
|
||||
uvicorn fastapi_server.front_apis:app --reload --host 0.0.0.0 --port 8500
|
||||
```
|
||||
|
||||
You can change the URL by setting `VITE_FRONT_API_BASE_URL` in `frontend/.env` (defaults to `http://127.0.0.1:8001`).
|
||||
Or run directly:
|
||||
```bash
|
||||
python fastapi_server/front_apis.py
|
||||
```
|
||||
|
||||
### Backend run modes
|
||||
|
||||
Run whichever backend mode you need from the project root:
|
||||
|
||||
```bash
|
||||
# admin/control plane only (/v1/... frontend APIs)
|
||||
uvicorn fastapi_server.front_apis:app --reload --host 0.0.0.0 --port 8500
|
||||
|
||||
# DashScope chat runtime only (/apps/... and /v1/apps/... APIs)
|
||||
uvicorn fastapi_server.server_dashscope:app --reload --host 0.0.0.0 --port 8588
|
||||
|
||||
# combined mode: one process serves both front_apis + DashScope endpoints
|
||||
uvicorn fastapi_server.combined:app --reload --host 0.0.0.0 --port 8500
|
||||
```
|
||||
|
||||
You can change the URL by setting `VITE_FRONT_API_BASE_URL` in `frontend/.env` (defaults to `http://127.0.0.1:8500`).
|
||||
|
||||
### Start the development server
|
||||
|
||||
|
||||
267
README_INSTALL.md
Normal file
267
README_INSTALL.md
Normal file
@@ -0,0 +1,267 @@
|
||||
# Installation Guide
|
||||
|
||||
This guide explains how to install and run the LangChain Agent application using Docker.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- Docker (version 20.10 or later)
|
||||
- Docker Compose (version 2.0 or later, or use `docker compose` command)
|
||||
|
||||
## Quick Start
|
||||
|
||||
1. **Run the installation script:**
|
||||
```bash
|
||||
./scripts/shell_scripts/install.sh
|
||||
```
|
||||
|
||||
This script will:
|
||||
- Check for required tools (Docker, docker-compose)
|
||||
- Create a `.env` file with default configuration
|
||||
- Build Docker images (or use pre-loaded images)
|
||||
- Start all services (PostgreSQL, Backend API, Nginx)
|
||||
|
||||
2. **Access the application:**
|
||||
- Frontend: http://localhost (or http://localhost:80)
|
||||
- Backend API: http://localhost:8500
|
||||
- Database: localhost:5432
|
||||
|
||||
## Installation for China / Offline Use
|
||||
|
||||
If Docker Hub is slow or inaccessible in your region:
|
||||
|
||||
### Option 1: Use Chinese Docker Mirrors
|
||||
|
||||
Configure Docker to use Chinese registry mirrors:
|
||||
|
||||
```bash
|
||||
sudo tee /etc/docker/daemon.json <<EOF
|
||||
{
|
||||
"registry-mirrors": [
|
||||
"https://registry.docker-cn.com",
|
||||
"https://mirror.ccsogou.com",
|
||||
"https://docker.1ms.run"
|
||||
]
|
||||
}
|
||||
EOF
|
||||
sudo systemctl daemon-reload
|
||||
sudo systemctl restart docker
|
||||
```
|
||||
|
||||
Then run `./scripts/shell_scripts/install.sh`
|
||||
|
||||
### configuring '.env' and 'frontend/.env'
|
||||
```bash
|
||||
ALI_API_KEY="API_KEY_FOR_ALI_QWEN"
|
||||
ALI_BASE_URL="https://dashscope.aliyuncs.com/compatible-mode/v1"
|
||||
POSTGRES_ROOT_PASSWORD="ROOT_PASSWORD_FOR_DB_IN_CONN_STR - required for installation"
|
||||
POSTGRES_PASSWORD="USER_PASSWORD_FOR_DB_CONN_STR - required for installation" # must match the user password embedded in CONN_STR
|
||||
CONN_STR="CONNECTION_STRING_TO_DATABASE" # DOCKER PASSWORD
|
||||
FAST_AUTH_KEYS="API_KEY_FOR_OTHER_APPLICATIONS_TO_USE_BUILT_PIPELINE"
|
||||
DAYTONA_API_KEY="DAYTONA_CONFIG - NOT REQUIRED"
|
||||
```
|
||||
|
||||
### Option 2: Pre-load Docker Images Offline
|
||||
|
||||
1. On a machine with good Docker Hub access, run:
|
||||
```bash
|
||||
./scripts/shell_scripts/download_images.sh
|
||||
```
|
||||
This creates `images.tar` with all required images.
|
||||
|
||||
2. Transfer `images.tar` to your target machine.
|
||||
|
||||
3. Load the images:
|
||||
```bash
|
||||
docker load < images.tar
|
||||
```
|
||||
|
||||
4. Run the install script:
|
||||
```bash
|
||||
./scripts/shell_scripts/install.sh
|
||||
```
|
||||
|
||||
## Manual Installation
|
||||
|
||||
If you prefer to set up manually:
|
||||
|
||||
1. **Create environment file:**
|
||||
```bash
|
||||
cp .env.example .env # Edit as needed
|
||||
```
|
||||
|
||||
2. **Build and start services:**
|
||||
```bash
|
||||
cd docker
|
||||
docker compose -f docker-compose.prod.yml up -d --build
|
||||
```
|
||||
|
||||
3. **Check service status:**
|
||||
```bash
|
||||
cd docker
|
||||
docker compose -f docker-compose.prod.yml ps
|
||||
```
|
||||
|
||||
## Configuration
|
||||
|
||||
Edit the `.env` file to customize:
|
||||
|
||||
- `POSTGRES_DB`: Database name (default: `ai_conversations`)
|
||||
- `POSTGRES_USER`: Database user (default: `myapp_user`)
|
||||
- `POSTGRES_PASSWORD`: Database password (default: `secure_password_123`)
|
||||
- `POSTGRES_PORT`: PostgreSQL port (default: `5432`)
|
||||
- `BACKEND_PORT`: Backend API port (default: `8500`)
|
||||
- `FRONTEND_PORT`: Frontend web server port (default: `80`)
|
||||
|
||||
## Database Initialization
|
||||
|
||||
The database is automatically initialized when the PostgreSQL container starts for the first time. The following SQL scripts are executed in order:
|
||||
|
||||
1. `scripts/init_database/00_init_user.sh` - Creates database user and database
|
||||
2. `scripts/init_database/create_conv_store.sql` - Creates conversation storage tables
|
||||
3. `scripts/init_database/create_prompt_config.sql` - Creates prompt configuration tables
|
||||
|
||||
## Service Management
|
||||
|
||||
All commands run from the `docker/` directory:
|
||||
|
||||
### View logs:
|
||||
```bash
|
||||
cd docker
|
||||
docker compose -f docker-compose.prod.yml logs -f
|
||||
|
||||
# Specific service
|
||||
docker compose -f docker-compose.prod.yml logs -f backend
|
||||
docker compose -f docker-compose.prod.yml logs -f postgres
|
||||
docker compose -f docker-compose.prod.yml logs -f nginx
|
||||
```
|
||||
|
||||
### Stop services:
|
||||
```bash
|
||||
cd docker
|
||||
docker compose -f docker-compose.prod.yml down
|
||||
```
|
||||
|
||||
### Restart services:
|
||||
```bash
|
||||
cd docker
|
||||
docker compose -f docker-compose.prod.yml restart
|
||||
```
|
||||
|
||||
### Rebuild after code changes:
|
||||
```bash
|
||||
cd docker
|
||||
docker compose -f docker-compose.prod.yml up -d --build
|
||||
```
|
||||
|
||||
### Reset database (delete all data):
|
||||
```bash
|
||||
cd docker
|
||||
docker compose -f docker-compose.prod.yml down -v
|
||||
docker compose -f docker-compose.prod.yml up -d
|
||||
```
|
||||
|
||||
## Architecture
|
||||
|
||||
The application consists of three main services:
|
||||
|
||||
1. **PostgreSQL** (`postgres`): Database server
|
||||
- Stores conversations and prompt configurations
|
||||
- Automatically initializes schema on first run
|
||||
|
||||
2. **Backend** (`backend`): FastAPI application
|
||||
- Serves API endpoints at port 8500
|
||||
- Handles agent management and chat endpoints
|
||||
- Connects to PostgreSQL database
|
||||
|
||||
3. **Nginx** (`nginx`): Web server
|
||||
- Serves the React frontend (port 80)
|
||||
- Proxies API requests to the backend
|
||||
- Handles static file serving
|
||||
|
||||
## Project Structure
|
||||
|
||||
```
|
||||
langchain-agent/
|
||||
├── docker/
|
||||
│ ├── docker-compose.prod.yml # Production compose file
|
||||
│ └── Dockerfile.prod # Backend Docker image
|
||||
├── scripts/
|
||||
│ ├── shell_scripts/
|
||||
│ │ ├── install.sh # Main installation script
|
||||
│ │ └── download_images.sh # For offline image download
|
||||
│ └── init_database/ # Database initialization scripts
|
||||
├── frontend/ # React frontend
|
||||
├── configs/ # Pipeline configurations
|
||||
├── nginx.conf # Nginx configuration
|
||||
└── .env # Environment variables
|
||||
```
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Database connection issues
|
||||
|
||||
If the backend can't connect to the database:
|
||||
|
||||
1. Check that PostgreSQL is running:
|
||||
```bash
|
||||
docker compose -f docker-compose.prod.yml ps postgres
|
||||
```
|
||||
|
||||
2. Verify the connection string in `.env` matches the database configuration
|
||||
|
||||
3. Check backend logs:
|
||||
```bash
|
||||
docker compose -f docker-compose.prod.yml logs backend
|
||||
```
|
||||
|
||||
### Frontend not loading / NetworkError
|
||||
|
||||
1. Check nginx logs:
|
||||
```bash
|
||||
docker compose -f docker-compose.prod.yml logs nginx
|
||||
```
|
||||
|
||||
2. Ensure frontend is built with correct API base URL. The `frontend/.env` file should contain:
|
||||
```
|
||||
VITE_FRONT_API_BASE_URL=/
|
||||
```
|
||||
Then rebuild: `docker compose -f docker-compose.prod.yml build backend`
|
||||
|
||||
### Port conflicts
|
||||
|
||||
If ports are already in use, update the port mappings in `.env`:
|
||||
|
||||
```bash
|
||||
# Example: use port 5433 for PostgreSQL
|
||||
POSTGRES_PORT=5433
|
||||
```
|
||||
|
||||
## Development
|
||||
|
||||
For development, you may want to run services separately:
|
||||
|
||||
1. Start only PostgreSQL:
|
||||
```bash
|
||||
cd docker
|
||||
docker compose -f docker-compose.prod.yml up -d postgres
|
||||
```
|
||||
|
||||
2. Run backend locally:
|
||||
```bash
|
||||
export CONN_STR="postgresql://myapp_user:secure_password_123@localhost:5432/ai_conversations"
|
||||
python -m uvicorn lang_agent.fastapi_server.combined:app --reload --host 0.0.0.0 --port 8500
|
||||
```
|
||||
|
||||
3. Run frontend locally:
|
||||
```bash
|
||||
cd frontend
|
||||
npm install
|
||||
npm run dev
|
||||
```
|
||||
|
||||
Note: For local frontend development, create a `.env` file in `frontend/` with:
|
||||
```
|
||||
VITE_FRONT_API_BASE_URL=http://localhost:8500
|
||||
```
|
||||
|
||||
|
||||
@@ -1,3 +1,7 @@
|
||||
you are a helpful bot enhanced with skills.
|
||||
You are a helpful bot enhanced with skills.
|
||||
|
||||
To use a skill, read its SKILL.md file using the read_file tool. Skills are NOT tools — they are instructions for using existing tools.
|
||||
Skills with available="false" need dependencies installed first - you can try installing them with apt/brew. You can check whether the environment already provides the packages you need.
|
||||
|
||||
When using a skill, assume required tools (e.g., npx, curl) are available and execute the commands directly. If a command fails because a tool is missing, install the missing dependency using apt/brew and retry.
|
||||
|
||||
For shell commands (e.g., npx, curl), use the execute tool to run them.
|
||||
85
docker/Dockerfile.prod
Normal file
85
docker/Dockerfile.prod
Normal file
@@ -0,0 +1,85 @@
|
||||
# Multi-stage Dockerfile for production deployment
# Stage 1: Build frontend
FROM node:20-alpine AS frontend-builder

WORKDIR /app/frontend

# Copy frontend files
# Copying package manifests before the source keeps the npm-ci layer cacheable.
COPY frontend/package*.json ./
RUN npm ci

COPY frontend/ ./
RUN npm run build

# Stage 2: Python backend
FROM python:3.12-slim

WORKDIR /app

# Install system dependencies
# postgresql-client: psql CLI; curl: used by the HEALTHCHECK below.
RUN apt-get update && apt-get install -y \
    postgresql-client \
    curl \
    && rm -rf /var/lib/apt/lists/*

# Copy Python dependencies
# NOTE(review): `pip install -e .` runs before the package source is copied;
# it appears to work because the code lands at the same /app path afterwards —
# confirm the editable install resolves once lang_agent/ is present.
COPY pyproject.toml ./
RUN pip install --no-cache-dir --upgrade pip && \
    pip install --no-cache-dir -e .

# Copy application code
COPY lang_agent/ ./lang_agent/
COPY configs/ ./configs/
COPY scripts/ ./scripts/
COPY assets/ ./assets/
COPY static/ ./static/

# Copy built frontend from stage 1
COPY --from=frontend-builder /app/frontend/dist ./frontend/dist

# Set environment variables
ENV PYTHONPATH=/app
ENV PYTHONUNBUFFERED=1

# Expose port
EXPOSE 8500

# Health check
HEALTHCHECK --interval=30s --timeout=10s --start-period=40s --retries=3 \
    CMD curl -f http://localhost:8500/health || exit 1

# Create entrypoint script to wait for DB
# Uses Python to check database connection (more reliable than psql)
# The payload is single-quoted, so ${CONN_STR} is written literally into
# /entrypoint.sh and expanded only at *container start* (via the unquoted
# heredoc), not at build time.
# NOTE(review): this relies on /bin/sh's echo interpreting \n escapes (true
# for dash in debian-slim); printf or a COPY'd script file would be sturdier.
RUN echo '#!/bin/bash\n\
set -e\n\
echo "Waiting for database to be ready..."\n\
python3 << EOF\n\
import sys\n\
import time\n\
import psycopg\n\
\n\
max_attempts = 30\n\
conn_str = "${CONN_STR}"\n\
\n\
for i in range(max_attempts):\n\
    try:\n\
        with psycopg.connect(conn_str, connect_timeout=2) as conn:\n\
            with conn.cursor() as cur:\n\
                cur.execute("SELECT 1")\n\
        print("Database is ready!")\n\
        sys.exit(0)\n\
    except Exception as e:\n\
        if i == max_attempts - 1:\n\
            print(f"Warning: Database not ready after {max_attempts * 2} seconds, continuing anyway...")\n\
            print(f"Error: {e}")\n\
            sys.exit(0)\n\
        print(f"Database is unavailable - sleeping (attempt {i+1}/{max_attempts})")\n\
        time.sleep(2)\n\
EOF\n\
exec "$@"' > /entrypoint.sh && chmod +x /entrypoint.sh

ENTRYPOINT ["/entrypoint.sh"]

# Run the combined server
CMD ["python", "-m", "uvicorn", "lang_agent.fastapi_server.combined:app", "--host", "0.0.0.0", "--port", "8500"]
|
||||
|
||||
84
docker/docker-compose.prod.yml
Normal file
84
docker/docker-compose.prod.yml
Normal file
@@ -0,0 +1,84 @@
|
||||
# Production compose stack: PostgreSQL + FastAPI backend + nginx frontend.
version: '3.8'  # NOTE(review): the version key is obsolete under Compose v2 — safe to drop

services:
  # PostgreSQL database
  postgres:
    image: postgres:16-alpine
    container_name: langchain-agent-db
    networks:
      - app-network
    environment:
      # Superuser credentials for the container itself (not the app user).
      POSTGRES_DB: postgres
      POSTGRES_USER: postgres
      POSTGRES_PASSWORD: ${POSTGRES_ROOT_PASSWORD:-postgres_root_password}
      # These are used by init scripts to create the app database
      APP_DB_NAME: ${POSTGRES_DB:-ai_conversations}
      APP_DB_USER: ${POSTGRES_USER:-myapp_user}
      APP_DB_PASSWORD: ${POSTGRES_PASSWORD:-secure_password_123}
    volumes:
      - postgres_data:/var/lib/postgresql/data
      # Init scripts run only on first start, i.e. when postgres_data is empty.
      - ../scripts/init_database:/docker-entrypoint-initdb.d
    ports:
      - "${POSTGRES_PORT:-5432}:5432"
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U postgres"]
      interval: 10s
      timeout: 5s
      retries: 5
    restart: unless-stopped

  # Backend API server
  backend:
    build:
      context: ..
      dockerfile: docker/Dockerfile.prod
    container_name: langchain-agent-backend
    environment:
      - PYTHONPATH=/app
      - PYTHONUNBUFFERED=1
      # Must stay consistent with the APP_DB_* values given to postgres above.
      - CONN_STR=postgresql://${POSTGRES_USER:-myapp_user}:${POSTGRES_PASSWORD:-secure_password_123}@postgres:5432/${POSTGRES_DB:-ai_conversations}
      - POSTGRES_USER=${POSTGRES_USER:-myapp_user}
      - POSTGRES_PASSWORD=${POSTGRES_PASSWORD:-secure_password_123}
      - POSTGRES_DB=${POSTGRES_DB:-ai_conversations}
    ports:
      - "${BACKEND_PORT:-8500}:8500"
    volumes:
      # Host mounts shadow the copies baked into the image for live editing.
      - ../configs:/app/configs
      - ../scripts:/app/scripts
      - ../assets:/app/assets
      - ../static:/app/static
    networks:
      - app-network
    depends_on:
      postgres:
        condition: service_healthy
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8500/health"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 40s

  # Nginx for serving frontend (optional - can also serve via FastAPI)
  nginx:
    image: nginx:alpine
    container_name: langchain-agent-nginx
    networks:
      - app-network
    ports:
      - "${FRONTEND_PORT:-80}:80"
    volumes:
      - ../nginx.conf:/etc/nginx/nginx.conf:ro
      # NOTE(review): this serves the *host's* frontend/dist, not the copy
      # built inside the backend image — `npm run build` must have been run
      # locally for this mount to exist; confirm intended.
      - ../frontend/dist:/usr/share/nginx/html:ro
    depends_on:
      - backend
    restart: unless-stopped

volumes:
  postgres_data:

networks:
  app-network:
    driver: bridge
|
||||
|
||||
76
nginx.conf
Normal file
76
nginx.conf
Normal file
@@ -0,0 +1,76 @@
|
||||
# Nginx front door: serves the built SPA and proxies API traffic to the
# FastAPI backend container (service name "backend" on the compose network).
events {
    worker_connections 1024;
}

http {
    include /etc/nginx/mime.types;
    default_type application/octet-stream;

    sendfile on;
    keepalive_timeout 65;

    # Upstream backend
    upstream backend {
        server backend:8500;
    }

    server {
        listen 80;
        server_name localhost;

        root /usr/share/nginx/html;
        index index.html;

        # Serve frontend static files
        # SPA fallback: unknown paths resolve to index.html for client routing.
        location / {
            try_files $uri $uri/ /index.html;
        }

        # Proxy API requests to backend
        # Upgrade/Connection headers allow WebSocket upgrades through the proxy.
        location /v1/ {
            proxy_pass http://backend;
            proxy_http_version 1.1;
            proxy_set_header Upgrade $http_upgrade;
            proxy_set_header Connection 'upgrade';
            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-Forwarded-Proto $scheme;
            proxy_cache_bypass $http_upgrade;
        }

        # Proxy DashScope API requests
        location /apps/ {
            proxy_pass http://backend;
            proxy_http_version 1.1;
            proxy_set_header Upgrade $http_upgrade;
            proxy_set_header Connection 'upgrade';
            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-Forwarded-Proto $scheme;
            proxy_cache_bypass $http_upgrade;
        }

        # Proxy v1/apps requests
        # NOTE(review): /v1/apps/ is already covered by the /v1/ prefix above
        # with identical settings; redundant unless the two need to diverge.
        location /v1/apps/ {
            proxy_pass http://backend;
            proxy_http_version 1.1;
            proxy_set_header Upgrade $http_upgrade;
            proxy_set_header Connection 'upgrade';
            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-Forwarded-Proto $scheme;
            proxy_cache_bypass $http_upgrade;
        }

        # Health check endpoint
        location /health {
            proxy_pass http://backend/health;
            access_log off;
        }
    }
}
|
||||
|
||||
|
||||
@@ -24,7 +24,10 @@ dependencies = [
|
||||
"commentjson",
|
||||
"pandas",
|
||||
"asgiref",
|
||||
"psycopg[binary]"
|
||||
"psycopg[binary]",
|
||||
"deepagents",
|
||||
"daytona",
|
||||
"langchain_daytona"
|
||||
]
|
||||
|
||||
[tool.setuptools.packages.find]
|
||||
|
||||
40
scripts/init_database/00_init_user.sh
Executable file
40
scripts/init_database/00_init_user.sh
Executable file
@@ -0,0 +1,40 @@
|
||||
#!/bin/bash
# Initialize database user and database
# This script runs before SQL files in docker-entrypoint-initdb.d
# It must be named with 00_ prefix to run first

set -e

# Application-level credentials, overridable via the container environment
# (docker-compose passes APP_DB_* through from .env).
APP_DB_NAME="${APP_DB_NAME:-ai_conversations}"
APP_DB_USER="${APP_DB_USER:-myapp_user}"
APP_DB_PASSWORD="${APP_DB_PASSWORD:-secure_password_123}"

echo "Creating database user: $APP_DB_USER"
# Create user
# $POSTGRES_USER / $POSTGRES_DB come from the official postgres image.
# The \$\$ escapes keep the DO-block dollar-quotes out of shell expansion,
# while $APP_DB_USER / $APP_DB_PASSWORD ARE expanded before psql runs.
# NOTE(review): values are interpolated unquoted into SQL — safe only for
# trusted, identifier-like env values; confirm these never carry user input.
psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" <<-EOSQL
DO \$\$
BEGIN
IF NOT EXISTS (SELECT FROM pg_catalog.pg_user WHERE usename = '$APP_DB_USER') THEN
CREATE USER $APP_DB_USER WITH PASSWORD '$APP_DB_PASSWORD';
END IF;
END
\$\$;
ALTER USER $APP_DB_USER CREATEDB;
EOSQL

echo "Creating database: $APP_DB_NAME"
# Create database
# \gexec executes the generated CREATE DATABASE statement only when the
# SELECT yields a row, i.e. when the database does not exist yet.
psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" <<-EOSQL
SELECT 'CREATE DATABASE $APP_DB_NAME'
WHERE NOT EXISTS (SELECT FROM pg_database WHERE datname = '$APP_DB_NAME')\gexec
GRANT ALL PRIVILEGES ON DATABASE $APP_DB_NAME TO $APP_DB_USER;
EOSQL

echo "Granting schema privileges"
# Grant schema privileges
psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" --dbname "$APP_DB_NAME" <<-EOSQL
GRANT ALL ON SCHEMA public TO $APP_DB_USER;
EOSQL

echo "Database initialization complete!"
|
||||
|
||||
25
scripts/init_database/01_run_sql_files.sh
Executable file
25
scripts/init_database/01_run_sql_files.sh
Executable file
@@ -0,0 +1,25 @@
|
||||
#!/bin/bash
# Run SQL initialization files in the correct database context
# This script runs after 00_init_user.sh creates the database

set -e

APP_DB_NAME="${APP_DB_NAME:-ai_conversations}"

echo "Running SQL initialization files in database: $APP_DB_NAME"

# Apply each bundled SQL file in order, skipping any that were not mounted.
for sql_name in create_conv_store.sql create_prompt_config.sql; do
  sql_path="/docker-entrypoint-initdb.d/$sql_name"
  if [ -f "$sql_path" ]; then
    echo "Executing $sql_name..."
    psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" --dbname "$APP_DB_NAME" -f "$sql_path"
  fi
done

echo "SQL initialization files completed!"
|
||||
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
-- Create the messages table
|
||||
-- This script runs in the ai_conversations database context
|
||||
CREATE TABLE IF NOT EXISTS messages (
|
||||
id BIGSERIAL PRIMARY KEY,
|
||||
conversation_id TEXT NOT NULL,
|
||||
@@ -13,4 +14,8 @@ CREATE TABLE IF NOT EXISTS messages (
|
||||
CREATE INDEX IF NOT EXISTS idx_messages_conversation ON messages (conversation_id, sequence_number);
|
||||
|
||||
-- Index for fast lookup by pipeline_id
|
||||
CREATE INDEX IF NOT EXISTS idx_messages_pipeline ON messages (pipeline_id);
|
||||
CREATE INDEX IF NOT EXISTS idx_messages_pipeline ON messages (pipeline_id);
|
||||
|
||||
-- Grant permissions to app user
|
||||
GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO myapp_user;
|
||||
GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA public TO myapp_user;
|
||||
@@ -1,4 +1,5 @@
|
||||
-- A prompt_set groups a full collection of prompts together.
|
||||
-- This script runs in the ai_conversations database context
|
||||
-- Each pipeline can have many sets (versions, A/B variants, etc.);
|
||||
-- exactly one should be marked is_active per pipeline.
|
||||
CREATE TABLE IF NOT EXISTS prompt_sets (
|
||||
@@ -41,6 +42,10 @@ CREATE TABLE IF NOT EXISTS prompt_templates (
|
||||
CREATE INDEX IF NOT EXISTS idx_prompt_templates_set_id
|
||||
ON prompt_templates(prompt_set_id);
|
||||
|
||||
-- Grant permissions to app user
|
||||
GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO myapp_user;
|
||||
GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA public TO myapp_user;
|
||||
|
||||
-- Seed: initial prompt set for lang_agent/graphs/routing.py
|
||||
-- The pipeline_id can be used by RoutingConfig.pipeline_id to load these prompts.
|
||||
INSERT INTO prompt_sets (pipeline_id, graph_id, name, description, is_active, list)
|
||||
|
||||
49
scripts/init_database/init_all.sh
Executable file
49
scripts/init_database/init_all.sh
Executable file
@@ -0,0 +1,49 @@
|
||||
#!/bin/bash
# Database initialization script
# This script runs all SQL initialization files in the correct order

set -e

# Connection parameters, overridable via the standard POSTGRES_* env vars.
DB_NAME="${POSTGRES_DB:-ai_conversations}"
DB_USER="${POSTGRES_USER:-myapp_user}"
DB_PASSWORD="${POSTGRES_PASSWORD:-secure_password_123}"
DB_HOST="${POSTGRES_HOST:-localhost}"
DB_PORT="${POSTGRES_PORT:-5432}"

# psql reads the password from PGPASSWORD, avoiding an interactive prompt.
export PGPASSWORD="$DB_PASSWORD"

echo "Initializing database: $DB_NAME on $DB_HOST:$DB_PORT"

# Wait for PostgreSQL to be ready
# NOTE(review): this connects as the *app* user to the 'postgres' database —
# the user must already exist and be allowed to connect there; confirm.
until psql -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" -d postgres -c '\q' 2>/dev/null; do
  echo "Waiting for PostgreSQL to be ready..."
  sleep 2
done

echo "PostgreSQL is ready!"

# Create database if it doesn't exist
# \gexec runs the generated CREATE DATABASE only when the SELECT yields a row.
psql -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" -d postgres <<EOF
SELECT 'CREATE DATABASE $DB_NAME'
WHERE NOT EXISTS (SELECT FROM pg_database WHERE datname = '$DB_NAME')\gexec
EOF

# Grant privileges
psql -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" -d postgres <<EOF
GRANT ALL PRIVILEGES ON DATABASE $DB_NAME TO $DB_USER;
EOF

# Run initialization scripts in order
echo "Running database initialization scripts..."

# 1. Create conversation store tables
# NOTE(review): these paths assume execution inside the postgres container
# where scripts/init_database is mounted at /docker-entrypoint-initdb.d —
# confirm this script is not meant to run on the host.
echo "Creating conversation store tables..."
psql -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" -d "$DB_NAME" -f /docker-entrypoint-initdb.d/create_conv_store.sql

# 2. Create prompt configuration tables
echo "Creating prompt configuration tables..."
psql -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" -d "$DB_NAME" -f /docker-entrypoint-initdb.d/create_prompt_config.sql

echo "Database initialization complete!"
|
||||
|
||||
|
||||
@@ -1,3 +1,8 @@
|
||||
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
PROJECT_ROOT="$(dirname "$(dirname "$SCRIPT_DIR")")"
|
||||
|
||||
cd "$PROJECT_ROOT"
|
||||
|
||||
source ~/.bashrc
|
||||
conda init
|
||||
conda activate lang
|
||||
37
scripts/shell_scripts/download_images.sh
Executable file
37
scripts/shell_scripts/download_images.sh
Executable file
@@ -0,0 +1,37 @@
|
||||
#!/bin/bash
# Script to download and package Docker images for offline use
# Run this on a machine with good Docker Hub access, then transfer images.tar to China

set -e

echo "=== Docker Image Downloader for Offline Use ==="
echo ""

# Every base image referenced by the compose stack / Dockerfile.
readonly -a IMAGES=(
  "node:20-alpine"
  "python:3.12-slim"
  "postgres:16-alpine"
  "nginx:alpine"
)

readonly OUTPUT_FILE="images.tar"

echo "Pulling Docker images..."
for image in "${IMAGES[@]}"; do
  echo "  Pulling $image..."
  docker pull "$image"
done

echo ""
echo "Saving to $OUTPUT_FILE..."
docker save "${IMAGES[@]}" -o "$OUTPUT_FILE"

echo ""
echo "Done! File size:"
ls -lh "$OUTPUT_FILE"

echo ""
echo "To transfer to China machine and load:"
echo "  scp images.tar user@china-machine:/path/"
echo "  docker load < images.tar"
|
||||
167
scripts/shell_scripts/install.sh
Executable file
167
scripts/shell_scripts/install.sh
Executable file
@@ -0,0 +1,167 @@
|
||||
#!/bin/bash
# Installation script for LangChain Agent
# This script sets up and runs the entire application stack
# (PostgreSQL, backend API, nginx) via docker compose.

set -e

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color

# Configuration
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(dirname "$(dirname "$SCRIPT_DIR")")"
ENV_FILE="$PROJECT_ROOT/.env"
COMPOSE_FILE="docker/docker-compose.prod.yml"
# Resolved once in main(): "docker compose" (plugin) or "docker-compose" (legacy).
COMPOSE_CMD=""

echo -e "${GREEN}=== LangChain Agent Installation Script ===${NC}\n"

# Print the compose invocation to use, preferring the modern plugin.
# (Previously this detection was duplicated in build_images/start_services.)
detect_compose_cmd() {
  if docker compose version &> /dev/null; then
    echo "docker compose"
  else
    echo "docker-compose"
  fi
}

# Source .env (if present) so ports/credentials are visible to this script.
load_env() {
  if [ -f "$ENV_FILE" ]; then
    set -a
    source "$ENV_FILE"
    set +a
  fi
}

# Check for required tools
check_requirements() {
  echo -e "${YELLOW}Checking requirements...${NC}"

  if ! command -v docker &> /dev/null; then
    echo -e "${RED}Error: Docker is not installed. Please install Docker first.${NC}"
    exit 1
  fi

  if ! command -v docker-compose &> /dev/null && ! docker compose version &> /dev/null; then
    echo -e "${RED}Error: docker-compose is not installed. Please install docker-compose first.${NC}"
    exit 1
  fi

  echo -e "${GREEN}✓ All requirements met${NC}\n"
}

# Create .env file with default settings if it doesn't exist
create_env_file() {
  if [ ! -f "$ENV_FILE" ]; then
    echo -e "${YELLOW}Creating .env file...${NC}"
    cat > "$ENV_FILE" <<EOF
# Database Configuration
POSTGRES_DB=ai_conversations
POSTGRES_USER=myapp_user
POSTGRES_PASSWORD=secure_password_123
POSTGRES_PORT=5432

# Backend Configuration
BACKEND_PORT=8500

# Frontend Configuration
FRONTEND_PORT=80

# Database Connection String (used by backend)
CONN_STR=postgresql://myapp_user:secure_password_123@postgres:5432/ai_conversations
EOF
    echo -e "${GREEN}✓ Created .env file at $ENV_FILE${NC}"
    echo -e "${YELLOW}  Please review and update the .env file with your preferred settings.${NC}\n"
  else
    echo -e "${GREEN}✓ .env file already exists${NC}\n"
  fi
}

# Build Docker images
build_images() {
  echo -e "${YELLOW}Building Docker images...${NC}"
  cd "$PROJECT_ROOT"
  $COMPOSE_CMD -f "$COMPOSE_FILE" build
  echo -e "${GREEN}✓ Docker images built successfully${NC}\n"
}

# Initialize database
# The SQL files in scripts/init_database/ are executed automatically by the
# postgres image's docker-entrypoint-initdb.d mechanism on first start;
# this function only gives the container a head start.
init_database() {
  echo -e "${YELLOW}Initializing database...${NC}"
  echo "Waiting for PostgreSQL to start..."
  sleep 5
  echo -e "${GREEN}✓ Database initialization will be handled automatically by PostgreSQL container${NC}\n"
}

# Start services
start_services() {
  echo -e "${YELLOW}Starting services...${NC}"
  cd "$PROJECT_ROOT"
  $COMPOSE_CMD -f "$COMPOSE_FILE" up -d
  echo -e "${GREEN}✓ Services started${NC}\n"
}

# Show access points and management commands
show_status() {
  load_env

  echo -e "${GREEN}=== Installation Complete ===${NC}\n"
  echo -e "Services are starting up. Please wait a moment for them to be ready.\n"
  echo -e "Access points:"
  echo -e "  - Frontend: http://localhost:${FRONTEND_PORT:-80}"
  echo -e "  - Backend API: http://localhost:${BACKEND_PORT:-8500}"
  echo -e "  - Database: localhost:${POSTGRES_PORT:-5432}\n"
  # Use the detected compose command in the hints (previously hardcoded
  # "docker-compose", which is misleading on plugin-only installs).
  echo -e "To view logs:"
  echo -e "  $COMPOSE_CMD -f $COMPOSE_FILE logs -f\n"
  echo -e "To stop services:"
  echo -e "  $COMPOSE_CMD -f $COMPOSE_FILE down\n"
  echo -e "To restart services:"
  echo -e "  $COMPOSE_CMD -f $COMPOSE_FILE restart\n"
}

# Main execution
main() {
  check_requirements
  create_env_file
  COMPOSE_CMD="$(detect_compose_cmd)"
  build_images
  start_services
  init_database
  show_status

  echo -e "${YELLOW}Waiting for services to be healthy...${NC}"
  sleep 10

  # Load environment variables for health check
  load_env

  # Check service health
  echo -e "\n${YELLOW}Checking service health...${NC}"
  sleep 5 # Give services a bit more time
  if curl -f "http://localhost:${BACKEND_PORT:-8500}/health" &> /dev/null; then
    echo -e "${GREEN}✓ Backend is healthy${NC}"
  else
    echo -e "${YELLOW}⚠ Backend is still starting up. Check logs with: $COMPOSE_CMD -f $COMPOSE_FILE logs backend${NC}"
  fi
}

# Run main function
main
|
||||
|
||||
Reference in New Issue
Block a user