romybeaute committed · verified
Commit 6534d2a · 1 Parent(s): e0a6503

Update Dockerfile

Files changed (1)
  1. Dockerfile +40 -36
Dockerfile CHANGED
@@ -1,3 +1,43 @@
+ # ---- Base image ----
+ FROM python:3.11-slim
+
+ # Workdir inside the container
+ WORKDIR /app
+
+ # ---- System dependencies (Lightweight) ----
+ RUN apt-get update && apt-get install -y \
+     curl \
+     git \
+     && rm -rf /var/lib/apt/lists/*
+
+ # ---- Python deps ----
+ COPY requirements.txt .
+
+ # ---- STEP 1: Install LLM Engine (Force Pre-built Binary) ----
+ # We run this BEFORE requirements.txt to ensure the pre-built wheel is prioritized.
+ # The --prefer-binary flag stops it from trying to compile from source.
+ RUN pip install --no-cache-dir \
+     --prefer-binary \
+     --extra-index-url https://abetlen.github.io/llama-cpp-python/whl/cpu \
+     llama-cpp-python
+
+ # ---- STEP 2: Install Remaining Packages ----
+ # Now install everything else from requirements.txt
+ RUN pip install --no-cache-dir \
+     --extra-index-url https://download.pytorch.org/whl/cpu \
+     -r requirements.txt
+
+ # ---- NLTK data ----
+ RUN python -c "import nltk; nltk.download('punkt_tab'); nltk.download('punkt'); nltk.download('stopwords')"
+
+ # ---- Copy application files ----
+ COPY . .
+
+ # ---- Command to run the app ----
+ # Defaulting to Lite mode. Change 'app.py' to 'app_with_LLM.py' in README to switch.
+ CMD ["streamlit", "run", "app.py", "--server.port=7860", "--server.address=0.0.0.0"]
+
+
  # # ---- Base image ----
  # FROM python:3.11-slim
 
@@ -60,39 +100,3 @@
  # --server.enableCORS=false \
  # --server.enableXsrfProtection=false"]
 
-
-
-
-
- # ---- Base image ----
- FROM python:3.11-slim
-
- # Workdir inside the container
- WORKDIR /app
-
- # ---- System dependencies (Lightweight) ----
- # We removed cmake/gcc because we will use pre-built wheels
- RUN apt-get update && apt-get install -y \
-     curl \
-     git \
-     && rm -rf /var/lib/apt/lists/*
-
- # ---- Python deps ----
- COPY requirements.txt .
-
- # ---- Install Python Packages ----
- # We add a 2nd URL to 'abetlen' to find the pre-built LLM wheel
- RUN pip install --no-cache-dir \
-     --extra-index-url https://download.pytorch.org/whl/cpu \
-     --extra-index-url https://abetlen.github.io/llama-cpp-python/whl/cpu \
-     -r requirements.txt
-
- # ---- NLTK data (punkt + stopwords) ----
- RUN python -c "import nltk; nltk.download('punkt_tab'); nltk.download('punkt'); nltk.download('stopwords')"
-
- # ---- Copy application files ----
- COPY . .
-
- # ---- Command to run the app ----
- # We default to the Lite app. Change to app_with_LLM.py in README to switch.
- CMD ["streamlit", "run", "app.py", "--server.port=7860", "--server.address=0.0.0.0"]
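
For context, a minimal sketch of how the updated image could be built and smoke-tested locally; the image tag is a placeholder (it is not defined anywhere in this repo), while port 7860 comes from the CMD in the Dockerfile above:

    # Build from the repository root (tag name is illustrative only)
    docker build -t summarizer-demo .

    # Run the Streamlit app; it listens on 7860 per the CMD above
    docker run --rm -p 7860:7860 summarizer-demo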
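To check that STEP 1 really resolved llama-cpp-python to a pre-built CPU wheel rather than compiling from source, one option (an illustration, not part of this commit) is to replay the pip command outside Docker and inspect its output:

    # If pip reports downloading a .whl file instead of running a C/C++ build,
    # the pre-built wheel from the abetlen index was used
    pip install --no-cache-dir --prefer-binary \
        --extra-index-url https://abetlen.github.io/llama-cpp-python/whl/cpu \
        llama-cpp-python

    # Show the installed version and location as a final sanity check
    pip show llama-cpp-python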