Shamik committed (verified)
Commit aa3bf9e · Parent(s): 1508f14

fix: removing the nvidia libraries from requirements.txt.
fix: importing project root dir in app module.
fix: using HF_HOME env in Dockerfile to store the transformers cache.
Files changed (3)
  1. Dockerfile +1 -1
  2. app.py +1 -0
  3. requirements.txt +0 -34
Dockerfile CHANGED
@@ -18,7 +18,7 @@ libssl-dev \
 
 ENV PYTHONPATH=/app
 
-ENV TRANSFORMERS_CACHE=/tmp/cache/
+ENV HF_HOME=/tmp/cache/
 
 
 RUN pip install --no-cache-dir -r requirements.txt
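Switching from TRANSFORMERS_CACHE to HF_HOME matches current Hugging Face behaviour: TRANSFORMERS_CACHE is deprecated in recent transformers releases, while HF_HOME sets the root directory the Hub libraries use for their cache (models usually land under $HF_HOME/hub). A minimal sketch of that resolution, assuming only the documented defaults and nothing defined in this repo:

import os

# HF_HOME, if set, is the root of the Hugging Face cache; otherwise the
# documented fallback ~/.cache/huggingface is used. Model files end up
# under the "hub" subdirectory.
hf_home = os.environ.get("HF_HOME", os.path.expanduser("~/.cache/huggingface"))
hub_cache = os.path.join(hf_home, "hub")
print(hf_home, hub_cache)

With the Dockerfile change above, both paths resolve under /tmp/cache/, which is typically writable for the container's runtime user.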
app.py CHANGED
@@ -1,4 +1,5 @@
 from src.insurance_assistants.ui import UI
+from src.insurance_assistants.consts import PROJECT_ROOT_DIR
 
 if __name__ == "__main__":
     UI().launch(allowed_paths=[(PROJECT_ROOT_DIR /"data/policy_wordings").as_posix()],
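The added import fixes a NameError: the previous revision referenced PROJECT_ROOT_DIR in the launch() call without importing it. The constant lives in src/insurance_assistants/consts.py, which is not part of this diff; a hypothetical sketch of how such a module might define it, assuming a pathlib-based project layout:

# src/insurance_assistants/consts.py -- hypothetical sketch, the real module
# is not shown in this commit.
from pathlib import Path

# Walk up from src/insurance_assistants/consts.py to the repository root.
PROJECT_ROOT_DIR = Path(__file__).resolve().parents[2]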
requirements.txt CHANGED
@@ -315,40 +315,6 @@ numpy==2.2.6
     # scipy
     # torchvision
     # transformers
-nvidia-cublas-cu12==12.4.5.8
-    # via
-    #   nvidia-cudnn-cu12
-    #   nvidia-cusolver-cu12
-    #   torch
-nvidia-cuda-cupti-cu12==12.4.127
-    # via torch
-nvidia-cuda-nvrtc-cu12==12.4.127
-    # via torch
-nvidia-cuda-runtime-cu12==12.4.127
-    # via torch
-nvidia-cudnn-cu12==9.1.0.70
-    # via torch
-nvidia-cufft-cu12==11.2.1.3
-    # via torch
-nvidia-curand-cu12==10.3.5.147
-    # via torch
-nvidia-cusolver-cu12==11.6.1.9
-    # via torch
-nvidia-cusparse-cu12==12.3.1.170
-    # via
-    #   nvidia-cusolver-cu12
-    #   torch
-nvidia-cusparselt-cu12==0.6.2
-    # via torch
-nvidia-nccl-cu12==2.21.5
-    # via torch
-nvidia-nvjitlink-cu12==12.4.127
-    # via
-    #   nvidia-cusolver-cu12
-    #   nvidia-cusparse-cu12
-    #   torch
-nvidia-nvtx-cu12==12.4.127
-    # via torch
 openai==1.82.1
     # via
     #   llama-index-agent-openai
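All of the removed pins are transitive dependencies of torch (see the "# via torch" annotations), so dropping them slims the compiled requirements for an environment without NVIDIA GPUs. Whether they stay out in practice depends on which torch wheel the resolver installs; a quick, illustrative check to run inside the built image (not part of the repository) to confirm the torch build has no CUDA runtime:

import torch

# CPU-only torch builds report no CUDA runtime.
print(torch.__version__)          # may carry a "+cpu" local tag if installed from the CPU index
print(torch.version.cuda)         # None for CPU-only builds
print(torch.cuda.is_available())  # False when no CUDA runtime or driver is present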