vidhanm commited on
Commit
1792bb4
·
1 Parent(s): 4670dfa

some changes

Browse files
Files changed (2) hide show
  1. Dockerfile +14 -4
  2. app.py +24 -1
Dockerfile CHANGED
@@ -1,22 +1,32 @@
1
- # Use a slim Python base image. For GPU, you'd need a CUDA-enabled base.
2
  FROM python:3.9-slim
3
 
4
  # Set the working directory in the container
5
  WORKDIR /app
6
 
7
- # Install git (useful for some Hugging Face model/tokenizer downloads that might use it)
8
- # Also install common build tools often needed for Python packages
 
 
 
 
 
9
  RUN apt-get update && apt-get install -y \
10
  git \
11
  build-essential \
12
  && rm -rf /var/lib/apt/lists/*
13
 
 
 
 
 
 
 
14
  # Copy the requirements file first to leverage Docker layer caching
15
  COPY requirements.txt requirements.txt
16
 
17
  # Install Python dependencies
18
  # --no-cache-dir reduces image size
19
- # --prefer-binary can speed up builds for packages with binary distributions
20
  RUN pip install --no-cache-dir --prefer-binary -r requirements.txt
21
 
22
  # Copy the application code into the container
 
1
+ # Use a slim Python base image.
2
  FROM python:3.9-slim
3
 
4
  # Set the working directory in the container
5
  WORKDIR /app
6
 
7
+ # Set Hugging Face cache directory and Gradio temp/flagging dir
8
+ # These will be within /app or /tmp, which we can make writable.
9
+ ENV HF_HOME=/app/.cache/huggingface
10
+ # For Gradio's own temp files (Docker ENV does not support trailing inline comments;
+ # a `#` after the value would become part of the value)
+ ENV GRADIO_TEMP_DIR=/tmp/gradio_tmp
11
+ # For Gradio flagging data (comment kept on its own line so it is not
+ # captured into the ENV value)
+ ENV GRADIO_FLAGGING_DIR=/tmp/gradio_flags
12
+
13
+ # Install git and build-essential (good practice for some pip installs)
14
  RUN apt-get update && apt-get install -y \
15
  git \
16
  build-essential \
17
  && rm -rf /var/lib/apt/lists/*
18
 
19
+ # Create the cache and temp directories and make them writable by any user.
20
+ # The user running the app inside the container (often root by default in simple Dockerfiles,
21
+ # or a non-root user in managed environments like Spaces) needs to write here.
22
+ RUN mkdir -p $HF_HOME $GRADIO_TEMP_DIR $GRADIO_FLAGGING_DIR && \
23
+ chmod -R 777 $HF_HOME $GRADIO_TEMP_DIR $GRADIO_FLAGGING_DIR
24
+
25
  # Copy the requirements file first to leverage Docker layer caching
26
  COPY requirements.txt requirements.txt
27
 
28
  # Install Python dependencies
29
  # --no-cache-dir reduces image size
 
30
  RUN pip install --no-cache-dir --prefer-binary -r requirements.txt
31
 
32
  # Copy the application code into the container
app.py CHANGED
@@ -90,6 +90,24 @@ This Space uses the `lusxvr/nanoVLM-222M` model.
90
  # Example image from COCO dataset
91
  example_image_url = "http://images.cocodataset.org/val2017/000000039769.jpg" # A cat and a remote
92
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
93
  iface = gr.Interface(
94
  fn=generate_text_for_image,
95
  inputs=[
@@ -104,9 +122,14 @@ iface = gr.Interface(
104
  [example_image_url, "Describe the image in detail."],
105
  [example_image_url, "What objects are on the sofa?"],
106
  ],
107
- cache_examples=True # Cache results for examples to load faster
 
 
 
 
108
  )
109
 
 
110
  if __name__ == "__main__":
111
  # For Hugging Face Spaces, it's common to launch with server_name="0.0.0.0"
112
  # The Space infrastructure handles the public URL and port mapping.
 
90
  # Example image from COCO dataset
91
  example_image_url = "http://images.cocodataset.org/val2017/000000039769.jpg" # A cat and a remote
92
 
93
+ # iface = gr.Interface(
94
+ # fn=generate_text_for_image,
95
+ # inputs=[
96
+ # gr.Image(type="pil", label="Upload Image"),
97
+ # gr.Textbox(label="Your Prompt/Question", info="e.g., 'What is this a picture of?', 'Describe the main subject.', 'How many animals are there?'")
98
+ # ],
99
+ # outputs=gr.Textbox(label="Generated Text", show_copy_button=True),
100
+ # title="Interactive nanoVLM-222M Demo",
101
+ # description=description,
102
+ # examples=[
103
+ # [example_image_url, "a photo of a"],
104
+ # [example_image_url, "Describe the image in detail."],
105
+ # [example_image_url, "What objects are on the sofa?"],
106
+ # ],
107
+ # cache_examples=True # Cache results for examples to load faster
108
+ # )
109
+ # ... (other parts of your app.py)
110
+
111
  iface = gr.Interface(
112
  fn=generate_text_for_image,
113
  inputs=[
 
122
  [example_image_url, "Describe the image in detail."],
123
  [example_image_url, "What objects are on the sofa?"],
124
  ],
125
+ cache_examples=True,
126
+ allow_flagging="never" # Add this line to disable flagging
127
+ # OR, if you want flagging, configure its directory (ensure GRADIO_FLAGGING_DIR is set in Dockerfile):
128
+ # import os
129
+ # flagging_dir=os.environ.get("GRADIO_FLAGGING_DIR"),
130
  )
131
 
132
+
133
  if __name__ == "__main__":
134
  # For Hugging Face Spaces, it's common to launch with server_name="0.0.0.0"
135
  # The Space infrastructure handles the public URL and port mapping.