FROM ghcr.io/developmentseed/titiler:latest

# Python runtime optimizations
ENV PYTHONUNBUFFERED=1 \
    PYTHONDONTWRITEBYTECODE=1 \
    PYTHONWARNINGS=ignore

# CORS settings for public access
ENV TITILER_API_CORS_ORIGINS=* \
    TITILER_API_CORS_ALLOW_METHODS=GET,POST,OPTIONS

# Proxy settings
ENV FORWARDED_ALLOW_IPS=* \
    TITILER_API_ROOT_PATH=""
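
# Once a container is running, the CORS setup can be sanity-checked by sending a
# request with an Origin header and looking for Access-Control-Allow-Origin in the
# response (a sketch; the localhost port follows the bind/EXPOSE settings below,
# and /healthz is TiTiler's liveness route):
#   curl -s -D - -o /dev/null -H "Origin: https://example.org" http://localhost:7860/healthz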

# ============================================================================
# GDAL/RASTERIO TUNING PARAMETERS
# ============================================================================
# GDAL_CACHEMAX: GDAL block cache, set below to 75% of available RAM
#                Higher = faster repeated reads, more memory used
# VSI_CACHE_SIZE: VSI cache size in bytes (1GB, up from 512MB)
#                 Caches remote file chunks. Critical for S3/HTTP COGs
# CPL_VSIL_CURL_CACHE_SIZE: CURL cache in bytes (2GB, up from 1GB)
#                           Caches HTTP range requests. Bigger = fewer re-fetches
# CPL_VSIL_CURL_CHUNK_SIZE: Size of chunks for range requests (10MB). Tune to the COG's internal tile size
# GDAL_NUM_THREADS: Number of GDAL threads (6 of the 8 cores; ALL_CPUS would use all of them)
#                   More threads = faster parallel reads but more CPU contention
# GDAL_MAX_DATASET_POOL_SIZE: Max open datasets (300, down from 450). Lower = less memory

ENV CPL_TMPDIR=/tmp \
    GDAL_CACHEMAX=75% \
    GDAL_INGESTED_BYTES_AT_OPEN=32768 \
    GDAL_DISABLE_READDIR_ON_OPEN=EMPTY_DIR \
    GDAL_HTTP_MERGE_CONSECUTIVE_RANGES=YES \
    GDAL_HTTP_MULTIPLEX=YES \
    GDAL_HTTP_VERSION=2 \
    VSI_CACHE=TRUE \
    VSI_CACHE_SIZE=1073741824 \
    CPL_VSIL_CURL_ALLOWED_EXTENSIONS=.tif,.tiff,.vrt,.jp2,.png,.jpg \
    CPL_VSIL_CURL_USE_HEAD=NO \
    CPL_VSIL_CURL_CACHE_SIZE=2147483648 \
    CPL_VSIL_CURL_CHUNK_SIZE=10485760 \
    GDAL_HTTP_TIMEOUT=30 \
    GDAL_HTTP_CONNECTTIMEOUT=10 \
    GDAL_HTTP_MAX_RETRY=3 \
    GDAL_HTTP_RETRY_DELAY=1 \
    GDAL_NUM_THREADS=6 \
    GDAL_MAX_DATASET_POOL_SIZE=300 \
    PROJ_NETWORK=ON \
    GDAL_ENABLE_WMS_CACHE=YES
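
# Quick way to confirm the tuning variables are baked into the final image
# (a sketch; <image> is a placeholder for the built tag) is to inspect the image
# config and check the GDAL_*/VSI_*/CPL_* entries:
#   docker image inspect <image> --format '{{json .Config.Env}}'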

# ============================================================================
# TITILER SPECIFIC SETTINGS
# ============================================================================
# MOSAIC_CONCURRENCY: Concurrent mosaic operations (was 4, now 3)
# RIO_TILER_MAX_THREADS: Rasterio tile generation threads per worker

ENV TITILER_API_DISABLE_MOSAIC=FALSE \
    TITILER_API_ENABLE_TILES_CACHE=TRUE \
    MOSAIC_CONCURRENCY=3 \
    RIO_TILER_MAX_THREADS=4
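
# A typical request that exercises these settings once the service is up
# (a sketch; the COG URL is a placeholder):
#   GET /cog/tiles/WebMercatorQuad/{z}/{x}/{y}.png?url=https://example.com/data.tif
# Mosaic endpoints remain available since TITILER_API_DISABLE_MOSAIC=FALSE.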

# Install process manager and rate-limiting dependencies
RUN pip install gunicorn uvloop slowapi

# Set working directory
WORKDIR /app

# Create gunicorn config file
RUN printf '%s\n' \
'import multiprocessing' \
'import os' \
'' \
'# WORKER CONFIGURATION' \
'workers = 6' \
'worker_class = "uvicorn.workers.UvicornWorker"' \
'worker_connections = 500' \
'' \
'# REQUEST RECYCLING' \
'max_requests = 3000' \
'max_requests_jitter = 300' \
'' \
'# TIMEOUTS' \
'timeout = 60' \
'graceful_timeout = 30' \
'keepalive = 5' \
'' \
'# PERFORMANCE TUNING' \
'backlog = 1024' \
'limit_request_line = 4094' \
'limit_request_fields = 100' \
'limit_request_field_size = 8190' \
'' \
'# SERVER MECHANICS' \
'bind = "0.0.0.0:7860"' \
'daemon = False' \
'reuse_port = True' \
'preload_app = True' \
'' \
'# LOGGING' \
'accesslog = "-"' \
'errorlog = "-"' \
'loglevel = "info"' \
'' \
'def when_ready(server):' \
'    server.log.info("Server ready. Spawning workers")' \
'' \
'def pre_fork(server, worker):' \
'    server.log.info(f"Worker spawned (pid: {worker.pid})")' \
'' \
'def post_fork(server, worker):' \
'    server.log.info(f"Worker initialized (pid: {worker.pid})")' \
'' \
'def worker_exit(server, worker):' \
'    server.log.info(f"Worker exited (pid: {worker.pid})")' \
> /app/gunicorn_config.py
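
# The generated config can be smoke-tested inside the image against the stock
# TiTiler app, before the custom app defined below exists (a sketch):
#   gunicorn titiler.application.main:app -c /app/gunicorn_config.py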

# Create custom app with rate limiting
RUN printf '%s\n' \
'from titiler.application.main import app' \
'from slowapi import Limiter, _rate_limit_exceeded_handler' \
'from slowapi.util import get_remote_address' \
'from slowapi.errors import RateLimitExceeded' \
'from slowapi.middleware import SlowAPIMiddleware' \
'from starlette.middleware.base import BaseHTTPMiddleware' \
'from starlette.responses import Response' \
'import time' \
'import asyncio' \
'from collections import defaultdict, deque' \
'import logging' \
'' \
'logging.basicConfig(level=logging.INFO)' \
'logger = logging.getLogger(__name__)' \
'' \
'# RATE LIMITING CONFIGURATION' \
'limiter = Limiter(' \
'    key_func=get_remote_address,' \
'    default_limits=["400 per minute", "5000 per hour"],' \
'    storage_uri="memory://",' \
'    swallow_errors=True' \
')' \
'' \
'app.state.limiter = limiter' \
'app.add_exception_handler(RateLimitExceeded, _rate_limit_exceeded_handler)' \
'app.add_middleware(SlowAPIMiddleware)' \
'' \
'# ADAPTIVE BURST PROTECTION' \
'class AdaptiveBurstProtection(BaseHTTPMiddleware):' \
'    def __init__(self, app, burst_size=50, window=1.0, decay_rate=0.9):' \
'        super().__init__(app)' \
'        self.burst_size = burst_size' \
'        self.window = window' \
'        self.decay_rate = decay_rate' \
'        self.requests = defaultdict(lambda: deque(maxlen=burst_size))' \
'        self.delays = defaultdict(float)' \
'    ' \
'    async def dispatch(self, request, call_next):' \
'        client_ip = request.client.host if request.client else "unknown"' \
'        now = time.time()' \
'        ' \
'        request_times = self.requests[client_ip]' \
'        request_times.append(now)' \
'        ' \
'        if len(request_times) >= 2:' \
'            time_span = now - request_times[0]' \
'            if time_span > 0:' \
'                current_rate = len(request_times) / time_span' \
'                ' \
'                if current_rate > self.burst_size:' \
'                    self.delays[client_ip] = min(0.5, self.delays[client_ip] + 0.05)' \
'                    await asyncio.sleep(self.delays[client_ip])' \
'                    ' \
'                    if self.delays[client_ip] > 0.1:' \
'                        logger.warning(f"Rate limiting {client_ip}: {current_rate:.1f} req/s")' \
'                        ' \
'                elif self.delays[client_ip] > 0:' \
'                    self.delays[client_ip] *= self.decay_rate' \
'        ' \
'        try:' \
'            response = await call_next(request)' \
'            return response' \
'        except Exception as e:' \
'            logger.error(f"Request failed: {e}")' \
'            return Response(content="Internal error", status_code=500)' \
'' \
'app.add_middleware(AdaptiveBurstProtection, burst_size=75, window=1.0)' \
'' \
'# CACHE HEADERS' \
'@app.middleware("http")' \
'async def add_cache_headers(request, call_next):' \
'    response = await call_next(request)' \
'    path = str(request.url.path)' \
'    ' \
'    if "/tiles/" in path:' \
'        response.headers["Cache-Control"] = "public, max-age=3600, stale-while-revalidate=7200"' \
'    elif "/cog/statistics" in path or "/cog/info" in path:' \
'        response.headers["Cache-Control"] = "public, max-age=600"' \
'    elif "/cog/bounds" in path:' \
'        response.headers["Cache-Control"] = "public, max-age=86400"' \
'    ' \
'    return response' \
'' \
'logger.info("Custom app with rate limiting initialized")' \
> /app/custom_app.py
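
# Behaviour of the middlewares above, once the server is running (a sketch; the
# COG URL is a placeholder):
#   curl -s -D - -o /dev/null "http://localhost:7860/cog/bounds?url=https://example.com/data.tif"
# should include "Cache-Control: public, max-age=86400"; sustained traffic beyond
# 400 requests/minute from one IP gets HTTP 429 from slowapi, and bursts above
# ~75 req/s start accruing the adaptive per-IP delay.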

# Create startup script
RUN printf '%s\n' \
'#!/bin/bash' \
'echo "==============================================="' \
'cd /app' \
'exec gunicorn custom_app:app -c gunicorn_config.py' \
> /app/start.sh

RUN chmod +x /app/start.sh

EXPOSE 7860

CMD ["/app/start.sh"]
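
# Example build and run (a sketch; image tag and host port are placeholders):
#   docker build -t titiler-tuned .
#   docker run --rm -p 7860:7860 titiler-tuned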