# syntax=docker/dockerfile:1
FROM python:3.9-slim

WORKDIR /app

# Environment needed both at build time and at runtime.
# NLTK_DATA MUST be defined before the nltk.download RUN below: in the
# original file it was declared after that step, so "$NLTK_DATA" expanded
# to "" and the corpora never landed in /usr/local/share/nltk_data.
ENV GRADIO_SERVER_NAME="0.0.0.0" \
    NLTK_DATA=/usr/local/share/nltk_data \
    PYTHONUNBUFFERED=1 \
    HF_HOME=/root/.cache/huggingface

# System dependencies: git for the VERSA clone, build tools for native
# wheels, libsndfile1/ffmpeg for audio processing.
RUN apt-get update && apt-get install -y --no-install-recommends \
        build-essential \
        ffmpeg \
        git \
        libsndfile1 \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

# World-writable cache dirs: hosts such as Hugging Face Spaces run the
# container under an arbitrary non-root UID, so every cache path must be
# writable by any user (hence 777 rather than least-privilege modes).
RUN mkdir -p /.cache /root/.cache "$NLTK_DATA" && \
    chmod -R 777 /.cache /root/.cache "$NLTK_DATA"

# Copy only the requirements manifest first so the dependency layer is
# cached independently of application-code changes.
COPY requirements.txt .
RUN pip install --no-cache-dir -U pip && \
    pip install --no-cache-dir -r requirements.txt

# Pre-download NLTK data into $NLTK_DATA (now correctly defined above).
RUN python -c "import nltk; nltk.download('punkt', download_dir='$NLTK_DATA'); nltk.download('stopwords', download_dir='$NLTK_DATA'); nltk.download('wordnet', download_dir='$NLTK_DATA')"

# Clone and editable-install VERSA so /app/versa stays importable in place.
# NOTE(review): the clone is unpinned (tracks default branch HEAD) — pin a
# commit or tag for reproducible builds.
RUN git clone https://github.com/wavlab-speech/versa.git && \
    pip install -e ./versa

# App data directories, writable by any UID (see cache note above).
RUN mkdir -p /app/data/configs /app/data/uploads /app/data/results && \
    chmod -R 777 /app/data

# Application config and code last: frequent edits here don't invalidate
# the dependency layers above.
COPY universal_metrics.yaml /app/data/configs/
COPY app.py .

# Marker file — presumably app.py checks this to confirm the VERSA install
# finished; verify against app.py.
RUN touch /app/versa/.installation_complete

# Gradio default port (documentation only; publishing happens at run time).
EXPOSE 7860

# Numba and Matplotlib need writable cache/config dirs under arbitrary UIDs
# (workaround for https://github.com/librosa/librosa/issues/1156).
RUN mkdir -m 777 /tmp/NUMBA_CACHE_DIR /tmp/MPLCONFIGDIR
ENV NUMBA_CACHE_DIR=/tmp/NUMBA_CACHE_DIR/ \
    MPLCONFIGDIR=/tmp/MPLCONFIGDIR/

# Exec-form CMD: python runs as PID 1 and receives SIGTERM from docker stop.
CMD ["python", "app.py"]