from fastapi import FastAPI, HTTPException, Depends, Header, Request from fastapi.responses import JSONResponse, StreamingResponse from fastapi.security import APIKeyHeader from pydantic import BaseModel, ConfigDict, Field from typing import List, Dict, Any, Optional, Union, Literal import base64 import re import json import time import os import glob import random from google.oauth2 import service_account import config from google.genai import types from google import genai client = None app = FastAPI(title="OpenAI to Gemini Adapter") # API Key security scheme api_key_header = APIKeyHeader(name="Authorization", auto_error=False) # Dependency for API key validation async def get_api_key(authorization: Optional[str] = Header(None)): if authorization is None: raise HTTPException( status_code=401, detail="Missing API key. Please include 'Authorization: Bearer YOUR_API_KEY' header." ) # Check if the header starts with "Bearer " if not authorization.startswith("Bearer "): raise HTTPException( status_code=401, detail="Invalid API key format. Use 'Authorization: Bearer YOUR_API_KEY'" ) # Extract the API key api_key = authorization.replace("Bearer ", "") # Validate the API key if not config.validate_api_key(api_key): raise HTTPException( status_code=401, detail="Invalid API key" ) return api_key # Credential Manager for handling multiple service accounts class CredentialManager: def __init__(self, default_credentials_dir="/app/credentials"): # Use environment variable if set, otherwise use default self.credentials_dir = os.environ.get("CREDENTIALS_DIR", default_credentials_dir) self.credentials_files = [] self.current_index = 0 self.credentials = None self.project_id = None self.load_credentials_list() def load_credentials_list(self): """Load the list of available credential files""" # Look for all .json files in the credentials directory pattern = os.path.join(self.credentials_dir, "*.json") self.credentials_files = glob.glob(pattern) if not self.credentials_files: print(f"No credential files found in {self.credentials_dir}") return False print(f"Found {len(self.credentials_files)} credential files: {[os.path.basename(f) for f in self.credentials_files]}") return True def refresh_credentials_list(self): """Refresh the list of credential files (useful if files are added/removed)""" old_count = len(self.credentials_files) self.load_credentials_list() new_count = len(self.credentials_files) if old_count != new_count: print(f"Credential files updated: {old_count} -> {new_count}") return len(self.credentials_files) > 0 def get_next_credentials(self): """Rotate to the next credential file and load it""" if not self.credentials_files: return None, None # Get the next credential file in rotation file_path = self.credentials_files[self.current_index] self.current_index = (self.current_index + 1) % len(self.credentials_files) try: credentials = service_account.Credentials.from_service_account_file(file_path,scopes=['https://www.googleapis.com/auth/cloud-platform']) project_id = credentials.project_id print(f"Loaded credentials from {file_path} for project: {project_id}") self.credentials = credentials self.project_id = project_id return credentials, project_id except Exception as e: print(f"Error loading credentials from {file_path}: {e}") # Try the next file if this one fails if len(self.credentials_files) > 1: print("Trying next credential file...") return self.get_next_credentials() return None, None def get_random_credentials(self): """Get a random credential file and load it""" if not self.credentials_files: return None, None # Choose a random credential file file_path = random.choice(self.credentials_files) try: credentials = service_account.Credentials.from_service_account_file(file_path,scopes=['https://www.googleapis.com/auth/cloud-platform']) project_id = credentials.project_id print(f"Loaded credentials from {file_path} for project: {project_id}") self.credentials = credentials self.project_id = project_id return credentials, project_id except Exception as e: print(f"Error loading credentials from {file_path}: {e}") # Try another random file if this one fails if len(self.credentials_files) > 1: print("Trying another credential file...") return self.get_random_credentials() return None, None # Initialize the credential manager credential_manager = CredentialManager() # Define data models class ImageUrl(BaseModel): url: str class ContentPartImage(BaseModel): type: Literal["image_url"] image_url: ImageUrl class ContentPartText(BaseModel): type: Literal["text"] text: str class OpenAIMessage(BaseModel): role: str content: Union[str, List[Union[ContentPartText, ContentPartImage, Dict[str, Any]]]] class OpenAIRequest(BaseModel): model: str messages: List[OpenAIMessage] temperature: Optional[float] = 1.0 max_tokens: Optional[int] = None top_p: Optional[float] = 1.0 top_k: Optional[int] = None stream: Optional[bool] = False stop: Optional[List[str]] = None presence_penalty: Optional[float] = None frequency_penalty: Optional[float] = None seed: Optional[int] = None logprobs: Optional[int] = None response_logprobs: Optional[bool] = None n: Optional[int] = None # Maps to candidate_count in Vertex AI # Allow extra fields to pass through without causing validation errors model_config = ConfigDict(extra='allow') # Configure authentication def init_vertex_ai(): global client # Ensure we modify the global client variable try: # Priority 1: Check for credentials JSON content in environment variable (Hugging Face) credentials_json_str = os.environ.get("GOOGLE_CREDENTIALS_JSON") if credentials_json_str: try: print(f"Found GOOGLE_CREDENTIALS_JSON environment variable, length: {len(credentials_json_str)} characters") # Try to parse the JSON try: credentials_info = json.loads(credentials_json_str) print(f"Successfully parsed JSON from GOOGLE_CREDENTIALS_JSON") # Check if the parsed JSON has the expected structure if not isinstance(credentials_info, dict): print(f"ERROR: Parsed JSON is not a dictionary, type: {type(credentials_info)}") raise ValueError("Credentials JSON must be a dictionary") # Check for required fields in the service account JSON required_fields = ["type", "project_id", "private_key_id", "private_key", "client_email"] missing_fields = [field for field in required_fields if field not in credentials_info] if missing_fields: print(f"ERROR: Missing required fields in credentials JSON: {missing_fields}") raise ValueError(f"Credentials JSON missing required fields: {missing_fields}") print(f"Credentials JSON contains all required fields") except json.JSONDecodeError as json_err: print(f"ERROR: Failed to parse GOOGLE_CREDENTIALS_JSON as JSON: {json_err}") # Print a small sample of the string for debugging (avoid printing the whole thing for security) safe_sample = credentials_json_str[:20] + "..." if len(credentials_json_str) > 20 else credentials_json_str print(f"First few characters: {safe_sample}") raise # Create credentials from the parsed JSON info (json.loads should handle \n) try: # DEBUG: Log snippet of private key before passing to library pk_value = credentials_info.get("private_key", "KEY_NOT_FOUND") if pk_value != "KEY_NOT_FOUND": print(f"DEBUG: Snippet of private_key passed to from_service_account_info:\n{pk_value[:30]}...\n...{pk_value[-30:]}") else: print("DEBUG: private_key not found in credentials_info dict.") credentials = service_account.Credentials.from_service_account_info( credentials_info, # Pass the dictionary directly scopes=['https://www.googleapis.com/auth/cloud-platform'] ) project_id = credentials.project_id print(f"Successfully created credentials object for project: {project_id}") except Exception as cred_err: print(f"ERROR: Failed to create credentials from service account info: {cred_err}") raise # Initialize the client with the credentials try: client = genai.Client(vertexai=True, credentials=credentials, project=project_id, location="us-central1") print(f"Initialized Vertex AI using GOOGLE_CREDENTIALS_JSON env var for project: {project_id}") except Exception as client_err: print(f"ERROR: Failed to initialize genai.Client: {client_err}") raise return True except Exception as e: print(f"Error loading credentials from GOOGLE_CREDENTIALS_JSON: {e}") # Fall through to other methods if this fails # Priority 2: Try to use the credential manager to get credentials from files print(f"Trying credential manager (directory: {credential_manager.credentials_dir})") credentials, project_id = credential_manager.get_next_credentials() if credentials and project_id: try: client = genai.Client(vertexai=True, credentials=credentials, project=project_id, location="us-central1") print(f"Initialized Vertex AI using Credential Manager for project: {project_id}") return True except Exception as e: print(f"ERROR: Failed to initialize client with credentials from Credential Manager: {e}") # Priority 3: Fall back to GOOGLE_APPLICATION_CREDENTIALS environment variable (file path) file_path = os.environ.get("GOOGLE_APPLICATION_CREDENTIALS") if file_path: print(f"Checking GOOGLE_APPLICATION_CREDENTIALS file path: {file_path}") if os.path.exists(file_path): try: print(f"File exists, attempting to load credentials") credentials = service_account.Credentials.from_service_account_file( file_path, scopes=['https://www.googleapis.com/auth/cloud-platform'] ) project_id = credentials.project_id print(f"Successfully loaded credentials from file for project: {project_id}") try: client = genai.Client(vertexai=True, credentials=credentials, project=project_id, location="us-central1") print(f"Initialized Vertex AI using GOOGLE_APPLICATION_CREDENTIALS file path for project: {project_id}") return True except Exception as client_err: print(f"ERROR: Failed to initialize client with credentials from file: {client_err}") except Exception as e: print(f"ERROR: Failed to load credentials from GOOGLE_APPLICATION_CREDENTIALS path {file_path}: {e}") else: print(f"ERROR: GOOGLE_APPLICATION_CREDENTIALS file does not exist at path: {file_path}") # If none of the methods worked print(f"ERROR: No valid credentials found. Tried all methods:") print(f" 1. GOOGLE_CREDENTIALS_JSON: {'Present' if os.environ.get('GOOGLE_CREDENTIALS_JSON') else 'Not found'}") print(f" 2. Credential Manager: Directory {credential_manager.credentials_dir}, Files found: {len(credential_manager.credentials_files)}") print(f" 3. GOOGLE_APPLICATION_CREDENTIALS: {'Present' if os.environ.get('GOOGLE_APPLICATION_CREDENTIALS') else 'Not found'}") return False except Exception as e: print(f"Error initializing authentication: {e}") return False # Initialize Vertex AI at startup @app.on_event("startup") async def startup_event(): if not init_vertex_ai(): print("WARNING: Failed to initialize Vertex AI authentication") # Conversion functions def create_gemini_prompt(messages: List[OpenAIMessage]) -> Union[str, List[Any]]: """ Convert OpenAI messages to Gemini format. Returns either a string prompt or a list of content parts if images are present. """ # Check if any message contains image content has_images = False for message in messages: if isinstance(message.content, list): for part in message.content: if isinstance(part, dict) and part.get('type') == 'image_url': has_images = True break elif isinstance(part, ContentPartImage): has_images = True break if has_images: break # If no images, use the text-only format if not has_images: prompt = "" # Extract system message if present system_message = None for message in messages: if message.role == "system": # Handle both string and list[dict] content types if isinstance(message.content, str): system_message = message.content elif isinstance(message.content, list) and message.content and isinstance(message.content[0], dict) and 'text' in message.content[0]: system_message = message.content[0]['text'] else: # Handle unexpected format or raise error? For now, assume it's usable or skip. system_message = str(message.content) # Fallback, might need refinement break # If system message exists, prepend it if system_message: prompt += f"System: {system_message}\n\n" # Add other messages for message in messages: if message.role == "system": continue # Already handled # Handle both string and list[dict] content types content_text = "" if isinstance(message.content, str): content_text = message.content elif isinstance(message.content, list) and message.content and isinstance(message.content[0], dict) and 'text' in message.content[0]: content_text = message.content[0]['text'] else: # Fallback for unexpected format content_text = str(message.content) if message.role == "user": prompt += f"Human: {content_text}\n" elif message.role == "assistant": prompt += f"AI: {content_text}\n" # Add final AI prompt if last message was from user if messages[-1].role == "user": prompt += "AI: " return prompt # If images are present, create a list of content parts gemini_contents = [] # Extract system message if present and add it first for message in messages: if message.role == "system": if isinstance(message.content, str): gemini_contents.append(f"System: {message.content}") elif isinstance(message.content, list): # Extract text from system message system_text = "" for part in message.content: if isinstance(part, dict) and part.get('type') == 'text': system_text += part.get('text', '') elif isinstance(part, ContentPartText): system_text += part.text if system_text: gemini_contents.append(f"System: {system_text}") break # Process user and assistant messages for message in messages: if message.role == "system": continue # Already handled # For string content, add as text if isinstance(message.content, str): prefix = "Human: " if message.role == "user" else "AI: " gemini_contents.append(f"{prefix}{message.content}") # For list content, process each part elif isinstance(message.content, list): # First collect all text parts text_content = "" for part in message.content: # Handle text parts if isinstance(part, dict) and part.get('type') == 'text': text_content += part.get('text', '') elif isinstance(part, ContentPartText): text_content += part.text # Add the combined text content if any if text_content: prefix = "Human: " if message.role == "user" else "AI: " gemini_contents.append(f"{prefix}{text_content}") # Then process image parts for part in message.content: # Handle image parts if isinstance(part, dict) and part.get('type') == 'image_url': image_url = part.get('image_url', {}).get('url', '') if image_url.startswith('data:'): # Extract mime type and base64 data mime_match = re.match(r'data:([^;]+);base64,(.+)', image_url) if mime_match: mime_type, b64_data = mime_match.groups() image_bytes = base64.b64decode(b64_data) gemini_contents.append(types.Part.from_bytes(data=image_bytes, mime_type=mime_type)) elif isinstance(part, ContentPartImage): image_url = part.image_url.url if image_url.startswith('data:'): # Extract mime type and base64 data mime_match = re.match(r'data:([^;]+);base64,(.+)', image_url) if mime_match: mime_type, b64_data = mime_match.groups() image_bytes = base64.b64decode(b64_data) gemini_contents.append(types.Part.from_bytes(data=image_bytes, mime_type=mime_type)) return gemini_contents def create_generation_config(request: OpenAIRequest) -> Dict[str, Any]: config = {} # Basic parameters that were already supported if request.temperature is not None: config["temperature"] = request.temperature if request.max_tokens is not None: config["max_output_tokens"] = request.max_tokens if request.top_p is not None: config["top_p"] = request.top_p if request.top_k is not None: config["top_k"] = request.top_k if request.stop is not None: config["stop_sequences"] = request.stop # Additional parameters with direct mappings if request.presence_penalty is not None: config["presence_penalty"] = request.presence_penalty if request.frequency_penalty is not None: config["frequency_penalty"] = request.frequency_penalty if request.seed is not None: config["seed"] = request.seed if request.logprobs is not None: config["logprobs"] = request.logprobs if request.response_logprobs is not None: config["response_logprobs"] = request.response_logprobs # Map OpenAI's 'n' parameter to Vertex AI's 'candidate_count' if request.n is not None: config["candidate_count"] = request.n return config # Response format conversion def convert_to_openai_format(gemini_response, model: str) -> Dict[str, Any]: # Handle multiple candidates if present if hasattr(gemini_response, 'candidates') and len(gemini_response.candidates) > 1: choices = [] for i, candidate in enumerate(gemini_response.candidates): choices.append({ "index": i, "message": { "role": "assistant", "content": candidate.text }, "finish_reason": "stop" }) else: # Handle single response (backward compatibility) choices = [ { "index": 0, "message": { "role": "assistant", "content": gemini_response.text }, "finish_reason": "stop" } ] # Include logprobs if available for i, choice in enumerate(choices): if hasattr(gemini_response, 'candidates') and i < len(gemini_response.candidates): candidate = gemini_response.candidates[i] if hasattr(candidate, 'logprobs'): choice["logprobs"] = candidate.logprobs return { "id": f"chatcmpl-{int(time.time())}", "object": "chat.completion", "created": int(time.time()), "model": model, "choices": choices, "usage": { "prompt_tokens": 0, # Would need token counting logic "completion_tokens": 0, "total_tokens": 0 } } def convert_chunk_to_openai(chunk, model: str, response_id: str, candidate_index: int = 0) -> str: chunk_content = chunk.text if hasattr(chunk, 'text') else "" chunk_data = { "id": response_id, "object": "chat.completion.chunk", "created": int(time.time()), "model": model, "choices": [ { "index": candidate_index, "delta": { "content": chunk_content }, "finish_reason": None } ] } # Add logprobs if available if hasattr(chunk, 'logprobs'): chunk_data["choices"][0]["logprobs"] = chunk.logprobs return f"data: {json.dumps(chunk_data)}\n\n" def create_final_chunk(model: str, response_id: str, candidate_count: int = 1) -> str: choices = [] for i in range(candidate_count): choices.append({ "index": i, "delta": {}, "finish_reason": "stop" }) final_chunk = { "id": response_id, "object": "chat.completion.chunk", "created": int(time.time()), "model": model, "choices": choices } return f"data: {json.dumps(final_chunk)}\n\n" # /v1/models endpoint @app.get("/v1/models") async def list_models(api_key: str = Depends(get_api_key)): # Based on current information for Vertex AI models models = [ { "id": "gemini-2.5-pro-exp-03-25", "object": "model", "created": int(time.time()), "owned_by": "google", "permission": [], "root": "gemini-2.5-pro-exp-03-25", "parent": None, }, { "id": "gemini-2.5-pro-exp-03-25-search", "object": "model", "created": int(time.time()), "owned_by": "google", "permission": [], "root": "gemini-2.5-pro-exp-03-25", "parent": None, }, { "id": "gemini-2.0-flash", "object": "model", "created": int(time.time()), "owned_by": "google", "permission": [], "root": "gemini-2.0-flash", "parent": None, }, { "id": "gemini-2.0-flash-search", "object": "model", "created": int(time.time()), "owned_by": "google", "permission": [], "root": "gemini-2.0-flash", "parent": None, }, { "id": "gemini-2.0-flash-lite", "object": "model", "created": int(time.time()), "owned_by": "google", "permission": [], "root": "gemini-2.0-flash-lite", "parent": None, }, { "id": "gemini-2.0-flash-lite-search", "object": "model", "created": int(time.time()), "owned_by": "google", "permission": [], "root": "gemini-2.0-flash-lite", "parent": None, }, { "id": "gemini-2.0-pro-exp-02-05", "object": "model", "created": int(time.time()), "owned_by": "google", "permission": [], "root": "gemini-2.0-pro-exp-02-05", "parent": None, }, { "id": "gemini-1.5-flash", "object": "model", "created": int(time.time()), "owned_by": "google", "permission": [], "root": "gemini-1.5-flash", "parent": None, }, { "id": "gemini-1.5-flash-8b", "object": "model", "created": int(time.time()), "owned_by": "google", "permission": [], "root": "gemini-1.5-flash-8b", "parent": None, }, { "id": "gemini-1.5-pro", "object": "model", "created": int(time.time()), "owned_by": "google", "permission": [], "root": "gemini-1.5-pro", "parent": None, }, { "id": "gemini-1.0-pro-002", "object": "model", "created": int(time.time()), "owned_by": "google", "permission": [], "root": "gemini-1.0-pro-002", "parent": None, }, { "id": "gemini-1.0-pro-vision-001", "object": "model", "created": int(time.time()), "owned_by": "google", "permission": [], "root": "gemini-1.0-pro-vision-001", "parent": None, }, { "id": "gemini-embedding-exp", "object": "model", "created": int(time.time()), "owned_by": "google", "permission": [], "root": "gemini-embedding-exp", "parent": None, } ] return {"object": "list", "data": models} # Main chat completion endpoint # OpenAI-compatible error response def create_openai_error_response(status_code: int, message: str, error_type: str) -> Dict[str, Any]: return { "error": { "message": message, "type": error_type, "code": status_code, "param": None, } } @app.post("/v1/chat/completions") async def chat_completions(request: OpenAIRequest, api_key: str = Depends(get_api_key)): try: # Validate model availability models_response = await list_models() if not request.model or not any(model["id"] == request.model for model in models_response.get("data", [])): error_response = create_openai_error_response( 400, f"Model '{request.model}' not found", "invalid_request_error" ) return JSONResponse(status_code=400, content=error_response) # Check if this is a grounded search model is_grounded_search = request.model.endswith("-search") # Extract the base model name (remove -search suffix if present) gemini_model = request.model.replace("-search", "") if is_grounded_search else request.model # Create generation config generation_config = create_generation_config(request) # Use the globally initialized client (from startup) global client if client is None: # This should ideally not happen if startup was successful error_response = create_openai_error_response( 500, "Vertex AI client not initialized", "server_error" ) return JSONResponse(status_code=500, content=error_response) print(f"Using globally initialized client.") # Initialize Gemini model search_tool = types.Tool(google_search=types.GoogleSearch()) safety_settings = [ types.SafetySetting( category="HARM_CATEGORY_HATE_SPEECH", threshold="OFF" ),types.SafetySetting( category="HARM_CATEGORY_DANGEROUS_CONTENT", threshold="OFF" ),types.SafetySetting( category="HARM_CATEGORY_SEXUALLY_EXPLICIT", threshold="OFF" ),types.SafetySetting( category="HARM_CATEGORY_HARASSMENT", threshold="OFF" )] generation_config["safety_settings"] = safety_settings if is_grounded_search: generation_config["tools"] = [search_tool] # Create prompt from messages prompt = create_gemini_prompt(request.messages) if request.stream: # Handle streaming response async def stream_generator(): response_id = f"chatcmpl-{int(time.time())}" candidate_count = request.n or 1 try: # For streaming, we can only handle one candidate at a time # If multiple candidates are requested, we'll generate them sequentially for candidate_index in range(candidate_count): # Generate content with streaming # Handle both string and list content formats (for images) responses = client.models.generate_content_stream( model=gemini_model, contents=prompt, # This can be either a string or a list of content parts config=generation_config, ) # Convert and yield each chunk for response in responses: yield convert_chunk_to_openai(response, request.model, response_id, candidate_index) # Send final chunk with all candidates yield create_final_chunk(request.model, response_id, candidate_count) yield "data: [DONE]\n\n" except Exception as stream_error: # Format streaming errors in SSE format error_msg = f"Error during streaming: {str(stream_error)}" print(error_msg) error_response = create_openai_error_response(500, error_msg, "server_error") yield f"data: {json.dumps(error_response)}\n\n" yield "data: [DONE]\n\n" return StreamingResponse( stream_generator(), media_type="text/event-stream" ) else: # Handle non-streaming response try: # If multiple candidates are requested, set candidate_count if request.n and request.n > 1: # Make sure generation_config has candidate_count set if "candidate_count" not in generation_config: generation_config["candidate_count"] = request.n # Handle both string and list content formats (for images) response = client.models.generate_content( model=gemini_model, contents=prompt, # This can be either a string or a list of content parts config=generation_config, ) openai_response = convert_to_openai_format(response, request.model) return JSONResponse(content=openai_response) except Exception as generate_error: error_msg = f"Error generating content: {str(generate_error)}" print(error_msg) error_response = create_openai_error_response(500, error_msg, "server_error") return JSONResponse(status_code=500, content=error_response) except Exception as e: error_msg = f"Error processing request: {str(e)}" print(error_msg) error_response = create_openai_error_response(500, error_msg, "server_error") return JSONResponse(status_code=500, content=error_response) # Health check endpoint @app.get("/health") def health_check(api_key: str = Depends(get_api_key)): # Refresh the credentials list to get the latest status credential_manager.refresh_credentials_list() return { "status": "ok", "credentials": { "available": len(credential_manager.credentials_files), "files": [os.path.basename(f) for f in credential_manager.credentials_files], "current_index": credential_manager.current_index } } # Diagnostic endpoint for troubleshooting credential issues @app.get("/debug/credentials") def debug_credentials(api_key: str = Depends(get_api_key)): """ Diagnostic endpoint to check credential configuration without actually authenticating. This helps troubleshoot issues with credential setup, especially on Hugging Face. """ # Check GOOGLE_CREDENTIALS_JSON creds_json = os.environ.get("GOOGLE_CREDENTIALS_JSON") creds_json_status = { "present": creds_json is not None, "length": len(creds_json) if creds_json else 0, "parse_status": "not_attempted" } # Try to parse the JSON if present if creds_json: try: creds_info = json.loads(creds_json) # Check for required fields required_fields = ["type", "project_id", "private_key_id", "private_key", "client_email"] missing_fields = [field for field in required_fields if field not in creds_info] creds_json_status.update({ "parse_status": "success", "is_dict": isinstance(creds_info, dict), "missing_required_fields": missing_fields, "project_id": creds_info.get("project_id", "not_found"), # Include a safe sample of the private key to check if it's properly formatted "private_key_sample": creds_info.get("private_key", "not_found")[:10] + "..." if "private_key" in creds_info else "not_found" }) except json.JSONDecodeError as e: creds_json_status.update({ "parse_status": "error", "error": str(e), "sample": creds_json[:20] + "..." if len(creds_json) > 20 else creds_json }) # Check credential files credential_manager.refresh_credentials_list() # Check GOOGLE_APPLICATION_CREDENTIALS app_creds_path = os.environ.get("GOOGLE_APPLICATION_CREDENTIALS") app_creds_status = { "present": app_creds_path is not None, "path": app_creds_path, "exists": os.path.exists(app_creds_path) if app_creds_path else False } return { "environment": { "GOOGLE_CREDENTIALS_JSON": creds_json_status, "CREDENTIALS_DIR": { "path": credential_manager.credentials_dir, "exists": os.path.exists(credential_manager.credentials_dir), "files_found": len(credential_manager.credentials_files), "files": [os.path.basename(f) for f in credential_manager.credentials_files] }, "GOOGLE_APPLICATION_CREDENTIALS": app_creds_status }, "recommendations": [ "Ensure GOOGLE_CREDENTIALS_JSON contains the full, properly formatted JSON content of your service account key", "Check for any special characters or line breaks that might need proper escaping", "Verify that the service account has the necessary permissions for Vertex AI" ] }