Gemini Pro: no wait / Azure GPT-4: no wait / Azure GPT-4 Turbo: 3sec / Azure DALL-E: no wait
Hint
{
  "uptime": 12668,
  "endpoints": {
    "google-ai": "https://ami001-merkava.hf.space/proxy/google-ai",
    "azure": "https://ami001-merkava.hf.space/proxy/azure/openai",
    "azure-image": "https://ami001-merkava.hf.space/proxy/azure/openai"
  },
  "proompts": 625,
  "tookens": "4.85m",
  "proomptersNow": 2,
  "google-aiKeys": 39,
  "azureKeys": 6,
  "gemini-pro": {
    "usage": "981.5k tokens",
    "activeKeys": 39,
    "revokedKeys": 0,
    "proomptersInQueue": 0,
    "estimatedQueueTime": "no wait"
  },
  "azure-gpt4-turbo": {
    "usage": "947.9k tokens",
    "activeKeys": 1,
    "revokedKeys": 0,
    "proomptersInQueue": 0,
    "estimatedQueueTime": "3sec"
  },
  "azure-gpt4": {
    "usage": "0 tokens",
    "activeKeys": 0,
    "revokedKeys": 3,
    "proomptersInQueue": 0,
    "estimatedQueueTime": "no wait"
  },
  "azure-dall-e": {
    "usage": "2.92m tokens",
    "activeKeys": 2,
    "revokedKeys": 0,
    "proomptersInQueue": 0,
    "estimatedQueueTime": "no wait"
  },
  "config": {
    "gatekeeper": "proxy_key",
    "maxIpsAutoBan": true,
    "textModelRateLimit": 3,
    "imageModelRateLimit": 2,
    "maxContextTokensOpenAI": 0,
    "maxContextTokensAnthropic": 0,
    "maxOutputTokensOpenAI": 4096,
    "maxOutputTokensAnthropic": 1024,
    "allowAwsLogging": false,
    "promptLogging": false,
    "tokenQuota": {
      "turbo": 0,
      "gpt4": 0,
      "gpt4-32k": 0,
      "gpt4-turbo": 0,
      "gpt4o": 0,
      "dall-e": 0,
      "claude": 0,
      "claude-opus": 0,
      "gemini-pro": 0,
      "mistral-tiny": 0,
      "mistral-small": 0,
      "mistral-medium": 0,
      "mistral-large": 0,
      "aws-claude": 0,
      "aws-claude-opus": 0,
      "azure-turbo": 0,
      "azure-gpt4": 0,
      "azure-gpt4-32k": 0,
      "azure-gpt4-turbo": 0,
      "azure-gpt4o": 0,
      "azure-dall-e": 0
    },
    "allowOpenAIToolUsage": false,
    "allowImagePrompts": false
  },
  "build": "930bac0 (main@khanon/oai-reverse-proxy)"
}