openai_integration.rs (5774B)
1 //! Integration tests for OpenAI backend / trial key 2 //! 3 //! These tests verify that the OpenAI backend can connect and stream 4 //! responses using the embedded trial API key. 5 //! 6 //! Run with: cargo test -p notedeck_dave --test openai_integration -- --ignored 7 8 use async_openai::types::{ 9 ChatCompletionRequestMessage, ChatCompletionRequestUserMessageArgs, CreateChatCompletionRequest, 10 }; 11 use async_openai::Client; 12 use futures::StreamExt; 13 use notedeck_dave::config::ModelConfig; 14 15 /// Test that the trial key can authenticate and get a streamed response. 16 #[tokio::test] 17 #[ignore = "Requires network access to OpenAI API"] 18 async fn test_trial_key_streams_response() { 19 let config = ModelConfig::trial(); 20 let client = Client::with_config(config.to_api()); 21 22 let message = ChatCompletionRequestUserMessageArgs::default() 23 .content("Say hello in one word.") 24 .build() 25 .expect("build user message"); 26 27 let request = CreateChatCompletionRequest { 28 model: config.model().to_string(), 29 stream: Some(true), 30 messages: vec![ChatCompletionRequestMessage::User(message)], 31 ..Default::default() 32 }; 33 34 let mut stream = client 35 .chat() 36 .create_stream(request) 37 .await 38 .expect("Failed to create stream - trial key may be invalid or expired"); 39 40 let mut received_text = String::new(); 41 let mut chunk_count = 0; 42 43 while let Some(result) = stream.next().await { 44 let response = result.expect("Stream chunk error"); 45 for choice in &response.choices { 46 if let Some(content) = &choice.delta.content { 47 received_text.push_str(content); 48 chunk_count += 1; 49 } 50 } 51 } 52 53 assert!( 54 !received_text.is_empty(), 55 "Should receive text from OpenAI. Got empty response." 56 ); 57 println!( 58 "Trial key works: received {} chunks, text: {:?}", 59 chunk_count, received_text 60 ); 61 } 62 63 /// Test that a non-streaming request also works with the trial key. 
64 #[tokio::test] 65 #[ignore = "Requires network access to OpenAI API"] 66 async fn test_trial_key_non_streaming() { 67 let config = ModelConfig::trial(); 68 let client = Client::with_config(config.to_api()); 69 70 let message = ChatCompletionRequestUserMessageArgs::default() 71 .content("Reply with exactly: OK") 72 .build() 73 .expect("build user message"); 74 75 let request = CreateChatCompletionRequest { 76 model: config.model().to_string(), 77 stream: Some(false), 78 messages: vec![ChatCompletionRequestMessage::User(message)], 79 ..Default::default() 80 }; 81 82 let response = client 83 .chat() 84 .create(request) 85 .await 86 .expect("Failed to create completion - trial key may be invalid or expired"); 87 88 let text = response.choices[0].message.content.as_deref().unwrap_or(""); 89 90 assert!( 91 !text.is_empty(), 92 "Should receive non-empty response from OpenAI" 93 ); 94 println!("Non-streaming response: {:?}", text); 95 } 96 97 /// Diagnostic: check which models the trial key project has access to. 98 #[tokio::test] 99 #[ignore = "Requires network access to OpenAI API"] 100 async fn test_trial_key_model_access() { 101 let config = ModelConfig::trial(); 102 let client = Client::with_config(config.to_api()); 103 104 let models_to_try = ["gpt-5.2", "gpt-4.1-mini", "gpt-4.1-nano", "gpt-4.1"]; 105 106 for model in models_to_try { 107 let message = ChatCompletionRequestUserMessageArgs::default() 108 .content("Say hi") 109 .build() 110 .expect("build user message"); 111 112 let request = CreateChatCompletionRequest { 113 model: model.to_string(), 114 stream: Some(false), 115 messages: vec![ChatCompletionRequestMessage::User(message)], 116 max_tokens: Some(5), 117 ..Default::default() 118 }; 119 120 match client.chat().create(request).await { 121 Ok(_) => println!(" OK: {}", model), 122 Err(e) => println!("FAIL: {} - {}", model, e), 123 } 124 } 125 } 126 127 /// Test that ModelConfig::trial() produces the expected configuration. 
#[test]
fn test_trial_config_values() {
    let config = ModelConfig::trial();

    assert!(config.trial, "trial flag should be true");
    assert_eq!(config.model(), "gpt-4.1-mini");

    // Fetch the key once and check presence + shape on the same value.
    let key = config.api_key();
    assert!(key.is_some(), "Trial config should have an API key");
    assert!(
        key.unwrap().starts_with("sk-"),
        "Trial API key should start with sk-"
    );
    assert!(
        config.endpoint().is_none(),
        "Trial config should use default OpenAI endpoint"
    );
}

/// Test that ModelConfig::default() falls back to trial key when no env vars are set.
/// This verifies the Android fix (no longer defaults to Remote backend).
#[test]
fn test_default_config_uses_openai_without_env_vars() {
    // Note: This test's behavior depends on environment variables.
    // When DAVE_API_KEY, OPENAI_API_KEY, ANTHROPIC_API_KEY, and CLAUDE_API_KEY
    // are all unset, it should default to OpenAI with trial key.
    let config = ModelConfig::default();

    // Only assert when none of the relevant variables are present, so the
    // test stays meaningful on developer machines with keys configured.
    let relevant_vars = [
        "DAVE_API_KEY",
        "OPENAI_API_KEY",
        "ANTHROPIC_API_KEY",
        "CLAUDE_API_KEY",
        "DAVE_BACKEND",
    ];
    let no_env_overrides = relevant_vars
        .iter()
        .all(|var| std::env::var(var).is_err());

    // If no API keys are set in the environment, we should get OpenAI (not Remote)
    if no_env_overrides {
        assert!(
            config.trial,
            "Should be in trial mode when no API keys are set"
        );
        assert!(
            config.api_key().is_some(),
            "Should have trial API key when no env vars are set"
        );
        assert_eq!(config.model(), "gpt-4.1-mini");
    }
}