[]
DsExcel.NET provides AI functions and model request handlers, allowing you to send user prompts and data to specified large language models, write model return results to target cells, and seamlessly integrate complex artificial intelligence processing workflows into calculation chains. With AI functions, you can easily achieve text query, data analysis, text generation, text translation, and text sentiment analysis in your spreadsheets.
Before using the AI Assistant, please ensure you have obtained a valid API key from OpenAI or other AI service providers.
DsExcel.NET provides the IAIModelRequestHandler interface to help users implement complete custom interaction flows with AI models.
The SendRequestAsync method is used to submit AI requests and return asynchronous results. You can refer to the sample code for AI functions, or implement this method yourself as needed to manage the request lifecycle, including building requests, sending them, and handling responses and errors. You can also manage access credentials, model selection, and parameter configuration; ensure invocation security (such as credential protection, encrypted transmission, etc.); integrate middleware as needed (such as sensitive word filtering and log auditing); and set policies such as retry and timeout to improve the security and stability of invocations.
When using the model request handler, DsExcel.NET will not intervene in the model request workflow initiated via IAIModelRequestHandler, nor will it store your API keys or any request/response data.
/// <summary>
/// Implementation of <see cref="IAIModelRequestHandler"/> for the OpenAI API.
/// This class handles HTTP communication with OpenAI-compatible APIs.
/// Failures are reported through <c>AIModelResponse.IsSuccess</c> rather than by
/// throwing, so a network or model error never breaks the calculation chain.
/// </summary>
public class OpenAIModelRequestHandler : IAIModelRequestHandler
{
    private readonly string _apiEndpoint;   // Normalized endpoint (trailing '/' removed).
    private readonly string _apiKey;
    private readonly string _model;
    private readonly OpenAIClient _openAIClient;

    /// <summary>
    /// Initializes a new instance of the <see cref="OpenAIModelRequestHandler"/> class.
    /// </summary>
    /// <param name="apiEndpoint">The API endpoint URL for an OpenAI-compatible API.</param>
    /// <param name="apiKey">The API key used for authentication.</param>
    /// <param name="model">The model name to use for requests.</param>
    /// <exception cref="ArgumentException">
    /// Thrown when <paramref name="apiEndpoint"/>, <paramref name="apiKey"/>, or
    /// <paramref name="model"/> is null, empty, or whitespace.
    /// </exception>
    public OpenAIModelRequestHandler(string apiEndpoint, string apiKey, string model)
    {
        if (string.IsNullOrWhiteSpace(apiEndpoint))
            throw new ArgumentException("API endpoint cannot be null or empty.", nameof(apiEndpoint));
        if (string.IsNullOrWhiteSpace(apiKey))
            throw new ArgumentException("API key cannot be null or empty.", nameof(apiKey));
        // FIX: the model name was previously accepted unvalidated; an empty model
        // would only fail later inside GetChatClient with a less helpful error.
        if (string.IsNullOrWhiteSpace(model))
            throw new ArgumentException("Model name cannot be null or empty.", nameof(model));

        _apiEndpoint = apiEndpoint.TrimEnd('/');
        _apiKey = apiKey;
        _model = model;

        // Point the client at a custom endpoint only when it is not the default
        // OpenAI endpoint; otherwise the SDK's built-in default is used.
        var clientOptions = new OpenAIClientOptions();
        if (!_apiEndpoint.Contains("api.openai.com"))
        {
            clientOptions.Endpoint = new Uri(_apiEndpoint);
        }
        _openAIClient = new OpenAIClient(new ApiKeyCredential(_apiKey), clientOptions);
    }

    /// <summary>
    /// Sends a model request to the OpenAI API asynchronously.
    /// </summary>
    /// <param name="request">The model request containing messages and options.</param>
    /// <returns>
    /// A <see cref="Task{TResult}"/> producing the <c>AIModelResponse</c>; on any
    /// failure the response has <c>IsSuccess = false</c> and the error is written
    /// to <see cref="Console.Error"/>.
    /// </returns>
    public async Task<AIModelResponse> SendRequestAsync(AIModelRequest request)
    {
        if (request == null)
        {
            Console.Error.WriteLine("Request cannot be null");
            return new AIModelResponse
            {
                IsSuccess = false,
            };
        }
        try
        {
            var chatMessages = new List<ChatMessage>();
            foreach (var item in request.Messages)
            {
                // FIX: "assistant" messages (few-shot examples or prior turns) were
                // previously rejected even though they are a standard chat role.
                // FIX: a null Role now falls through to the default arm and is
                // reported, instead of raising NullReferenceException.
                ChatMessage message = item.Role?.ToLowerInvariant() switch
                {
                    "system" => ChatMessage.CreateSystemMessage(item.Content),
                    "user" => ChatMessage.CreateUserMessage(item.Content),
                    "assistant" => ChatMessage.CreateAssistantMessage(item.Content),
                    _ => throw new InvalidOperationException($"Unknown message role: {item.Role}"),
                };
                chatMessages.Add(message);
            }
            if (chatMessages.Count == 0)
            {
                throw new InvalidOperationException("The request must contain at least one message.");
            }

            // Get a chat client bound to the configured model and make the request.
            var chatClient = _openAIClient.GetChatClient(_model);
            var response = await chatClient.CompleteChatAsync(chatMessages);
            if (response?.Value?.Content?.Count > 0)
            {
                // A completion may arrive as multiple content parts; concatenate them.
                var content = string.Concat(response.Value.Content.Select(part => part.Text));
                return new AIModelResponse
                {
                    Content = content,
                    IsSuccess = true
                };
            }

            Console.Error.WriteLine("No content received from the model.");
            return new AIModelResponse
            {
                IsSuccess = false,
            };
        }
        catch (HttpRequestException httpEx)
        {
            Console.Error.WriteLine($"HTTP request failed: {httpEx.Message}");
            return new AIModelResponse
            {
                IsSuccess = false,
            };
        }
        catch (TaskCanceledException tcEx) when (tcEx.InnerException is TimeoutException)
        {
            Console.Error.WriteLine("Request timed out.");
            return new AIModelResponse
            {
                IsSuccess = false,
            };
        }
        catch (Exception ex)
        {
            // FIX: include the exception type so failures surfaced by the SDK
            // (e.g. authentication or deserialization errors) are diagnosable.
            Console.Error.WriteLine($"An error occurred: {ex.GetType().Name}: {ex.Message}");
            return new AIModelResponse
            {
                IsSuccess = false,
            };
        }
    }
}
When the AI function returns the error values below, please refer to the table to troubleshoot possible reasons.
| Error Value | Description |
|---|---|
| #VALUE! | This error is caused by invalid input parameters or internal errors, resulting in function execution failure. |
| #BUSY! | This error indicates that the function is being calculated asynchronously and the result is not yet available. |
| #CONNECT! | This error occurs when the IAIModelRequestHandler returns an error response or when a network connection failure occurs. |
| #NA! | This error indicates that no Workbook.AIModelRequestHandler is registered. |
Data Protection
Always clean and de-identify sensitive spreadsheet data.
For results returned by requests, ensure sensitive fields are de-identified.
Validation
Verify all AI-generated content.
Perform security checks on output results.
AI-Generated Content Disclaimer
Content Generation Risks
This service utilizes third-party AI models injected by users to generate outputs. Results may contain inaccuracies, omissions, or misleading content due to inherent limitations in model architectures and training data. While we implement prompt engineering and technical constraints to optimize outputs, we cannot eliminate all error risks stemming from fundamental model deficiencies.
User Verification Obligations
By using this service, you acknowledge and agree to:
Conduct manual verification of all generated content
Refrain from using unvalidated outputs in high-risk scenarios (legal, medical, financial, etc.)
Hold us harmless for any direct/indirect damages caused by reliance on generated content
Technical Limitations
We disclaim responsibility for:
Output failures caused by third-party model defects or logic errors
Unsuccessful error recovery attempts through fault-tolerant procedures
Technical constraints inherent in current AI technologies
Intellectual Property Compliance
You must ensure:
Injected models/content do not infringe third-party rights
No illegal/sensitive material is processed through the service
Compliance with model providers' IP agreements
Agreement Updates
We reserve the right to modify these terms to align with:
Technological advancements (e.g. new AI safety protocols)
Regulatory changes (e.g. updated AI governance frameworks)
Service architecture improvements
Because AI models are non-deterministic, the same formula may produce different results when recalculated.