diff --git a/c/include/ml-lxm-service-internal.h b/c/include/ml-lxm-service-internal.h
new file mode 100644
index 00000000..1d11a3ee
--- /dev/null
+++ b/c/include/ml-lxm-service-internal.h
@@ -0,0 +1,265 @@
+/* SPDX-License-Identifier: Apache-2.0 */
+/**
+ * @file ml-lxm-service-internal.h
+ * @date 23 JULY 2025
+ * @brief Machine Learning LXM (LLM, LVM, etc.) Service API
+ * @see https://github.com/nnstreamer/api
+ * @author Hyunil Park
+ * @bug No known bugs except for NYI items
+ */
+
+/**
+ * @example sample_lxm_service.c
+ * @brief Sample application demonstrating ML LXM Service API usage
+ *
+ * This sample shows how to:
+ * - Create and configure an LXM session
+ * - Build prompts with text and instructions
+ * - Generate streaming responses with custom options
+ * - Handle token callbacks for real-time processing
+ *
+ * Configuration file example (config.json):
+ * @code
+ * {
+ *   "single" :
+ *   {
+ *     "framework" : "flare",
+ *     "model" : ["sflare_if_4bit_3b.bin"],
+ *     "adapter" : ["history_lora.bin"],
+ *     "custom" : "tokenizer_path:tokenizer.json,backend:CPU,output_size:1024,model_type:3B,data_type:W4A32",
+ *     "invoke_dynamic" : "true"
+ *   }
+ * }
+ * @endcode
+ *
+ * Basic usage workflow:
+ * @code
+ * // 1. Create session with callback
+ * ml_lxm_session_h session;
+ * ml_lxm_session_create("/path/to/config.json", NULL, token_handler, NULL, &session);
+ *
+ * // 2. Create prompt
+ * ml_lxm_prompt_h prompt;
+ * ml_lxm_prompt_create(&prompt);
+ * ml_lxm_prompt_append_text(prompt, "Hello AI");
+ *
+ * // 3. Generate response with options (callback is already set during session creation)
+ * ml_option_h options = NULL;
+ * ml_option_create(&options);
+ * ml_option_set(options, "temperature", g_strdup_printf("%f", 1.0), g_free);
+ * ml_option_set(options, "max_tokens", g_strdup_printf("%zu", (size_t)50), g_free);
+ * ml_lxm_session_respond(session, prompt, options);
+ * ml_option_destroy(options);
+ *
+ * // 4. Cleanup
+ * ml_lxm_prompt_destroy(prompt);
+ * ml_lxm_session_destroy(session);
+ * @endcode
+ *
+ * Complete example with token callback:
+ * @code
+ * #include "ml-lxm-service-internal.h"
+ * #include <iostream>
+ *
+ * static void token_handler(ml_service_event_e event,
+ *                           ml_information_h event_data,
+ *                           void *user_data);
+ *
+ * int main() {
+ *   ml_lxm_session_h session = NULL;
+ *   ml_lxm_prompt_h prompt = NULL;
+ *   int ret;
+ *
+ *   // Check availability first
+ *   ml_lxm_availability_e status;
+ *   ret = ml_lxm_check_availability(&status);
+ *   if (ret != ML_ERROR_NONE || status != ML_LXM_AVAILABILITY_AVAILABLE) {
+ *     std::cout << "LXM service not available" << std::endl;
+ *     return -1;
+ *   }
+ *
+ *   // 1. Create session with config, instructions, and callback
+ *   ret = ml_lxm_session_create("/path/to/config.json", "You are a helpful AI assistant", token_handler, NULL, &session);
+ *   if (ret != ML_ERROR_NONE) {
+ *     std::cout << "Failed to create session" << std::endl;
+ *     return -1;
+ *   }
+ *
+ *   // 2. Create prompt
+ *   ret = ml_lxm_prompt_create(&prompt);
+ *   if (ret != ML_ERROR_NONE) {
+ *     std::cout << "Failed to create prompt" << std::endl;
+ *     ml_lxm_session_destroy(session);
+ *     return -1;
+ *   }
+ *
+ *   // Add text to prompt
+ *   ret = ml_lxm_prompt_append_text(prompt, "Explain quantum computing in simple terms");
+ *   if (ret != ML_ERROR_NONE) {
+ *     std::cout << "Failed to append text to prompt" << std::endl;
+ *     ml_lxm_prompt_destroy(prompt);
+ *     ml_lxm_session_destroy(session);
+ *     return -1;
+ *   }
+ *
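+ *   // (Optional) An instruction can be appended to the same prompt as well.
+ *   // Illustrative step: in the current implementation this is equivalent
+ *   // to appending plain text.
+ *   ret = ml_lxm_prompt_append_instruction(prompt, "Answer in simple terms.");
+ *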
+ *   // 3. Generate response with custom options
+ *   ml_option_h options = NULL;
+ *   ml_option_create(&options);
+ *   ml_option_set(options, "temperature", g_strdup_printf("%f", 1.2), g_free);
+ *   ml_option_set(options, "max_tokens", g_strdup_printf("%zu", (size_t)128), g_free);
+ *
+ *   std::cout << "AI Response: ";
+ *   ret = ml_lxm_session_respond(session, prompt, options);
+ *   ml_option_destroy(options);
+ *   if (ret != ML_ERROR_NONE) {
+ *     std::cout << "Failed to generate response" << std::endl;
+ *   }
+ *   std::cout << std::endl;
+ *
+ *   // 4. Cleanup
+ *   ml_lxm_prompt_destroy(prompt);
+ *   ml_lxm_session_destroy(session);
+ *
+ *   return 0;
+ * }
+ *
+ * static void token_handler(ml_service_event_e event,
+ *                           ml_information_h event_data,
+ *                           void *user_data) {
+ *   ml_tensors_data_h data = NULL;
+ *   void *_raw = NULL;
+ *   size_t _size = 0;
+ *   int ret;
+ *
+ *   switch (event) {
+ *     case ML_SERVICE_EVENT_NEW_DATA:
+ *       if (event_data != NULL) {
+ *         ret = ml_information_get(event_data, "data", &data);
+ *         if (ret == ML_ERROR_NONE) {
+ *           ret = ml_tensors_data_get_tensor_data(data, 0U, &_raw, &_size);
+ *           if (ret == ML_ERROR_NONE && _raw != NULL && _size > 0) {
+ *             std::cout.write(static_cast<const char *>(_raw), _size);
+ *             std::cout.flush();
+ *           }
+ *         }
+ *       }
+ *       break;
+ *     default:
+ *       break;
+ *   }
+ * }
+ * @endcode
+ */
+
+#ifndef __ML_LXM_SERVICE_INTERNAL_H__
+#define __ML_LXM_SERVICE_INTERNAL_H__
+
+#include <ml-api-common.h>
+#include <ml-api-service.h>
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/**
+ * @brief Enumeration for LXM service availability status.
+ */
+typedef enum
+{
+  ML_LXM_AVAILABILITY_AVAILABLE = 0,
+  ML_LXM_AVAILABILITY_DEVICE_NOT_ELIGIBLE,
+  ML_LXM_AVAILABILITY_SERVICE_DISABLED,
+  ML_LXM_AVAILABILITY_MODEL_NOT_READY,
+  ML_LXM_AVAILABILITY_UNKNOWN
+} ml_lxm_availability_e;
+
+/**
+ * @brief Checks LXM service availability.
+ * @param[out] status Current availability status.
+ * @return ML_ERROR_NONE on success, error code otherwise.
+ */
+int ml_lxm_check_availability (ml_lxm_availability_e * status);
+
+/**
+ * @brief A handle for an LXM session.
+ */
+typedef void *ml_lxm_session_h;
+
+/**
+ * @brief Creates an LXM session with a mandatory callback.
+ * @param[in] config_path Path to the configuration file.
+ * @param[in] instructions Initial instructions (optional).
+ * @param[in] callback Callback function for session events (mandatory).
+ * @param[in] user_data User data to be passed to the callback.
+ * @param[out] session Session handle.
+ * @return ML_ERROR_NONE on success.
+ * @note The callback parameter is mandatory and is set during session creation.
+ */
+int ml_lxm_session_create (const char *config_path, const char *instructions, ml_service_event_cb callback, void *user_data, ml_lxm_session_h * session);
+
+/**
+ * @brief Destroys an LXM session.
+ * @param[in] session Session handle.
+ * @return ML_ERROR_NONE on success.
+ */
+int ml_lxm_session_destroy (ml_lxm_session_h session);
+
+/**
+ * @brief A handle for an LXM prompt.
+ */
+typedef void *ml_lxm_prompt_h;
+
+/**
+ * @brief Creates a prompt object.
+ * @param[out] prompt Prompt handle.
+ * @return ML_ERROR_NONE on success.
+ */
+int ml_lxm_prompt_create (ml_lxm_prompt_h * prompt);
+
+/**
+ * @brief Appends text to a prompt.
+ * @param[in] prompt Prompt handle.
+ * @param[in] text Text to append.
+ * @return ML_ERROR_NONE on success.
+ */
+int ml_lxm_prompt_append_text (ml_lxm_prompt_h prompt, const char *text);
+
+/**
+ * @brief Appends an instruction to a prompt.
+ * @param[in] prompt Prompt handle.
+ * @param[in] instruction Instruction to append.
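+ * @note In the current implementation this is equivalent to ml_lxm_prompt_append_text(): the instruction is appended to the prompt as plain text.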
+ * @return ML_ERROR_NONE on success.
+ */
+int ml_lxm_prompt_append_instruction (ml_lxm_prompt_h prompt, const char *instruction);
+
+/**
+ * @brief Destroys a prompt object.
+ * @param[in] prompt Prompt handle.
+ * @return ML_ERROR_NONE on success.
+ */
+int ml_lxm_prompt_destroy (ml_lxm_prompt_h prompt);
+
+/**
+ * @brief Sets runtime instructions for a session.
+ * @param[in] session Session handle.
+ * @param[in] instructions New instructions.
+ * @return ML_ERROR_NONE on success.
+ */
+int ml_lxm_session_set_instructions (ml_lxm_session_h session, const char *instructions);
+
+/**
+ * @brief Generates a token-streamed response.
+ * @param[in] session Session handle.
+ * @param[in] prompt Prompt handle.
+ * @param[in] options Generation parameters (ml_option_h).
+ * @return ML_ERROR_NONE on success.
+ * @note The callback given to ml_lxm_session_create() receives the streamed tokens.
+ */
+int ml_lxm_session_respond (ml_lxm_session_h session, ml_lxm_prompt_h prompt, ml_option_h options);
+
+#ifdef __cplusplus
+}
+#endif
+#endif
+/* __ML_LXM_SERVICE_INTERNAL_H__ */
diff --git a/c/src/meson.build b/c/src/meson.build
index e584cef3..9eead86c 100644
--- a/c/src/meson.build
+++ b/c/src/meson.build
@@ -1,7 +1,7 @@
 nns_capi_common_srcs = files('ml-api-common.c', 'ml-api-inference-internal.c')
 nns_capi_single_srcs = files('ml-api-inference-single.c')
 nns_capi_pipeline_srcs = files('ml-api-inference-pipeline.c')
-nns_capi_service_srcs = files('ml-api-service.c', 'ml-api-service-extension.c', 'ml-api-service-agent-client.c')
+nns_capi_service_srcs = files('ml-api-service.c', 'ml-api-service-extension.c', 'ml-api-service-agent-client.c', 'ml-lxm-service.c')
 
 if support_nnstreamer_edge
   nns_capi_service_srcs += files('ml-api-service-query.c')
diff --git a/c/src/ml-lxm-service.c b/c/src/ml-lxm-service.c
new file mode 100644
index 00000000..85502f65
--- /dev/null
+++ b/c/src/ml-lxm-service.c
@@ -0,0 +1,287 @@
+/* SPDX-License-Identifier: Apache-2.0 */
+/**
+ * @file ml-lxm-service.c
+ * @date 23 JULY 2025
+ * @brief Machine Learning LXM (LLM, LVM, etc.) Service API
+ * @see https://github.com/nnstreamer/api
+ * @author Hyunil Park
+ * @bug No known bugs except for NYI items
+ */
+#include <glib.h>
+#include <string.h>
+#include <ml-api-common.h>
+#include <ml-api-service.h>
+#include "ml-lxm-service-internal.h"
+
+/**
+ * @brief Internal structure for the session.
+ */
+typedef struct
+{
+  ml_service_h service_handle;
+  char *config_path;
+  char *instructions;
+  ml_service_event_cb user_callback; /**< User callback function */
+  void *user_data; /**< User data passed to callback */
+} ml_lxm_session_internal;
+
+/**
+ * @brief Internal structure for the prompt.
+ */
+typedef struct
+{
+  GString *text;
+} ml_lxm_prompt_internal;
+
+/**
+ * @brief Checks LXM service availability.
+ * @param[out] status Current availability status.
+ * @return ML_ERROR_NONE on success, error code otherwise.
+ */
+int
+ml_lxm_check_availability (ml_lxm_availability_e * status)
+{
+  if (!status)
+    return ML_ERROR_INVALID_PARAMETER;
+
+  *status = ML_LXM_AVAILABILITY_AVAILABLE;
+
+  return ML_ERROR_NONE;
+}
+
+/**
+ * @brief Creates an LXM session with a mandatory callback.
+ * @param[in] config_path Path to the configuration file.
+ * @param[in] instructions Initial instructions (optional).
+ * @param[in] callback Callback function for session events (mandatory).
+ * @param[in] user_data User data to be passed to the callback.
+ * @param[out] session Session handle.
+ * @return ML_ERROR_NONE on success.
+ * @note The callback parameter is mandatory and is set during session creation.
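+ * @note The session owns the underlying ml-service handle; ml_lxm_session_destroy() releases it together with the stored configuration path and instructions.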
+ */
+int
+ml_lxm_session_create (const char *config_path, const char *instructions,
+    ml_service_event_cb callback, void *user_data, ml_lxm_session_h * session)
+{
+  ml_service_h handle;
+  ml_lxm_session_internal *s;
+  int ret;
+
+  if (!session || !config_path || !callback)
+    return ML_ERROR_INVALID_PARAMETER;
+
+  ret = ml_service_new (config_path, &handle);
+  if (ret != ML_ERROR_NONE) {
+    return ret;
+  }
+
+  s = g_malloc0 (sizeof (ml_lxm_session_internal));
+  if (!s) {
+    ml_service_destroy (handle);
+    return ML_ERROR_OUT_OF_MEMORY;
+  }
+
+  s->config_path = g_strdup (config_path);
+  s->instructions = g_strdup (instructions);
+  s->service_handle = handle;
+  s->user_callback = callback;
+  s->user_data = user_data;
+
+  ret = ml_service_set_event_cb (s->service_handle, callback, user_data);
+  if (ret != ML_ERROR_NONE) {
+    /* Cleanup on failure */
+    ml_service_destroy (s->service_handle);
+    g_free (s->config_path);
+    g_free (s->instructions);
+    g_free (s);
+    return ret;
+  }
+
+  *session = s;
+  return ML_ERROR_NONE;
+}
+
+/**
+ * @brief Destroys an LXM session.
+ * @param[in] session Session handle.
+ * @return ML_ERROR_NONE on success.
+ */
+int
+ml_lxm_session_destroy (ml_lxm_session_h session)
+{
+  ml_lxm_session_internal *s;
+
+  if (!session)
+    return ML_ERROR_INVALID_PARAMETER;
+
+  s = (ml_lxm_session_internal *) session;
+
+  ml_service_destroy (s->service_handle);
+
+  g_free (s->config_path);
+  g_free (s->instructions);
+  g_free (s);
+
+  return ML_ERROR_NONE;
+}
+
+/**
+ * @brief Creates a prompt object.
+ * @param[out] prompt Prompt handle.
+ * @return ML_ERROR_NONE on success.
+ */
+int
+ml_lxm_prompt_create (ml_lxm_prompt_h * prompt)
+{
+  ml_lxm_prompt_internal *p;
+
+  if (!prompt)
+    return ML_ERROR_INVALID_PARAMETER;
+
+  p = g_malloc0 (sizeof (ml_lxm_prompt_internal));
+  if (!p)
+    return ML_ERROR_OUT_OF_MEMORY;
+
+  p->text = g_string_new ("");
+  *prompt = p;
+
+  return ML_ERROR_NONE;
+}
+
+/**
+ * @brief Destroys a prompt object.
+ * @param[in] prompt Prompt handle.
+ * @return ML_ERROR_NONE on success.
+ */
+int
+ml_lxm_prompt_destroy (ml_lxm_prompt_h prompt)
+{
+  ml_lxm_prompt_internal *p;
+
+  if (!prompt)
+    return ML_ERROR_INVALID_PARAMETER;
+
+  p = (ml_lxm_prompt_internal *) prompt;
+  g_string_free (p->text, TRUE);
+  g_free (p);
+
+  return ML_ERROR_NONE;
+}
+
+/**
+ * @brief Appends text to a prompt.
+ * @param[in] prompt Prompt handle.
+ * @param[in] text Text to append.
+ * @return ML_ERROR_NONE on success.
+ */
+int
+ml_lxm_prompt_append_text (ml_lxm_prompt_h prompt, const char *text)
+{
+  ml_lxm_prompt_internal *p;
+
+  if (!prompt || !text)
+    return ML_ERROR_INVALID_PARAMETER;
+
+  p = (ml_lxm_prompt_internal *) prompt;
+  g_string_append (p->text, text);
+
+  return ML_ERROR_NONE;
+}
+
+/**
+ * @brief Appends an instruction to a prompt.
+ * @param[in] prompt Prompt handle.
+ * @param[in] instruction Instruction to append.
+ * @return ML_ERROR_NONE on success.
+ */
+int
+ml_lxm_prompt_append_instruction (ml_lxm_prompt_h prompt,
+    const char *instruction)
+{
+  return ml_lxm_prompt_append_text (prompt, instruction);
+}
+
+/**
+ * @brief Sets runtime instructions for a session.
+ * @param[in] session Session handle.
+ * @param[in] instructions New instructions.
+ * @return ML_ERROR_NONE on success.
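+ * @note The stored instructions are prepended, followed by a newline, to the prompt text on the next ml_lxm_session_respond() call. Passing NULL clears them.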
+ */
+int
+ml_lxm_session_set_instructions (ml_lxm_session_h session,
+    const char *instructions)
+{
+  ml_lxm_session_internal *s;
+
+  if (!session)
+    return ML_ERROR_INVALID_PARAMETER;
+
+  s = (ml_lxm_session_internal *) session;
+  g_free (s->instructions);
+  s->instructions = g_strdup (instructions);
+
+  return ML_ERROR_NONE;
+}
+
+/**
+ * @brief Generates a token-streamed response.
+ * @param[in] session Session handle.
+ * @param[in] prompt Prompt handle.
+ * @param[in] options Generation parameters (ml_option_h).
+ * @return ML_ERROR_NONE on success.
+ * @note The callback is automatically set during session creation.
+ */
+int
+ml_lxm_session_respond (ml_lxm_session_h session,
+    ml_lxm_prompt_h prompt, ml_option_h options)
+{
+  int ret = ML_ERROR_NONE;
+  ml_lxm_session_internal *s;
+  ml_lxm_prompt_internal *p;
+  GString *full_input;
+  ml_tensors_info_h input_info = NULL;
+  ml_tensors_data_h input_data = NULL;
+
+  if (!session || !prompt)
+    return ML_ERROR_INVALID_PARAMETER;
+
+  s = (ml_lxm_session_internal *) session;
+  p = (ml_lxm_prompt_internal *) prompt;
+
+  /* TODO (NYI): the given generation options (e.g., temperature, max_tokens)
+   * are not applied to the service request yet. */
+
+  full_input = g_string_new ("");
+  if (s->instructions && s->instructions[0] != '\0') {
+    g_string_append (full_input, s->instructions);
+    g_string_append (full_input, "\n");
+  }
+  g_string_append (full_input, p->text->str);
+
+  /* Get input information */
+  ret = ml_service_get_input_information (s->service_handle, NULL, &input_info);
+  if (ret != ML_ERROR_NONE)
+    goto error;
+
+  /* Create input data */
+  ret = ml_tensors_data_create (input_info, &input_data);
+  if (ret != ML_ERROR_NONE) {
+    goto error;
+  }
+
+  /* Set tensor data */
+  ret =
+      ml_tensors_data_set_tensor_data (input_data, 0U, full_input->str,
+      full_input->len);
+  if (ret != ML_ERROR_NONE) {
+    goto error;
+  }
+
+  /* Send request */
+  ret = ml_service_request (s->service_handle, NULL, input_data);
+
+error:
+  if (input_info)
+    ml_tensors_info_destroy (input_info);
+  if (input_data)
+    ml_tensors_data_destroy (input_data);
+  if (full_input)
+    g_string_free (full_input, TRUE);
+
+  return ret;
+}
diff --git a/tests/capi/meson.build b/tests/capi/meson.build
index 60223d83..54008e2f 100644
--- a/tests/capi/meson.build
+++ b/tests/capi/meson.build
@@ -76,6 +76,22 @@ if get_option('enable-ml-service')
       test('unittest_capi_service_training_offloading', unittest_capi_service_training_offloading, env: testenv, timeout: 100)
     endif
   endif
+
+  # LXM Service Tests
+  # These tests require both ml-service and llamacpp to be enabled.
+  llamacpp_dep = dependency('llama', required: false)
+  if llamacpp_dep.found()
+    # Note: The source file itself is also conditionally compiled with ENABLE_LLAMACPP.
+    unittest_capi_lxm_service = executable('unittest_capi_lxm_service',
+      'unittest_capi_lxm_service.cc',
+      dependencies: service_unittest_deps,
+      install: get_option('install-test'),
+      install_dir: unittest_install_dir
+    )
+    test('unittest_capi_lxm_service', unittest_capi_lxm_service, env: testenv, timeout: 120) # Increased timeout for LLM response
+  else
+    message('LXM Service tests will be skipped because llama dependency was not found.')
+  endif
 endif
 
 if nnfw_dep.found()
diff --git a/tests/capi/unittest_capi_lxm_service.cc b/tests/capi/unittest_capi_lxm_service.cc
new file mode 100644
index 00000000..5c98e67e
--- /dev/null
+++ b/tests/capi/unittest_capi_lxm_service.cc
@@ -0,0 +1,312 @@
+/* SPDX-License-Identifier: Apache-2.0 */
+/**
+ * @file unittest_capi_lxm_service.cc
+ * @date 26 JULY 2025
+ * @brief Unit test for ml-lxm-service.
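+ * @details Covers the session/prompt life cycle, token streaming via the session event callback, and negative tests with invalid parameters.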
+ * @see https://github.com/nnstreamer/api
+ * @author Hyunil Park
+ * @bug No known bugs
+ */
+
+#include <gtest/gtest.h>
+#include <glib.h>
+#include <glib/gstdio.h>
+#include <string.h>
+#include <ml-api-common.h>
+#include <ml-api-service.h>
+#include "ml-lxm-service-internal.h"
+#include "unittest_util.h"
+
+#if defined(ENABLE_LLAMACPP)
+
+/**
+ * @brief Internal function to get the model file path.
+ */
+static gchar *
+_get_model_path (const gchar *model_name)
+{
+  const gchar *root_path = g_getenv ("MLAPI_SOURCE_ROOT_PATH");
+
+  /* Supposed to run test in build directory. */
+  if (root_path == NULL)
+    root_path = "..";
+
+  gchar *model_file = g_build_filename (
+      root_path, "tests", "test_models", "models", model_name, NULL);
+
+  return model_file;
+}
+
+/**
+ * @brief Macro to skip testcase if required files are not ready.
+ */
+#define skip_lxm_tc(tc_name) \
+  do { \
+    g_autofree gchar *model_file = _get_model_path ("llama-2-7b-chat.Q2_K.gguf"); \
+    if (!g_file_test (model_file, G_FILE_TEST_EXISTS)) { \
+      g_autofree gchar *msg = g_strdup_printf ( \
+          "Skipping '%s' due to missing model file. " \
+          "Please download the model file from https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGUF.", \
+          tc_name); \
+      GTEST_SKIP () << msg; \
+    } \
+  } while (0)
+
+/**
+ * @brief Test data structure to pass to the callback.
+ */
+typedef struct {
+  int token_count;
+  GString *received_tokens;
+} lxm_test_data_s;
+
+/**
+ * @brief Callback function for LXM service token streaming.
+ */
+static void
+_lxm_token_cb (ml_service_event_e event, ml_information_h event_data, void *user_data)
+{
+  lxm_test_data_s *tdata = (lxm_test_data_s *) user_data;
+  ml_tensors_data_h data = NULL;
+  void *_raw = NULL;
+  size_t _size = 0;
+  int status;
+
+  switch (event) {
+    case ML_SERVICE_EVENT_NEW_DATA:
+      ASSERT_TRUE (event_data != NULL);
+
+      status = ml_information_get (event_data, "data", &data);
+      EXPECT_EQ (status, ML_ERROR_NONE);
+      if (status != ML_ERROR_NONE)
+        return;
+
+      status = ml_tensors_data_get_tensor_data (data, 0U, &_raw, &_size);
+      EXPECT_EQ (status, ML_ERROR_NONE);
+      if (status != ML_ERROR_NONE)
+        return;
+
+      if (tdata) {
+        if (tdata->received_tokens) {
+          g_string_append_len (tdata->received_tokens, (const char *) _raw, _size);
+        }
+        tdata->token_count++;
+      }
+      g_print ("%.*s", (int) _size, (char *) _raw); // Print received token
+      break;
+    default:
+      // Handle unknown or unimplemented events if necessary
+      g_printerr ("Received unhandled LXM service event: %d\n", event);
+      break;
+  }
+}
+
+/**
+ * @brief Internal function to run a full LXM session test.
+ */
+static void
+_run_lxm_session_test (const gchar *config_path, const gchar *input_text, ml_option_h options)
+{
+  ml_lxm_session_h session = NULL;
+  ml_lxm_prompt_h prompt = NULL;
+  lxm_test_data_s tdata = { 0, NULL };
+  int status;
+
+  tdata.received_tokens = g_string_new ("");
+
+  // 1. Create session with callback
+  status = ml_lxm_session_create (config_path, NULL, _lxm_token_cb, &tdata, &session);
+  ASSERT_EQ (status, ML_ERROR_NONE);
+  ASSERT_TRUE (session != NULL);
+
+  // 2. Create prompt
+  status = ml_lxm_prompt_create (&prompt);
+  ASSERT_EQ (status, ML_ERROR_NONE);
+  ASSERT_TRUE (prompt != NULL);
+
+  status = ml_lxm_prompt_append_text (prompt, input_text);
+  ASSERT_EQ (status, ML_ERROR_NONE);
+
+  // 3. Generate response (callback is already set during session creation)
+  status = ml_lxm_session_respond (session, prompt, options);
+  ASSERT_EQ (status, ML_ERROR_NONE);
+
+  // Wait for the callback to receive data.
+  // 10 seconds should be enough for a simple response.
+  g_usleep (10000000U);
+
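+  // The service emits ML_SERVICE_EVENT_NEW_DATA once per streamed token, so
+  // token_count below reflects how many tokens arrived during the wait.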
+  // 4. Verify results
+  EXPECT_GT (tdata.token_count, 0);
+  EXPECT_GT (tdata.received_tokens->len, 0U);
+
+  g_print ("\nReceived total tokens: %d\n", tdata.token_count);
+  g_print ("Full received text: %s\n", tdata.received_tokens->str);
+
+  // 5. Cleanup
+  status = ml_lxm_prompt_destroy (prompt);
+  EXPECT_EQ (status, ML_ERROR_NONE);
+
+  status = ml_lxm_session_destroy (session);
+  EXPECT_EQ (status, ML_ERROR_NONE);
+
+  if (tdata.received_tokens) {
+    g_string_free (tdata.received_tokens, TRUE);
+  }
+}
+
+/**
+ * @brief Test basic flow of LXM service.
+ */
+TEST (MLLxmService, basicFlow_p)
+{
+  skip_lxm_tc ("basicFlow_p");
+
+  g_autofree gchar *config = get_config_path ("config_single_llamacpp.conf");
+  ASSERT_TRUE (config != NULL);
+
+  const gchar input_text[] = "Hello LXM, how are you?";
+  ml_option_h options = NULL;
+  int status;
+
+  // Create options
+  status = ml_option_create (&options);
+  ASSERT_EQ (status, ML_ERROR_NONE);
+  ASSERT_TRUE (options != NULL);
+
+  // Set temperature option
+  status = ml_option_set (options, "temperature", g_strdup_printf ("%f", 0.8), g_free);
+  ASSERT_EQ (status, ML_ERROR_NONE);
+
+  // Set max_tokens option
+  status = ml_option_set (
+      options, "max_tokens", g_strdup_printf ("%zu", (size_t) 32), g_free);
+  ASSERT_EQ (status, ML_ERROR_NONE);
+
+  _run_lxm_session_test (config, input_text, options);
+
+  // Cleanup options
+  ml_option_destroy (options);
+}
+
+/**
+ * @brief Test LXM service with invalid parameters.
+ */
+TEST (MLLxmService, invalidParams_n)
+{
+  ml_lxm_session_h session = NULL;
+  ml_lxm_prompt_h prompt = NULL;
+  int status;
+  ml_option_h options = NULL;
+  g_autofree gchar *valid_config = get_config_path ("config_single_llamacpp.conf");
+
+  // Create options for testing
+  status = ml_option_create (&options);
+  ASSERT_EQ (status, ML_ERROR_NONE);
+  ml_option_set (options, "temperature", g_strdup_printf ("%f", 0.5), g_free);
+  ml_option_set (options, "max_tokens", g_strdup_printf ("%zu", (size_t) 10), g_free);
+
+  // ml_lxm_session_create
+  status = ml_lxm_session_create (valid_config, NULL, NULL, NULL, NULL);
+  EXPECT_EQ (status, ML_ERROR_INVALID_PARAMETER);
+  status = ml_lxm_session_create (NULL, NULL, NULL, NULL, &session);
+  EXPECT_EQ (status, ML_ERROR_INVALID_PARAMETER);
+
+  status = ml_lxm_session_create ("non_existent_config.conf", NULL, NULL, NULL, &session);
+  EXPECT_EQ (status, ML_ERROR_INVALID_PARAMETER);
+
+  status = ml_lxm_session_create (valid_config, NULL, NULL, NULL, &session);
+  EXPECT_EQ (status, ML_ERROR_INVALID_PARAMETER);
+
+  status = ml_lxm_session_create (valid_config, NULL, _lxm_token_cb, NULL, &session);
+  if (status == ML_ERROR_NONE) {
+    // ml_lxm_prompt_create
+    status = ml_lxm_prompt_create (NULL);
+    EXPECT_EQ (status, ML_ERROR_INVALID_PARAMETER);
+
+    status = ml_lxm_prompt_create (&prompt);
+    ASSERT_EQ (status, ML_ERROR_NONE);
+
+    // ml_lxm_prompt_append_text
+    status = ml_lxm_prompt_append_text (NULL, "text");
+    EXPECT_EQ (status, ML_ERROR_INVALID_PARAMETER);
+    status = ml_lxm_prompt_append_text (prompt, NULL);
+    EXPECT_EQ (status, ML_ERROR_INVALID_PARAMETER);
+
+    // ml_lxm_prompt_append_instruction
+    status = ml_lxm_prompt_append_instruction (NULL, "instruction");
+    EXPECT_EQ (status, ML_ERROR_INVALID_PARAMETER);
+    status = ml_lxm_prompt_append_instruction (prompt, NULL);
+    EXPECT_EQ (status, ML_ERROR_INVALID_PARAMETER);
+
+    // ml_lxm_session_set_instructions
+    status = ml_lxm_session_set_instructions (NULL, "new instructions");
+    EXPECT_EQ (status, ML_ERROR_INVALID_PARAMETER);
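+    // NULL instructions are accepted: they clear the stored instructions,
+    // since the implementation stores g_strdup (NULL), i.e., NULL.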
+    status = ml_lxm_session_set_instructions (session, NULL);
+    EXPECT_EQ (status, ML_ERROR_NONE);
+    status = ml_lxm_session_set_instructions (session, "new instructions");
+    EXPECT_EQ (status, ML_ERROR_NONE);
+
+    // ml_lxm_session_respond - callback is already set during session creation
+    status = ml_lxm_session_respond (NULL, prompt, options);
+    EXPECT_EQ (status, ML_ERROR_INVALID_PARAMETER);
+    status = ml_lxm_session_respond (session, NULL, options);
+    EXPECT_EQ (status, ML_ERROR_INVALID_PARAMETER);
+
+    // Now ml_lxm_session_respond should succeed with valid parameters
+    status = ml_lxm_session_respond (session, prompt, options);
+    EXPECT_EQ (status, ML_ERROR_NONE);
+
+    // ml_lxm_prompt_destroy
+    status = ml_lxm_prompt_destroy (NULL);
+    EXPECT_EQ (status, ML_ERROR_INVALID_PARAMETER);
+    status = ml_lxm_prompt_destroy (prompt);
+    EXPECT_EQ (status, ML_ERROR_NONE);
+    prompt = NULL;
+
+    // ml_lxm_session_destroy
+    status = ml_lxm_session_destroy (NULL);
+    EXPECT_EQ (status, ML_ERROR_INVALID_PARAMETER);
+    status = ml_lxm_session_destroy (session);
+    EXPECT_EQ (status, ML_ERROR_NONE);
+    session = NULL;
+  } else {
+    g_print ("Skipping part of invalidParams_n as session creation failed (possibly due to missing models/config).\n");
+  }
+
+  // Cleanup options
+  ml_option_destroy (options);
+}
+
+/**
+ * @brief Main function to run the test.
+ */
+int
+main (int argc, char **argv)
+{
+  int result = -1;
+
+  try {
+    testing::InitGoogleTest (&argc, argv);
+  } catch (...) {
+    g_warning ("catch `testing::internal::ClassUniqueToAlwaysTrue`");
+  }
+
+  /* ignore tizen feature status while running the testcases */
+  set_feature_state (ML_FEATURE, SUPPORTED);
+  set_feature_state (ML_FEATURE_INFERENCE, SUPPORTED);
+  set_feature_state (ML_FEATURE_SERVICE, SUPPORTED);
+
+  try {
+    result = RUN_ALL_TESTS ();
+  } catch (...) {
+    g_warning ("catch `testing::internal::GoogleTestFailureException`");
+  }
+
+  set_feature_state (ML_FEATURE, NOT_CHECKED_YET);
+  set_feature_state (ML_FEATURE_INFERENCE, NOT_CHECKED_YET);
+  set_feature_state (ML_FEATURE_SERVICE, NOT_CHECKED_YET);
+
+  return result;
+}
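+#else /* !ENABLE_LLAMACPP */
+/**
+ * @brief Stub entry point when llamacpp support is disabled.
+ * @note Defensive sketch: the meson rule normally skips this file, but since
+ *       it is also guarded by ENABLE_LLAMACPP, this keeps the binary linkable
+ *       if it is ever compiled without that define.
+ */
+int
+main (int argc, char **argv)
+{
+  (void) argc;
+  (void) argv;
+  g_warning ("LXM service tests are disabled: built without ENABLE_LLAMACPP.");
+  return 0;
+}
+#endif /* ENABLE_LLAMACPP */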