
Commit d0f60ff

[TEST] Add test case for lxm service internal API
- Added positive and negative TCs for lxm service internal API

Signed-off-by: hyunil park <hyunil46.park@samsung.com>
1 parent 7b5acc2 commit d0f60ff

2 files changed: +313 -9 lines changed


tests/capi/meson.build

Lines changed: 25 additions & 9 deletions
@@ -66,15 +66,31 @@ if get_option('enable-ml-service')
     )
     test('unittest_capi_service_offloading', unittest_capi_service_offloading, env: testenv, timeout: 100)
 
-    if support_training_offloading
-      unittest_capi_service_training_offloading = executable('unittest_capi_service_training_offloading',
-        'unittest_capi_service_training_offloading.cc',
-        dependencies: service_unittest_deps,
-        install: get_option('install-test'),
-        install_dir: unittest_install_dir
-      )
-      test('unittest_capi_service_training_offloading', unittest_capi_service_training_offloading, env: testenv, timeout: 100)
-    endif
+    if support_training_offloading
+      unittest_capi_service_training_offloading = executable('unittest_capi_service_training_offloading',
+        'unittest_capi_service_training_offloading.cc',
+        dependencies: service_unittest_deps,
+        install: get_option('install-test'),
+        install_dir: unittest_install_dir
+      )
+      test('unittest_capi_service_training_offloading', unittest_capi_service_training_offloading, env: testenv, timeout: 100)
+    endif
+
+    # LXM Service Tests
+    # These tests require both ml-service and llamacpp to be enabled.
+    llamacpp_dep = dependency('llama', required: false)
+    if llamacpp_dep.found()
+      # Note: The source file itself is also conditionally compiled with ENABLE_LLAMACPP.
+      unittest_capi_lxm_service = executable('unittest_capi_lxm_service',
+        'unittest_capi_lxm_service.cc',
+        dependencies: service_unittest_deps,
+        install: get_option('install-test'),
+        install_dir: unittest_install_dir
+      )
+      test('unittest_capi_lxm_service', unittest_capi_lxm_service, env: testenv, timeout: 120) # Increased timeout for LLM response
+    else
+      message('LXM Service tests will be skipped because llama dependency was not found.')
+    endif
   endif
 endif
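For context on the conditional-compilation note in the hunk above, here is an illustrative sketch, not part of the diff: the new test source (shown in the next file) wraps all of its contents, including main(), in an ENABLE_LLAMACPP preprocessor guard, so without that define the unit compiles to an empty translation unit. The file name sketch_guard.cc and the message string below are hypothetical.

/* sketch_guard.cc: hypothetical illustration of the guard pattern used by
 * unittest_capi_lxm_service.cc; everything, including main(), is compiled
 * out unless ENABLE_LLAMACPP is defined at build time. */
#if defined(ENABLE_LLAMACPP)

#include <cstdio>

int
main ()
{
  /* In the real test, the gtest cases and RUN_ALL_TESTS() live here. */
  std::printf ("Built with ENABLE_LLAMACPP; LXM tests are available.\n");
  return 0;
}

#endif /* ENABLE_LLAMACPP */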

tests/capi/unittest_capi_lxm_service.cc

Lines changed: 288 additions & 0 deletions
@@ -0,0 +1,288 @@
/* SPDX-License-Identifier: Apache-2.0 */
/**
 * @file unittest_capi_lxm_service.cc
 * @date 26 JULY 2025
 * @brief Unit test for ml-lxm-service.
 * @see https://github.com/nnstreamer/api
 * @author Hyunil Park <hyunil46.park@samsung.com>
 * @bug No known bugs
 */

#include <gtest/gtest.h>
#include <glib.h>
#include <ml-api-service-private.h>
#include <ml-api-service.h>
#include <string.h>
#include "ml-lxm-service-internal.h"
#include "unittest_util.h"

#if defined(ENABLE_LLAMACPP)

/**
 * @brief Internal function to get the model file path.
 */
static gchar *
_get_model_path (const gchar *model_name)
{
  const gchar *root_path = g_getenv ("MLAPI_SOURCE_ROOT_PATH");

  /* Supposed to run test in build directory. */
  if (root_path == NULL)
    root_path = "..";

  gchar *model_file = g_build_filename (
      root_path, "tests", "test_models", "models", model_name, NULL);

  return model_file;
}

/**
 * @brief Macro to skip testcase if required files are not ready.
 */
#define skip_lxm_tc(tc_name) \
  do { \
    g_autofree gchar *model_file = _get_model_path ("llama-2-7b-chat.Q2_K.gguf"); \
    if (!g_file_test (model_file, G_FILE_TEST_EXISTS)) { \
      g_autofree gchar *msg = g_strdup_printf ( \
          "Skipping '%s' due to missing model file. " \
          "Please download model file from https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGUF.", \
          tc_name); \
      GTEST_SKIP () << msg; \
    } \
  } while (0)

/**
 * @brief Test data structure to pass to the callback.
 */
typedef struct {
  int token_count;
  GString *received_tokens;
} lxm_test_data_s;

/**
 * @brief Callback function for LXM service token streaming.
 */
static void
_lxm_token_cb (ml_service_event_e event, ml_information_h event_data, void *user_data)
{
  lxm_test_data_s *tdata = (lxm_test_data_s *) user_data;
  ml_tensors_data_h data = NULL;
  void *_raw = NULL;
  size_t _size = 0;
  int status;

  switch (event) {
    case ML_SERVICE_EVENT_NEW_DATA:
      ASSERT_TRUE (event_data != NULL);

      status = ml_information_get (event_data, "data", &data);
      EXPECT_EQ (status, ML_ERROR_NONE);
      if (status != ML_ERROR_NONE)
        return;

      status = ml_tensors_data_get_tensor_data (data, 0U, &_raw, &_size);
      EXPECT_EQ (status, ML_ERROR_NONE);
      if (status != ML_ERROR_NONE)
        return;

      if (tdata) {
        if (tdata->received_tokens) {
          g_string_append_len (tdata->received_tokens, (const char *) _raw, _size);
        }
        tdata->token_count++;
      }
      g_print ("%.*s", (int) _size, (char *) _raw); // Print received token
      break;
    default:
      // Handle unknown or unimplemented events if necessary
      g_printerr ("Received unhandled LXM service event: %d\n", event);
      break;
  }
}

/**
 * @brief Internal function to run a full LXM session test.
 */
static void
_run_lxm_session_test (const gchar *config_path, const gchar *input_text,
    const ml_lxm_generation_options_s *options)
{
  ml_lxm_session_h session = NULL;
  ml_lxm_prompt_h prompt = NULL;
  lxm_test_data_s tdata = { 0, NULL };
  int status;

  tdata.received_tokens = g_string_new ("");

  // 1. Create session
  status = ml_lxm_session_create (&session, config_path, NULL);
  ASSERT_EQ (status, ML_ERROR_NONE);
  ASSERT_TRUE (session != NULL);

  // 2. Create prompt
  status = ml_lxm_prompt_create (&prompt);
  ASSERT_EQ (status, ML_ERROR_NONE);
  ASSERT_TRUE (prompt != NULL);

  status = ml_lxm_prompt_append_text (prompt, input_text);
  ASSERT_EQ (status, ML_ERROR_NONE);

  // 3. Generate response
  status = ml_lxm_session_respond (session, prompt, options, _lxm_token_cb, &tdata);
  ASSERT_EQ (status, ML_ERROR_NONE);

  // Wait for the callback to receive data.
  // 5 seconds should be enough for a simple response.
  g_usleep (5000000U);

  // 4. Verify results
  EXPECT_GT (tdata.token_count, 0);
  EXPECT_GT (tdata.received_tokens->len, 0U);

  g_print ("\nReceived total tokens: %d\n", tdata.token_count);
  g_print ("Full received text: %s\n", tdata.received_tokens->str);

  // 5. Cleanup
  status = ml_lxm_prompt_destroy (prompt);
  EXPECT_EQ (status, ML_ERROR_NONE);

  status = ml_lxm_session_destroy (session);
  EXPECT_EQ (status, ML_ERROR_NONE);

  if (tdata.received_tokens) {
    g_string_free (tdata.received_tokens, TRUE);
  }
}

/**
 * @brief Test basic flow of LXM service.
 */
TEST (MLLxmService, basicFlow_p)
{
  skip_lxm_tc ("basicFlow_p");

  g_autofree gchar *config = get_config_path ("config_single_llamacpp.conf");
  ASSERT_TRUE (config != NULL);

  const gchar input_text[] = "Hello LXM, how are you?";
  ml_lxm_generation_options_s options = { 0.8, 32 }; // Example options

  _run_lxm_session_test (config, input_text, &options);
}

/**
 * @brief Test LXM service with invalid parameters.
 */
TEST (MLLxmService, invalidParams_n)
{
  ml_lxm_session_h session = NULL;
  ml_lxm_prompt_h prompt = NULL;
  int status;
  ml_lxm_generation_options_s options = { 0.5, 10 };
  g_autofree gchar *valid_config = get_config_path ("config_single_llamacpp.conf");

  // ml_lxm_session_create
  status = ml_lxm_session_create (NULL, valid_config, NULL);
  EXPECT_EQ (status, ML_ERROR_INVALID_PARAMETER);
  status = ml_lxm_session_create (&session, NULL, NULL);
  EXPECT_EQ (status, ML_ERROR_INVALID_PARAMETER);
  // A non-existent config file path is expected to yield INVALID_PARAMETER.
  // Note: a config that exists but points to missing models may produce a different
  // error from ml_service_new, so only the non-existent-path case is checked here.
  status = ml_lxm_session_create (&session, "non_existent_config.conf", NULL);
  EXPECT_EQ (status, ML_ERROR_INVALID_PARAMETER);

  // Create a valid session for the remaining negative checks. The invalid-parameter
  // checks above run unconditionally; if session creation fails here (e.g., due to
  // missing models or config), the remaining checks are skipped.
  status = ml_lxm_session_create (&session, valid_config, NULL);
  if (status == ML_ERROR_NONE) {
    // ml_lxm_prompt_create
    status = ml_lxm_prompt_create (NULL);
    EXPECT_EQ (status, ML_ERROR_INVALID_PARAMETER);

    status = ml_lxm_prompt_create (&prompt);
    ASSERT_EQ (status, ML_ERROR_NONE);

    // ml_lxm_prompt_append_text
    status = ml_lxm_prompt_append_text (NULL, "text");
    EXPECT_EQ (status, ML_ERROR_INVALID_PARAMETER);
    status = ml_lxm_prompt_append_text (prompt, NULL);
    EXPECT_EQ (status, ML_ERROR_INVALID_PARAMETER);

    // ml_lxm_prompt_append_instruction
    status = ml_lxm_prompt_append_instruction (NULL, "instruction");
    EXPECT_EQ (status, ML_ERROR_INVALID_PARAMETER);
    status = ml_lxm_prompt_append_instruction (prompt, NULL);
    EXPECT_EQ (status, ML_ERROR_INVALID_PARAMETER);

    // ml_lxm_session_set_instructions
    status = ml_lxm_session_set_instructions (NULL, "new instructions");
    EXPECT_EQ (status, ML_ERROR_INVALID_PARAMETER);
    status = ml_lxm_session_set_instructions (session, NULL); // Setting NULL instructions should be fine
    EXPECT_EQ (status, ML_ERROR_NONE);
    status = ml_lxm_session_set_instructions (session, "new instructions");
    EXPECT_EQ (status, ML_ERROR_NONE);

    // ml_lxm_session_respond
    status = ml_lxm_session_respond (NULL, prompt, &options, _lxm_token_cb, NULL);
    EXPECT_EQ (status, ML_ERROR_INVALID_PARAMETER);
    status = ml_lxm_session_respond (session, NULL, &options, _lxm_token_cb, NULL);
    EXPECT_EQ (status, ML_ERROR_INVALID_PARAMETER);
    status = ml_lxm_session_respond (session, prompt, &options, NULL, NULL);
    EXPECT_EQ (status, ML_ERROR_INVALID_PARAMETER);

    // ml_lxm_prompt_destroy
    status = ml_lxm_prompt_destroy (NULL);
    EXPECT_EQ (status, ML_ERROR_INVALID_PARAMETER);
    status = ml_lxm_prompt_destroy (prompt);
    EXPECT_EQ (status, ML_ERROR_NONE);
    prompt = NULL;

    // ml_lxm_session_destroy
    status = ml_lxm_session_destroy (NULL);
    EXPECT_EQ (status, ML_ERROR_INVALID_PARAMETER);
    status = ml_lxm_session_destroy (session);
    EXPECT_EQ (status, ML_ERROR_NONE);
    session = NULL;
  } else {
    g_print ("Skipping part of invalidParams_n as session creation failed (possibly due to missing models/config).\n");
  }
}

/**
 * @brief Main function to run the test.
 */
int
main (int argc, char **argv)
{
  int result = -1;

  try {
    testing::InitGoogleTest (&argc, argv);
  } catch (...) {
    g_warning ("catch 'testing::internal::<unnamed>::ClassUniqueToAlwaysTrue'");
  }

  /* ignore tizen feature status while running the testcases */
  set_feature_state (ML_FEATURE, SUPPORTED);
  set_feature_state (ML_FEATURE_INFERENCE, SUPPORTED);
  set_feature_state (ML_FEATURE_SERVICE, SUPPORTED);

  try {
    result = RUN_ALL_TESTS ();
  } catch (...) {
    g_warning ("catch `testing::internal::GoogleTestFailureException`");
  }

  set_feature_state (ML_FEATURE, NOT_CHECKED_YET);
  set_feature_state (ML_FEATURE_INFERENCE, NOT_CHECKED_YET);
  set_feature_state (ML_FEATURE_SERVICE, NOT_CHECKED_YET);

  return result;
}
#endif /* ENABLE_LLAMACPP */
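
As a quick reference, here is a condensed sketch (not part of this commit) of the positive flow exercised by basicFlow_p above. Every call and the callback signature are taken from the test file; "my_llamacpp.conf" is a placeholder config path, and the option values simply mirror the example values used in the test. Building it requires the same headers and ml-service setup as the test itself.

/* Condensed, illustrative positive flow of the lxm internal API. */
#include <glib.h>
#include <ml-api-service.h>
#include "ml-lxm-service-internal.h"

static void
token_cb (ml_service_event_e event, ml_information_h event_data, void *user_data)
{
  /* Same event handling as the test's _lxm_token_cb, without the gtest checks. */
  if (event == ML_SERVICE_EVENT_NEW_DATA) {
    ml_tensors_data_h data = NULL;
    void *raw = NULL;
    size_t size = 0;

    if (ml_information_get (event_data, "data", &data) == ML_ERROR_NONE &&
        ml_tensors_data_get_tensor_data (data, 0U, &raw, &size) == ML_ERROR_NONE)
      g_print ("%.*s", (int) size, (const char *) raw);
  }
}

int
main (void)
{
  ml_lxm_session_h session = NULL;
  ml_lxm_prompt_h prompt = NULL;
  ml_lxm_generation_options_s options = { 0.8, 32 }; /* as in basicFlow_p */

  /* "my_llamacpp.conf" is a placeholder; the test uses config_single_llamacpp.conf. */
  if (ml_lxm_session_create (&session, "my_llamacpp.conf", NULL) != ML_ERROR_NONE)
    return 1;

  ml_lxm_prompt_create (&prompt);
  ml_lxm_prompt_append_text (prompt, "Hello LXM, how are you?");

  /* Tokens are streamed asynchronously to token_cb. */
  ml_lxm_session_respond (session, prompt, &options, token_cb, NULL);
  g_usleep (5000000U); /* crude wait, mirroring the test */

  ml_lxm_prompt_destroy (prompt);
  ml_lxm_session_destroy (session);
  return 0;
}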
