@@ -9,7 +9,7 @@
 
 #include <gtest/gtest.h>
 #include <glib.h>
-
+#include <iostream>
 #include <ml-api-service-private.h>
 #include <ml-api-service.h>
 #include "ml-api-service-extension.h"
@@ -394,8 +394,7 @@ _extension_test_imgclf (ml_service_h handle, gboolean is_pipeline)
  * @brief Callback function for scenario test.
  */
 static void
-_extension_test_llamacpp_cb (
-    ml_service_event_e event, ml_information_h event_data, void *user_data)
+_extension_test_llm_cb (ml_service_event_e event, ml_information_h event_data, void *user_data)
 {
   extension_test_data_s *tdata = (extension_test_data_s *) user_data;
   ml_tensors_data_h data = NULL;
@@ -413,7 +412,8 @@ _extension_test_llamacpp_cb (
   status = ml_tensors_data_get_tensor_data (data, 0U, &_raw, &_size);
   EXPECT_EQ (status, ML_ERROR_NONE);
 
-  g_print ("%s", (char *) _raw);
+  std::cout.write (static_cast<const char *> (_raw), _size); /* print multi-byte (e.g. Korean) output intact */
+  std::cout.flush ();
 
   if (tdata)
     tdata->received++;
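The switch from g_print ("%s", ...) to std::cout.write () in the hunk above matters because the tensor payload is a raw byte buffer with an explicit size, not necessarily a NUL-terminated string: %s would stop at an embedded NUL (or over-read without one) and can garble multi-byte UTF-8 such as Korean output. A minimal standalone sketch of the difference, with illustrative sample bytes:

    #include <iostream>

    int main (void)
    {
      /* "한" (Korean) as UTF-8: three bytes, no trailing NUL, as a tensor payload may be. */
      const char raw[] = { '\xed', '\x95', '\x9c' };

      /* g_print ("%s", raw) would scan for a NUL byte this buffer does not contain. */
      /* Writing exactly sizeof (raw) bytes emits the sequence intact regardless of content. */
      std::cout.write (raw, sizeof (raw));
      std::cout.flush ();
      return 0;
    }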
@@ -427,7 +427,7 @@ _extension_test_llamacpp_cb (
  * @brief Internal function to run test with ml-service extension handle.
  */
 static inline void
-_extension_test_llamacpp (ml_service_h handle, gboolean is_pipeline)
+_extension_test_llm (ml_service_h handle, gboolean is_pipeline, gchar *file_name, guint sleep_us)
 {
   extension_test_data_s *tdata;
   ml_tensors_info_h info;
@@ -436,14 +436,14 @@ _extension_test_llamacpp (ml_service_h handle, gboolean is_pipeline)
   gsize len = 0;
   gchar *contents = NULL;
 
-  g_autofree gchar *data_file = _get_data_path ("input.txt");
+  g_autofree gchar *data_file = _get_data_path (file_name);
   ASSERT_TRUE (g_file_test (data_file, G_FILE_TEST_EXISTS));
   ASSERT_TRUE (g_file_get_contents (data_file, &contents, &len, NULL));
 
   tdata = _create_test_data (is_pipeline);
   ASSERT_TRUE (tdata != NULL);
 
-  status = ml_service_set_event_cb (handle, _extension_test_llamacpp_cb, tdata);
+  status = ml_service_set_event_cb (handle, _extension_test_llm_cb, tdata);
   EXPECT_EQ (status, ML_ERROR_NONE);
 
   /* Create and push input data. */
@@ -457,7 +457,7 @@ _extension_test_llamacpp (ml_service_h handle, gboolean is_pipeline)
   status = ml_service_request (handle, NULL, input);
   EXPECT_EQ (status, ML_ERROR_NONE);
 
-  g_usleep (5000000U);
+  g_usleep (sleep_us);
   EXPECT_GT (tdata->received, 0);
 
   /* Clear callback before releasing tdata. */
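The helper now takes its wait time as sleep_us, but it still blocks for the full duration even when the model answers early. A bounded polling loop over the same tdata->received counter would return as soon as an event arrives; a hypothetical sketch (not part of this patch, and assuming received is safe to read from this thread):

    /* Hypothetical helper, not part of this patch: wait until the event callback
     * has fired or timeout_us microseconds have elapsed, polling every 100 ms. */
    static gboolean
    _wait_for_events (extension_test_data_s *tdata, guint timeout_us)
    {
      guint waited = 0U;

      while (tdata->received == 0 && waited < timeout_us) {
        g_usleep (100000U);
        waited += 100000U;
      }

      return (tdata->received > 0) ? TRUE : FALSE;
    }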
@@ -477,8 +477,9 @@ TEST_REQUIRE_TFLITE (MLServiceExtension, scenarioConfigLlamacpp)
 {
   ml_service_h handle;
   int status;
-
+  g_autofree gchar *input_file = g_strdup ("input.txt");
   g_autofree gchar *model_file = _get_model_path ("llama-2-7b-chat.Q2_K.gguf");
+
   if (!g_file_test (model_file, G_FILE_TEST_EXISTS)) {
     g_critical ("Skipping scenarioConfigLlamacpp test due to missing model file. "
         "Please download model file from https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGUF");
@@ -490,7 +491,7 @@ TEST_REQUIRE_TFLITE (MLServiceExtension, scenarioConfigLlamacpp)
   status = ml_service_new (config, &handle);
   ASSERT_EQ (status, ML_ERROR_NONE);
 
-  _extension_test_llamacpp (handle, FALSE);
+  _extension_test_llm (handle, FALSE, input_file, 5000000U);
 
   status = ml_service_destroy (handle);
   EXPECT_EQ (status, ML_ERROR_NONE);
@@ -503,8 +504,9 @@ TEST_REQUIRE_TFLITE (MLServiceExtension, scenarioConfigLlamacppAsync)
 {
   ml_service_h handle;
   int status;
-
+  g_autofree gchar *input_file = g_strdup ("input.txt");
   g_autofree gchar *model_file = _get_model_path ("llama-2-7b-chat.Q2_K.gguf");
+
   if (!g_file_test (model_file, G_FILE_TEST_EXISTS)) {
     g_critical ("Skipping scenarioConfigLlamacppAsync test due to missing model file. "
         "Please download model file from https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGUF");
@@ -516,7 +518,33 @@ TEST_REQUIRE_TFLITE (MLServiceExtension, scenarioConfigLlamacppAsync)
   status = ml_service_new (config, &handle);
   ASSERT_EQ (status, ML_ERROR_NONE);
 
-  _extension_test_llamacpp (handle, FALSE);
+  _extension_test_llm (handle, FALSE, input_file, 5000000U);
+
+  status = ml_service_destroy (handle);
+  EXPECT_EQ (status, ML_ERROR_NONE);
+}
+
+/**
+ * @brief Usage of ml-service extension API.
+ */
+TEST_REQUIRE_TFLITE (MLServiceExtension, scenarioConfigFlare)
+{
+  ml_service_h handle;
+  int status;
+  g_autofree gchar *input_file = g_strdup ("flare_input.txt");
+  g_autofree gchar *model_file = _get_model_path ("sflare_if_4bit_3b.bin");
+
+  if (!g_file_test (model_file, G_FILE_TEST_EXISTS)) {
+    g_critical ("Skipping scenarioConfigFlare test due to missing model file. Please download the model file.");
+    return;
+  }
+
+  g_autofree gchar *config = _get_config_path ("config_single_flare.conf");
+
+  status = ml_service_new (config, &handle);
+  ASSERT_EQ (status, ML_ERROR_NONE);
+
+  _extension_test_llm (handle, FALSE, input_file, 40000000U);
 
   status = ml_service_destroy (handle);
   EXPECT_EQ (status, ML_ERROR_NONE);
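The new test loads config_single_flare.conf, whose contents are not part of this diff. Going by the JSON layout used for the other single-API extension configs, it would look roughly like the sketch below; the framework name and option keys are placeholders modeled on the llamacpp config, not values confirmed by this patch:

    {
      "single" :
      {
        "framework" : "flare",
        "model" : ["sflare_if_4bit_3b.bin"],
        "invoke_dynamic" : "true",
        "invoke_async" : "true"
      }
    }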