diff --git a/cmake_targets/CMakeLists.txt b/cmake_targets/CMakeLists.txt index 92126988d523766fb46e8b2afbe5e0c478b77e80..0774a32f7bcdc2bf018ba77657ddfe04f3dc8f82 100644 --- a/cmake_targets/CMakeLists.txt +++ b/cmake_targets/CMakeLists.txt @@ -871,6 +871,7 @@ add_library(FLEXRAN_AGENT ${OPENAIR2_DIR}/ENB_APP/flexran_agent_net_comm.c ${OPENAIR2_DIR}/ENB_APP/flexran_agent_async.c ${OPENAIR2_DIR}/ENB_APP/CONTROL_MODULES/MAC/flexran_agent_mac_internal.c + ${OPENAIR2_DIR}/ENB_APP/CONTROL_MODULES/MAC/flexran_agent_mac_slice_verification.c ) set(FLEXRAN_AGENT_LIB FLEXRAN_AGENT) #include_directories(${OPENAIR2_DIR}/ENB_APP) diff --git a/common/utils/load_module_shlib.c b/common/utils/load_module_shlib.c index c0afdb0b6493cf95bf96b3e0095816dda73dcf01..a4b4742c94c6041cafe35712e1e092e181e0d005 100644 --- a/common/utils/load_module_shlib.c +++ b/common/utils/load_module_shlib.c @@ -120,75 +120,116 @@ int ret; int load_module_shlib(char *modname,loader_shlibfunc_t *farray, int numf, void *autoinit_arg) { - void *lib_handle; - initfunc_t fpi; - checkverfunc_t fpc; - getfarrayfunc_t fpg; - char *shlib_path; - char *afname=NULL; - int ret=0; - - if (loader_data.shlibpath == NULL) { - loader_init(); - } + void *lib_handle = NULL; + initfunc_t fpi; + checkverfunc_t fpc; + getfarrayfunc_t fpg; + char *shlib_path = NULL; + char *afname = NULL; + int ret = 0; + int lib_idx = -1; + + if (!modname) { + fprintf(stderr, "[LOADER] load_module_shlib(): no library name given\n"); + return -1; + } - shlib_path = loader_format_shlibpath(modname); + if (!loader_data.shlibpath) { + loader_init(); + } - ret = 0; - lib_handle = dlopen(shlib_path, RTLD_LAZY|RTLD_NODELETE|RTLD_GLOBAL); - if (!lib_handle) { - fprintf(stderr,"[LOADER] library %s is not loaded: %s\n", shlib_path,dlerror()); + shlib_path = loader_format_shlibpath(modname); + + for (int i = 0; i < loader_data.numshlibs; i++) { + if (strcmp(loader_data.shlibs[i].name, modname) == 0) { + printf("[LOADER] library %s has been loaded 
previously, reloading function pointers\n", + shlib_path); + lib_idx = i; + break; + } + } + if (lib_idx < 0) { + lib_idx = loader_data.numshlibs; + ++loader_data.numshlibs; + if (loader_data.numshlibs > loader_data.maxshlibs) { + fprintf(stderr, "[LOADER] can not load more than %d shlibs\n", + loader_data.maxshlibs); ret = -1; - } else { - printf("[LOADER] library %s successfully loaded\n", shlib_path); - afname=malloc(strlen(modname)+15); - sprintf(afname,"%s_checkbuildver",modname); - fpc = dlsym(lib_handle,afname); - if (fpc != NULL ){ - int chkver_ret = fpc(loader_data.mainexec_buildversion, &(loader_data.shlibs[loader_data.numshlibs].shlib_buildversion)); - if (chkver_ret < 0) { - fprintf(stderr,"[LOADER] %s %d lib %s, version mismatch",__FILE__, __LINE__, modname); - exit_fun("[LOADER] unrecoverable error"); - } - } - sprintf(afname,"%s_autoinit",modname); - fpi = dlsym(lib_handle,afname); + goto load_module_shlib_exit; + } + loader_data.shlibs[lib_idx].name = strdup(modname); + loader_data.shlibs[lib_idx].thisshlib_path = strdup(shlib_path); + } - if (fpi != NULL ) { - fpi(autoinit_arg); - } + lib_handle = dlopen(shlib_path, RTLD_LAZY|RTLD_NODELETE|RTLD_GLOBAL); + if (!lib_handle) { + fprintf(stderr,"[LOADER] library %s is not loaded: %s\n", shlib_path,dlerror()); + ret = -1; + goto load_module_shlib_exit; + } + + printf("[LOADER] library %s successfully loaded\n", shlib_path); + afname = malloc(strlen(modname)+15); + if (!afname) { + fprintf(stderr, "[LOADER] unable to allocate memory for library %s\n", shlib_path); + ret = -1; + goto load_module_shlib_exit; + } + sprintf(afname,"%s_checkbuildver",modname); + fpc = dlsym(lib_handle,afname); + if (fpc) { + int chkver_ret = fpc(loader_data.mainexec_buildversion, + &(loader_data.shlibs[lib_idx].shlib_buildversion)); + if (chkver_ret < 0) { + fprintf(stderr, "[LOADER] %s %d lib %s, version mismatch", + __FILE__, __LINE__, modname); + ret = -1; + goto load_module_shlib_exit; + } + } + 
sprintf(afname,"%s_autoinit",modname); + fpi = dlsym(lib_handle,afname); - if (farray != NULL) { - loader_data.shlibs[loader_data.numshlibs].funcarray=malloc(numf*sizeof(loader_shlibfunc_t)); - loader_data.shlibs[loader_data.numshlibs].numfunc=0; - for (int i=0; i<numf; i++) { - farray[i].fptr = dlsym(lib_handle,farray[i].fname); - if (farray[i].fptr == NULL ) { - fprintf(stderr,"[LOADER] %s %d %s function not found %s\n",__FILE__, __LINE__, dlerror(),farray[i].fname); - ret= -1; - } else { /* farray[i].fptr == NULL */ - loader_data.shlibs[loader_data.numshlibs].funcarray[i].fname=strdup(farray[i].fname); - loader_data.shlibs[loader_data.numshlibs].funcarray[i].fptr = farray[i].fptr; - loader_data.shlibs[loader_data.numshlibs].numfunc++; - }/* farray[i].fptr != NULL */ - } /* for int i... */ - } else { /* farray ! NULL */ - sprintf(afname,"%s_getfarray",modname); - fpg = dlsym(lib_handle,afname); - if (fpg != NULL ) { - loader_data.shlibs[loader_data.numshlibs].numfunc = fpg(&(loader_data.shlibs[loader_data.numshlibs].funcarray)); - } - } /* farray ! 
NULL */ - loader_data.shlibs[loader_data.numshlibs].name=strdup(modname); - loader_data.shlibs[loader_data.numshlibs].thisshlib_path=strdup(shlib_path); - - (loader_data.numshlibs)++; - } /* lib_handle != NULL */ - - if ( shlib_path!= NULL) free(shlib_path); - if ( afname!= NULL) free(afname); - if (lib_handle != NULL) dlclose(lib_handle); - return ret; + if (fpi) { + fpi(autoinit_arg); + } + + if (farray) { + if (!loader_data.shlibs[lib_idx].funcarray) { + loader_data.shlibs[lib_idx].funcarray = malloc(numf*sizeof(loader_shlibfunc_t)); + if (!loader_data.shlibs[lib_idx].funcarray) { + fprintf(stderr, "[LOADER] load_module_shlib(): unable to allocate memory\n"); + ret = -1; + goto load_module_shlib_exit; + } + } + loader_data.shlibs[lib_idx].numfunc = 0; + for (int i = 0; i < numf; i++) { + farray[i].fptr = dlsym(lib_handle,farray[i].fname); + if (!farray[i].fptr) { + fprintf(stderr, "[LOADER] load_module_shlib(): function %s not found: %s\n", + farray[i].fname, dlerror()); + ret = -1; + goto load_module_shlib_exit; + } + loader_data.shlibs[lib_idx].funcarray[i].fname=strdup(farray[i].fname); + loader_data.shlibs[lib_idx].funcarray[i].fptr = farray[i].fptr; + loader_data.shlibs[lib_idx].numfunc++; + } /* for int i... */ + } else { /* farray ! NULL */ + sprintf(afname,"%s_getfarray",modname); + fpg = dlsym(lib_handle,afname); + if (fpg) { + loader_data.shlibs[lib_idx].numfunc = + fpg(&(loader_data.shlibs[lib_idx].funcarray)); + } + } /* farray ! 
NULL */ + +load_module_shlib_exit: + if (shlib_path) free(shlib_path); + if (afname) free(afname); + if (lib_handle) dlclose(lib_handle); + return ret; } void * get_shlibmodule_fptr(char *modname, char *fname) diff --git a/openair1/PHY/defs_common.h b/openair1/PHY/defs_common.h index 75e6d3fce3756ca25a8ef0a3c7348b0925e53b46..e27622c05978b6cf9388296b6d6fe7d1d919b52d 100644 --- a/openair1/PHY/defs_common.h +++ b/openair1/PHY/defs_common.h @@ -995,9 +995,10 @@ static inline void wait_sync(char *thread_name) { } static inline int wakeup_thread(pthread_mutex_t *mutex,pthread_cond_t *cond,int *instance_cnt,char *name) { - - if (pthread_mutex_lock(mutex) != 0) { - LOG_E( PHY, "error locking mutex for %s\n",name); + int rc; + if ((rc = pthread_mutex_lock(mutex)) != 0) { + LOG_E(PHY, "wakeup_thread(): error locking mutex for %s (%d %s, %p)\n", + name, rc, strerror(rc), (void *)mutex); exit_fun("nothing to add"); return(-1); } @@ -1014,8 +1015,10 @@ static inline int wakeup_thread(pthread_mutex_t *mutex,pthread_cond_t *cond,int } static inline int wait_on_condition(pthread_mutex_t *mutex,pthread_cond_t *cond,int *instance_cnt,char *name) { - if (pthread_mutex_lock(mutex) != 0) { - LOG_E( PHY, "[SCHED][eNB] error locking mutex for %s\n",name); + int rc; + if ((rc = pthread_mutex_lock(mutex)) != 0) { + LOG_E(PHY, "[SCHED][eNB] wait_on_condition(): error locking mutex for %s (%d %s, %p)\n", + name, rc, strerror(rc), (void *)mutex); exit_fun("nothing to add"); return(-1); } @@ -1035,9 +1038,10 @@ static inline int wait_on_condition(pthread_mutex_t *mutex,pthread_cond_t *cond, } static inline int wait_on_busy_condition(pthread_mutex_t *mutex,pthread_cond_t *cond,int *instance_cnt,char *name) { - - if (pthread_mutex_lock(mutex) != 0) { - LOG_E( PHY, "[SCHED][eNB] error locking mutex for %s\n",name); + int rc; + if ((rc = pthread_mutex_lock(mutex)) != 0) { + LOG_E(PHY, "[SCHED][eNB] wait_on_busy_condition(): error locking mutex for %s (%d %s, %p)\n", + name, rc, strerror(rc), (void 
*)mutex); exit_fun("nothing to add"); return(-1); } @@ -1057,9 +1061,10 @@ static inline int wait_on_busy_condition(pthread_mutex_t *mutex,pthread_cond_t * } static inline int release_thread(pthread_mutex_t *mutex,int *instance_cnt,char *name) { - - if (pthread_mutex_lock(mutex) != 0) { - LOG_E( PHY, "[SCHED][eNB] error locking mutex for %s\n",name); + int rc; + if ((rc = pthread_mutex_lock(mutex)) != 0) { + LOG_E(PHY, "[SCHED][eNB] release_thread(): error locking mutex for %s (%d %s, %p)\n", + name, rc, strerror(rc), (void *)mutex); exit_fun("nothing to add"); return(-1); } diff --git a/openair2/COMMON/flexran_messages_def.h b/openair2/COMMON/flexran_messages_def.h new file mode 100644 index 0000000000000000000000000000000000000000..fdcdf698dc76b45c29f13a4d186b99d1fb931c2e --- /dev/null +++ b/openair2/COMMON/flexran_messages_def.h @@ -0,0 +1,29 @@ +/* + * Licensed to the OpenAirInterface (OAI) Software Alliance under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The OpenAirInterface Software Alliance licenses this file to You under + * the OAI Public License, Version 1.1 (the "License"); you may not use this file + * except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.openairinterface.org/?page_id=698 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + *------------------------------------------------------------------------------- + * For more information about the OpenAirInterface (OAI) Software Alliance: + * contact@openairinterface.org + */ + +/* + * flexran_messages_def.h + * + * Created on: Apr 26, 2018 + * Author: R. 
Schmidt + */ + +MESSAGE_DEF(SOFT_RESTART_MESSAGE, MESSAGE_PRIORITY_MED_PLUS, IttiMsgEmpty, soft_restart_message) diff --git a/openair2/COMMON/messages_def.h b/openair2/COMMON/messages_def.h index 2434d157767bb2369c53eaf4aa2cb5fe07f5588b..f6e2dd0f4b0159defbbdc18c065957045972a3ec 100644 --- a/openair2/COMMON/messages_def.h +++ b/openair2/COMMON/messages_def.h @@ -38,4 +38,4 @@ #include "sctp_messages_def.h" #include "udp_messages_def.h" #include "gtpv1_u_messages_def.h" - +#include "flexran_messages_def.h" diff --git a/openair2/COMMON/platform_types.h b/openair2/COMMON/platform_types.h index 0f9fc34f836f0241dcf3e07ed9ab74114d23ee93..843f6458164472eb52776c7b416682f46d069e4c 100644 --- a/openair2/COMMON/platform_types.h +++ b/openair2/COMMON/platform_types.h @@ -107,9 +107,15 @@ typedef enum { CR_HOL = 2, CR_LC = 3, CR_CQI = 4, - CR_NUM = 5 + CR_LCP = 5, + CR_NUM = 6 } sorting_criterion_t; +typedef enum { + POL_FAIR = 0, + POL_GREEDY = 1, + POL_NUM = 2 +} accounting_policy_t; //----------------------------------------------------------------------------- // PHY TYPES //----------------------------------------------------------------------------- diff --git a/openair2/ENB_APP/CONTROL_MODULES/MAC/flexran_agent_mac.c b/openair2/ENB_APP/CONTROL_MODULES/MAC/flexran_agent_mac.c index 549c7eaa2227e944a98747b6bcdd2f8a2ac73207..6cc3caced61a7f8324d0bf1fe2d7ccec01c6a25e 100644 --- a/openair2/ENB_APP/CONTROL_MODULES/MAC/flexran_agent_mac.c +++ b/openair2/ENB_APP/CONTROL_MODULES/MAC/flexran_agent_mac.c @@ -52,6 +52,18 @@ struct lfds700_misc_prng_state ps[NUM_MAX_ENB]; struct lfds700_ringbuffer_element *dl_mac_config_array[NUM_MAX_ENB]; struct lfds700_ringbuffer_state ringbuffer_state[NUM_MAX_ENB]; +/* the slice config as kept in the underlying system */ +Protocol__FlexSliceConfig *slice_config[MAX_NUM_SLICES]; +/* a structure that keeps updates which will be reflected in slice_config later */ +Protocol__FlexSliceConfig *sc_update[MAX_NUM_SLICES]; +/* indicates whether sc_update 
contains new data */ +int perform_slice_config_update_count = 1; +/* queue of incoming new UE<>slice association commands */ +Protocol__FlexUeConfig *ue_slice_assoc_update[MAX_NUM_SLICES]; +int n_ue_slice_assoc_updates = 0; +/* mutex for sc_update: do not receive new config and write it at the same time */ +pthread_mutex_t sc_update_mtx = PTHREAD_MUTEX_INITIALIZER; + int flexran_agent_mac_stats_reply(mid_t mod_id, const report_config_t *report_config, @@ -61,6 +73,7 @@ int flexran_agent_mac_stats_reply(mid_t mod_id, // Protocol__FlexHeader *header; int i, j, k; + int UE_id; int cc_id = 0; int enb_id = mod_id; @@ -70,6 +83,8 @@ int flexran_agent_mac_stats_reply(mid_t mod_id, for (i = 0; i < report_config->nr_ue; i++) { + UE_id = flexran_get_ue_id(mod_id, i); + ue_report[i]->rnti = report_config->ue_report_type[i].ue_rnti; ue_report[i]->has_rnti = 1; @@ -91,7 +106,7 @@ int flexran_agent_mac_stats_reply(mid_t mod_id, /* Check flag for creation of PHR report */ if (report_config->ue_report_type[i].ue_report_flags & PROTOCOL__FLEX_UE_STATS_TYPE__FLUST_PHR) { - ue_report[i]->phr = flexran_get_ue_phr (enb_id, i); // eNB_UE_list->UE_template[UE_PCCID(enb_id,i)][i].phr_info; + ue_report[i]->phr = flexran_get_ue_phr (enb_id, UE_id); // eNB_UE_list->UE_template[UE_PCCID(enb_id,UE_id)][UE_id].phr_info; ue_report[i]->has_phr = 1; } @@ -113,11 +128,11 @@ int flexran_agent_mac_stats_reply(mid_t mod_id, protocol__flex_rlc_bsr__init(rlc_reports[j]); rlc_reports[j]->lc_id = j+1; rlc_reports[j]->has_lc_id = 1; - rlc_reports[j]->tx_queue_size = flexran_get_tx_queue_size(enb_id, i, j + 1); + rlc_reports[j]->tx_queue_size = flexran_get_tx_queue_size(enb_id, UE_id, j + 1); rlc_reports[j]->has_tx_queue_size = 1; //TODO:Set tx queue head of line delay in ms - rlc_reports[j]->tx_queue_hol_delay = flexran_get_hol_delay(enb_id, i, j + 1); + rlc_reports[j]->tx_queue_hol_delay = flexran_get_hol_delay(enb_id, UE_id, j + 1); rlc_reports[j]->has_tx_queue_hol_delay = 1; //TODO:Set retransmission 
queue size in bytes rlc_reports[j]->retransmission_queue_size = 10; @@ -126,7 +141,7 @@ int flexran_agent_mac_stats_reply(mid_t mod_id, rlc_reports[j]->retransmission_queue_hol_delay = 100; rlc_reports[j]->has_retransmission_queue_hol_delay = 0; //TODO DONE:Set current size of the pending message in bytes - rlc_reports[j]->status_pdu_size = flexran_get_num_pdus_buffer(enb_id , i, j + 1); + rlc_reports[j]->status_pdu_size = flexran_get_num_pdus_buffer(enb_id, UE_id, j + 1); rlc_reports[j]->has_status_pdu_size = 1; } @@ -140,7 +155,7 @@ int flexran_agent_mac_stats_reply(mid_t mod_id, /* Check flag for creation of MAC CE buffer status report */ if (report_config->ue_report_type[i].ue_report_flags & PROTOCOL__FLEX_UE_STATS_TYPE__FLUST_MAC_CE_BS) { // TODO: Fill in the actual MAC CE buffer status report - ue_report[i]->pending_mac_ces = (flexran_get_MAC_CE_bitmap_TA(enb_id,i,0) | (0 << 1) | (0 << 2) | (0 << 3)) & 15; + ue_report[i]->pending_mac_ces = (flexran_get_MAC_CE_bitmap_TA(enb_id, UE_id, 0) | (0 << 1) | (0 << 2) | (0 << 3)) & 15; // Use as bitmap. Set one or more of the; /* Use as bitmap. Set one or more of the // PROTOCOL__FLEX_CE_TYPE__FLPCET_ values // found in stats_common.pb-c.h. See @@ -161,7 +176,7 @@ int flexran_agent_mac_stats_reply(mid_t mod_id, dl_report->sfn_sn = flexran_get_sfn_sf(enb_id); dl_report->has_sfn_sn = 1; //Set the number of DL CQI reports for this UE. One for each CC - dl_report->n_csi_report = flexran_get_active_CC(enb_id,i); + dl_report->n_csi_report = flexran_get_active_CC(enb_id, UE_id); dl_report->n_csi_report = 1 ; //Create the actual CSI reports. 
Protocol__FlexDlCsi **csi_reports; @@ -178,7 +193,7 @@ int flexran_agent_mac_stats_reply(mid_t mod_id, csi_reports[j]->serv_cell_index = j; csi_reports[j]->has_serv_cell_index = 1; //The rank indicator value for this cc - csi_reports[j]->ri = flexran_get_current_RI(enb_id,i,j); + csi_reports[j]->ri = flexran_get_current_RI(enb_id, UE_id, j); csi_reports[j]->has_ri = 1; //TODO: the type of CSI report based on the configuration of the UE //For now we only support type P10, which only needs a wideband value @@ -197,7 +212,7 @@ int flexran_agent_mac_stats_reply(mid_t mod_id, protocol__flex_csi_p10__init(csi10); //TODO: set the wideband value // NN: this is also depends on cc_id - csi10->wb_cqi = flexran_get_ue_wcqi (enb_id, i); //eNB_UE_list->eNB_UE_stats[UE_PCCID(enb_id,i)][i].dl_cqi; + csi10->wb_cqi = flexran_get_ue_wcqi (enb_id, UE_id); //eNB_UE_list->eNB_UE_stats[UE_PCCID(enb_id,UE_id)][UE_id].dl_cqi; csi10->has_wb_cqi = 1; //Add the type of measurements to the csi report in the proper union type csi_reports[j]->p10csi = csi10; @@ -214,26 +229,26 @@ int flexran_agent_mac_stats_reply(mid_t mod_id, csi11->wb_cqi = malloc(sizeof(csi11->wb_cqi)); csi11->n_wb_cqi = 1; - csi11->wb_cqi[0] = flexran_get_ue_wcqi (enb_id, i); + csi11->wb_cqi[0] = flexran_get_ue_wcqi (enb_id, UE_id); // According To spec 36.213 if (flexran_get_antenna_ports(enb_id, j) == 2 && csi_reports[j]->ri == 1) { // TODO PMI - csi11->wb_pmi = flexran_get_ue_wpmi(enb_id, i, 0); + csi11->wb_pmi = flexran_get_ue_wpmi(enb_id, UE_id, 0); csi11->has_wb_pmi = 1; } else if (flexran_get_antenna_ports(enb_id, j) == 2 && csi_reports[j]->ri == 2){ // TODO PMI - csi11->wb_pmi = flexran_get_ue_wpmi(enb_id, i, 0); + csi11->wb_pmi = flexran_get_ue_wpmi(enb_id, UE_id, 0); csi11->has_wb_pmi = 1; } else if (flexran_get_antenna_ports(enb_id, j) == 4 && csi_reports[j]->ri == 2){ // TODO PMI - csi11->wb_pmi = flexran_get_ue_wpmi(enb_id, i, 0); + csi11->wb_pmi = flexran_get_ue_wpmi(enb_id, UE_id, 0); csi11->has_wb_pmi = 1; @@ 
-257,7 +272,7 @@ int flexran_agent_mac_stats_reply(mid_t mod_id, goto error; protocol__flex_csi_p20__init(csi20); - csi20->wb_cqi = flexran_get_ue_wcqi (enb_id, i); + csi20->wb_cqi = flexran_get_ue_wcqi (enb_id, UE_id); csi20->has_wb_cqi = 1; @@ -281,7 +296,7 @@ int flexran_agent_mac_stats_reply(mid_t mod_id, // goto error; // protocol__flex_csi_p21__init(csi21); - // csi21->wb_cqi = flexran_get_ue_wcqi (enb_id, i); + // csi21->wb_cqi = flexran_get_ue_wcqi (enb_id, UE_id); // csi21->wb_pmi = flexran_get_ue_pmi(enb_id); //TDO inside @@ -309,7 +324,7 @@ int flexran_agent_mac_stats_reply(mid_t mod_id, // goto error; // protocol__flex_csi_a12__init(csi12); - // csi12->wb_cqi = flexran_get_ue_wcqi (enb_id, i); + // csi12->wb_cqi = flexran_get_ue_wcqi (enb_id, UE_id); // csi12->sb_pmi = 1 ; //TODO inside @@ -324,17 +339,17 @@ int flexran_agent_mac_stats_reply(mid_t mod_id, // goto error; // protocol__flex_csi_a22__init(csi22); - // csi22->wb_cqi = flexran_get_ue_wcqi (enb_id, i); + // csi22->wb_cqi = flexran_get_ue_wcqi (enb_id, UE_id); // csi22->sb_cqi = 1 ; //TODO inside - // csi22->wb_pmi = flexran_get_ue_wcqi (enb_id, i); + // csi22->wb_pmi = flexran_get_ue_wcqi (enb_id, UE_id); // csi22->has_wb_pmi = 1; // csi22->sb_pmi = 1 ; //TODO inside // csi22->has_wb_pmi = 1; - // csi22->sb_list = flexran_get_ue_wcqi (enb_id, i); + // csi22->sb_list = flexran_get_ue_wcqi (enb_id, UE_id); } @@ -347,7 +362,7 @@ int flexran_agent_mac_stats_reply(mid_t mod_id, // goto error; // protocol__flex_csi_a20__init(csi20); - // csi20->wb_cqi = flexran_get_ue_wcqi (enb_id, i); + // csi20->wb_cqi = flexran_get_ue_wcqi (enb_id, UE_id); // csi20->has_wb_cqi = 1; // csi20>sb_cqi = 1 ; //TODO inside @@ -477,8 +492,8 @@ int flexran_agent_mac_stats_reply(mid_t mod_id, full_ul_report->pucch_dbm[j]->has_serv_cell_index = 1; full_ul_report->pucch_dbm[j]->serv_cell_index = j; - if(flexran_get_p0_pucch_dbm(enb_id,i, j) != -1){ - full_ul_report->pucch_dbm[j]->p0_pucch_dbm = 
flexran_get_p0_pucch_dbm(enb_id,i,j); + if(flexran_get_p0_pucch_dbm(enb_id, UE_id, j) != -1){ + full_ul_report->pucch_dbm[j]->p0_pucch_dbm = flexran_get_p0_pucch_dbm(enb_id, UE_id, j); full_ul_report->pucch_dbm[j]->has_p0_pucch_dbm = 1; } } @@ -499,69 +514,69 @@ int flexran_agent_mac_stats_reply(mid_t mod_id, protocol__flex_mac_stats__init(macstats); - macstats->total_bytes_sdus_dl = flexran_get_total_size_dl_mac_sdus(mod_id, i, cc_id); + macstats->total_bytes_sdus_dl = flexran_get_total_size_dl_mac_sdus(mod_id, UE_id, cc_id); macstats->has_total_bytes_sdus_dl = 1; - macstats->total_bytes_sdus_ul = flexran_get_total_size_ul_mac_sdus(mod_id, i, cc_id); + macstats->total_bytes_sdus_ul = flexran_get_total_size_ul_mac_sdus(mod_id, UE_id, cc_id); macstats->has_total_bytes_sdus_ul = 1; - macstats->tbs_dl = flexran_get_TBS_dl(mod_id, i, cc_id); + macstats->tbs_dl = flexran_get_TBS_dl(mod_id, UE_id, cc_id); macstats->has_tbs_dl = 1; - macstats->tbs_ul = flexran_get_TBS_ul(mod_id, i, cc_id); + macstats->tbs_ul = flexran_get_TBS_ul(mod_id, UE_id, cc_id); macstats->has_tbs_ul = 1; - macstats->prb_retx_dl = flexran_get_num_prb_retx_dl_per_ue(mod_id, i, cc_id); + macstats->prb_retx_dl = flexran_get_num_prb_retx_dl_per_ue(mod_id, UE_id, cc_id); macstats->has_prb_retx_dl = 1; - macstats->prb_retx_ul = flexran_get_num_prb_retx_ul_per_ue(mod_id, i, cc_id); + macstats->prb_retx_ul = flexran_get_num_prb_retx_ul_per_ue(mod_id, UE_id, cc_id); macstats->has_prb_retx_ul = 1; - macstats->prb_dl = flexran_get_num_prb_dl_tx_per_ue(mod_id, i, cc_id); + macstats->prb_dl = flexran_get_num_prb_dl_tx_per_ue(mod_id, UE_id, cc_id); macstats->has_prb_dl = 1; - macstats->prb_ul = flexran_get_num_prb_ul_rx_per_ue(mod_id, i, cc_id); + macstats->prb_ul = flexran_get_num_prb_ul_rx_per_ue(mod_id, UE_id, cc_id); macstats->has_prb_ul = 1; - macstats->mcs1_dl = flexran_get_mcs1_dl(mod_id, i, cc_id); + macstats->mcs1_dl = flexran_get_mcs1_dl(mod_id, UE_id, cc_id); macstats->has_mcs1_dl = 1; - 
macstats->mcs2_dl = flexran_get_mcs2_dl(mod_id, i, cc_id); + macstats->mcs2_dl = flexran_get_mcs2_dl(mod_id, UE_id, cc_id); macstats->has_mcs2_dl = 1; - macstats->mcs1_ul = flexran_get_mcs1_ul(mod_id, i, cc_id); + macstats->mcs1_ul = flexran_get_mcs1_ul(mod_id, UE_id, cc_id); macstats->has_mcs1_ul = 1; - macstats->mcs2_ul = flexran_get_mcs2_ul(mod_id, i, cc_id); + macstats->mcs2_ul = flexran_get_mcs2_ul(mod_id, UE_id, cc_id); macstats->has_mcs2_ul = 1; - macstats->total_prb_dl = flexran_get_total_prb_dl_tx_per_ue(mod_id, i, cc_id); + macstats->total_prb_dl = flexran_get_total_prb_dl_tx_per_ue(mod_id, UE_id, cc_id); macstats->has_total_prb_dl = 1; - macstats->total_prb_ul = flexran_get_total_prb_ul_rx_per_ue(mod_id, i, cc_id); + macstats->total_prb_ul = flexran_get_total_prb_ul_rx_per_ue(mod_id, UE_id, cc_id); macstats->has_total_prb_ul = 1; - macstats->total_pdu_dl = flexran_get_total_num_pdu_dl(mod_id, i, cc_id); + macstats->total_pdu_dl = flexran_get_total_num_pdu_dl(mod_id, UE_id, cc_id); macstats->has_total_pdu_dl = 1; - macstats->total_pdu_ul = flexran_get_total_num_pdu_ul(mod_id, i, cc_id); + macstats->total_pdu_ul = flexran_get_total_num_pdu_ul(mod_id, UE_id, cc_id); macstats->has_total_pdu_ul = 1; - macstats->total_tbs_dl = flexran_get_total_TBS_dl(mod_id, i, cc_id); + macstats->total_tbs_dl = flexran_get_total_TBS_dl(mod_id, UE_id, cc_id); macstats->has_total_tbs_dl = 1; - macstats->total_tbs_ul = flexran_get_total_TBS_ul(mod_id, i, cc_id); + macstats->total_tbs_ul = flexran_get_total_TBS_ul(mod_id, UE_id, cc_id); macstats->has_total_tbs_ul = 1; - macstats->harq_round = flexran_get_harq_round(mod_id, cc_id, i); + macstats->harq_round = flexran_get_harq_round(mod_id, cc_id, UE_id); macstats->has_harq_round = 1; Protocol__FlexMacSdusDl ** mac_sdus; - mac_sdus = malloc(sizeof(Protocol__FlexMacSdusDl) * flexran_get_num_mac_sdu_tx(mod_id, i, cc_id)); + mac_sdus = malloc(sizeof(Protocol__FlexMacSdusDl) * flexran_get_num_mac_sdu_tx(mod_id, UE_id, cc_id)); if 
(mac_sdus == NULL) goto error; - macstats->n_mac_sdus_dl = flexran_get_num_mac_sdu_tx(mod_id, i, cc_id); + macstats->n_mac_sdus_dl = flexran_get_num_mac_sdu_tx(mod_id, UE_id, cc_id); for (j = 0; j < macstats->n_mac_sdus_dl; j++){ @@ -569,10 +584,10 @@ int flexran_agent_mac_stats_reply(mid_t mod_id, mac_sdus[j] = malloc(sizeof(Protocol__FlexMacSdusDl)); protocol__flex_mac_sdus_dl__init(mac_sdus[j]); - mac_sdus[j]->lcid = flexran_get_mac_sdu_lcid_index(mod_id, i, cc_id, j); + mac_sdus[j]->lcid = flexran_get_mac_sdu_lcid_index(mod_id, UE_id, cc_id, j); mac_sdus[j]->has_lcid = 1; - mac_sdus[j]->sdu_length = flexran_get_mac_sdu_size(mod_id, i, cc_id, mac_sdus[j]->lcid); + mac_sdus[j]->sdu_length = flexran_get_mac_sdu_size(mod_id, UE_id, cc_id, mac_sdus[j]->lcid); mac_sdus[j]->has_sdu_length = 1; @@ -882,7 +897,6 @@ int flexran_agent_mac_sf_trigger(mid_t mod_id, const void *params, Protocol__Fle for (j = 0; j < 8; j++) { if (RC.mac && RC.mac[mod_id] && RC.mac[mod_id]->UE_list.eNB_UE_stats[UE_PCCID(mod_id,i)][i].harq_pid == 1) { available_harq[i] = j; - sf_trigger_msg->n_dl_info++; break; } } @@ -891,10 +905,7 @@ int flexran_agent_mac_sf_trigger(mid_t mod_id, const void *params, Protocol__Fle // LOG_I(FLEXRAN_AGENT, "Sending subframe trigger for frame %d and subframe %d\n", flexran_get_current_frame(mod_id), (flexran_get_current_subframe(mod_id) + 1) % 10); - /*TODO: Fill in the number of dl HARQ related info, based on the number of currently - *transmitting UEs - */ - // sf_trigger_msg->n_dl_info = flexran_get_num_ues(mod_id); + sf_trigger_msg->n_dl_info = flexran_get_num_ues(mod_id); Protocol__FlexDlInfo **dl_info = NULL; @@ -904,33 +915,31 @@ int flexran_agent_mac_sf_trigger(mid_t mod_id, const void *params, Protocol__Fle goto error; i = -1; //Fill the status of the current HARQ process for each UE - for(UE_id = 0; UE_id < MAX_MOBILES_PER_ENB; UE_id++) { - if (available_harq[UE_id] < 0) { + for(i = 0; i < sf_trigger_msg->n_dl_info; i++) { + if (available_harq[i] < 0) 
continue; - } else { - i++; - } dl_info[i] = malloc(sizeof(Protocol__FlexDlInfo)); if(dl_info[i] == NULL) goto error; + UE_id = flexran_get_ue_id(mod_id, i); protocol__flex_dl_info__init(dl_info[i]); dl_info[i]->rnti = flexran_get_ue_crnti(mod_id, UE_id); dl_info[i]->has_rnti = 1; /*Fill in the right id of this round's HARQ process for this UE*/ // uint8_t harq_id; //uint8_t harq_status; - // flexran_get_harq(mod_id, UE_PCCID(mod_id,i), i, frame, subframe, &harq_id, &harq_status); + // flexran_get_harq(mod_id, UE_PCCID(mod_id, UE_id), i, frame, subframe, &harq_id, &harq_status); dl_info[i]->harq_process_id = available_harq[UE_id]; if (RC.mac && RC.mac[mod_id]) - RC.mac[mod_id]->UE_list.eNB_UE_stats[UE_PCCID(mod_id,i)][UE_id].harq_pid = 0; + RC.mac[mod_id]->UE_list.eNB_UE_stats[UE_PCCID(mod_id, UE_id)][UE_id].harq_pid = 0; dl_info[i]->has_harq_process_id = 1; /* Fill in the status of the HARQ process (2 TBs)*/ dl_info[i]->n_harq_status = 2; dl_info[i]->harq_status = malloc(sizeof(uint32_t) * dl_info[i]->n_harq_status); for (j = 0; j < dl_info[i]->n_harq_status; j++) { - dl_info[i]->harq_status[j] = RC.mac[mod_id]->UE_list.UE_sched_ctrl[i].round[UE_PCCID(mod_id,i)][j]; + dl_info[i]->harq_status[j] = RC.mac[mod_id]->UE_list.UE_sched_ctrl[UE_id].round[UE_PCCID(mod_id, UE_id)][j]; // TODO: This should be different per TB } // LOG_I(FLEXRAN_AGENT, "Sending subframe trigger for frame %d and subframe %d and harq %d (round %d)\n", flexran_get_current_frame(mod_id), (flexran_get_current_subframe(mod_id) + 1) % 10, dl_info[i]->harq_process_id, dl_info[i]->harq_status[0]); @@ -938,7 +947,7 @@ int flexran_agent_mac_sf_trigger(mid_t mod_id, const void *params, Protocol__Fle // LOG_I(FLEXRAN_AGENT, "[Frame %d][Subframe %d]Need to make a retransmission for harq %d (round %d)\n", flexran_get_current_frame(mod_id), flexran_get_current_subframe(mod_id), dl_info[i]->harq_process_id, dl_info[i]->harq_status[0]); } /*Fill in the serving cell index for this UE */ - 
dl_info[i]->serv_cell_index = UE_PCCID(mod_id,i); + dl_info[i]->serv_cell_index = UE_PCCID(mod_id, UE_id); dl_info[i]->has_serv_cell_index = 1; } } @@ -962,18 +971,21 @@ int flexran_agent_mac_sf_trigger(mid_t mod_id, const void *params, Protocol__Fle if(ul_info[i] == NULL) goto error; protocol__flex_ul_info__init(ul_info[i]); - ul_info[i]->rnti = flexran_get_ue_crnti(mod_id, i); + + UE_id = flexran_get_ue_id(mod_id, i); + + ul_info[i]->rnti = flexran_get_ue_crnti(mod_id, UE_id); ul_info[i]->has_rnti = 1; /* Fill in the Tx power control command for this UE (if available), * primary carrier */ - if(flexran_get_tpc(mod_id, i, 0) != 1){ + if(flexran_get_tpc(mod_id, UE_id, 0) != 1){ /* assume primary carrier */ - ul_info[i]->tpc = flexran_get_tpc(mod_id, i, 0); + ul_info[i]->tpc = flexran_get_tpc(mod_id, UE_id, 0); ul_info[i]->has_tpc = 1; } else{ /* assume primary carrier */ - ul_info[i]->tpc = flexran_get_tpc(mod_id, i, 0); + ul_info[i]->tpc = flexran_get_tpc(mod_id, UE_id, 0); ul_info[i]->has_tpc = 0; } /*TODO: fill in the amount of data in bytes in the MAC SDU received in this subframe for the @@ -987,7 +999,7 @@ int flexran_agent_mac_sf_trigger(mid_t mod_id, const void *params, Protocol__Fle ul_info[i]->reception_status = PROTOCOL__FLEX_RECEPTION_STATUS__FLRS_OK; ul_info[i]->has_reception_status = 1; /*Fill in the serving cell index for this UE */ - ul_info[i]->serv_cell_index = UE_PCCID(mod_id,i); + ul_info[i]->serv_cell_index = UE_PCCID(mod_id, UE_id); ul_info[i]->has_serv_cell_index = 1; } } @@ -1260,8 +1272,17 @@ void flexran_agent_init_mac_agent(mid_t mod_id) { lfds700_misc_prng_init(&ps[mod_id]); int num_elements = RINGBUFFER_SIZE + 1; //Allow RINGBUFFER_SIZE messages to be stored in the ringbuffer at any time - dl_mac_config_array[mod_id] = malloc( sizeof(struct lfds700_ringbuffer_element) * num_elements); - lfds700_ringbuffer_init_valid_on_current_logical_core( &ringbuffer_state[mod_id], dl_mac_config_array[mod_id], num_elements, &ps[mod_id], NULL ); + /* 
lfds700_ringbuffer_init_valid_on_current_logical_core()'s second argument + * must be aligned to LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES. From the + * documentation: "Heap allocated variables however will by no means be + * correctly aligned and an aligned malloc must be used." Therefore, we use + * posix_memalign */ + i = posix_memalign((void **)&dl_mac_config_array[mod_id], + LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES, + sizeof(struct lfds700_ringbuffer_element) * num_elements); + AssertFatal(i == 0, "posix_memalign(): could not allocate aligned memory for lfds library\n"); + lfds700_ringbuffer_init_valid_on_current_logical_core(&ringbuffer_state[mod_id], + dl_mac_config_array[mod_id], num_elements, &ps[mod_id], NULL); for (i = 0; i < MAX_MOBILES_PER_ENB; i++) { for (j = 0; j < 8; j++) { if (RC.mac && RC.mac[mod_id]) @@ -1372,8 +1393,229 @@ int flexran_agent_unregister_mac_xface(mid_t mod_id, AGENT_MAC_xface *xface) { return 0; } +void flexran_create_config_structures(mid_t mod_id) +{ + int i; + int n_dl = flexran_get_num_dl_slices(mod_id); + int m_ul = flexran_get_num_ul_slices(mod_id); + slice_config[mod_id] = flexran_agent_create_slice_config(n_dl, m_ul); + sc_update[mod_id] = flexran_agent_create_slice_config(n_dl, m_ul); + if (!slice_config[mod_id] || !sc_update[mod_id]) return; + + flexran_agent_read_slice_config(mod_id, slice_config[mod_id]); + flexran_agent_read_slice_config(mod_id, sc_update[mod_id]); + for (i = 0; i < n_dl; i++) { + flexran_agent_read_slice_dl_config(mod_id, i, slice_config[mod_id]->dl[i]); + flexran_agent_read_slice_dl_config(mod_id, i, sc_update[mod_id]->dl[i]); + } + for (i = 0; i < m_ul; i++) { + flexran_agent_read_slice_ul_config(mod_id, i, slice_config[mod_id]->ul[i]); + flexran_agent_read_slice_ul_config(mod_id, i, sc_update[mod_id]->ul[i]); + } +} +void flexran_check_and_remove_slices(mid_t mod_id) +{ + Protocol__FlexDlSlice **dl = sc_update[mod_id]->dl; + Protocol__FlexDlSlice **dlreal = slice_config[mod_id]->dl; + int i = 0; + while 
(i < sc_update[mod_id]->n_dl) { + /* remove slices whose percentage is zero */ + if (dl[i]->percentage > 0) { + ++i; + continue; + } + if (flexran_remove_dl_slice(mod_id, i) < 1) { + LOG_W(FLEXRAN_AGENT, "[%d] can not remove slice index %d ID %d\n", + mod_id, i, dl[i]->id); + ++i; + continue; + } + LOG_I(FLEXRAN_AGENT, "[%d] removed slice index %d ID %d\n", + mod_id, i, dl[i]->id); + if (dl[i]->n_sorting > 0) free(dl[i]->sorting); + free(dl[i]->scheduler_name); + if (dlreal[i]->n_sorting > 0) { + dlreal[i]->n_sorting = 0; + free(dlreal[i]->sorting); + } + free(dlreal[i]->scheduler_name); + --sc_update[mod_id]->n_dl; + --slice_config[mod_id]->n_dl; + const size_t last = sc_update[mod_id]->n_dl; + /* we need to memcpy the higher slice to the position we just deleted */ + memcpy(dl[i], dl[last], sizeof(*dl[last])); + memset(dl[last], 0, sizeof(*dl[last])); + memcpy(dlreal[i], dlreal[last], sizeof(*dlreal[last])); + memset(dlreal[last], 0, sizeof(*dlreal[last])); + /* dont increase i but recheck the slice which has been copied to here */ + } + Protocol__FlexUlSlice **ul = sc_update[mod_id]->ul; + Protocol__FlexUlSlice **ulreal = slice_config[mod_id]->ul; + i = 0; + while (i < sc_update[mod_id]->n_ul) { + if (ul[i]->percentage > 0) { + ++i; + continue; + } + if (flexran_remove_ul_slice(mod_id, i) < 1) { + LOG_W(FLEXRAN_AGENT, "[%d] can not remove slice index %d ID %d\n", + mod_id, i, ul[i]->id); + ++i; + continue; + } + LOG_I(FLEXRAN_AGENT, "[%d] removed slice index %d ID %d\n", + mod_id, i, ul[i]->id); + free(ul[i]->scheduler_name); + free(ulreal[i]->scheduler_name); + --sc_update[mod_id]->n_ul; + --slice_config[mod_id]->n_ul; + const size_t last = sc_update[mod_id]->n_ul; + /* see DL remarks */ + memcpy(ul[i], ul[last], sizeof(*ul[last])); + memset(ul[last], 0, sizeof(*ul[last])); + memcpy(ulreal[i], ulreal[last], sizeof(*ulreal[last])); + memset(ulreal[last], 0, sizeof(*ulreal[last])); + /* dont increase i but recheck the slice which has been copied to here */ + } +} 
+void flexran_agent_slice_update(mid_t mod_id) +{ + int i; + int changes = 0; + if (perform_slice_config_update_count <= 0) return; + perform_slice_config_update_count--; + pthread_mutex_lock(&sc_update_mtx); + if (!slice_config[mod_id]) { + /* if the configuration does not exist for agent, create from eNB structure + * and exit */ + flexran_create_config_structures(mod_id); + pthread_mutex_unlock(&sc_update_mtx); + return; + } + + /********* read existing config *********/ + /* simply update slice_config all the time and write new config + * (apply_new_slice_dl_config() only updates if changes are necessary) */ + slice_config[mod_id]->n_dl = flexran_get_num_dl_slices(mod_id); + slice_config[mod_id]->n_ul = flexran_get_num_ul_slices(mod_id); + for (i = 0; i < slice_config[mod_id]->n_dl; i++) { + flexran_agent_read_slice_dl_config(mod_id, i, slice_config[mod_id]->dl[i]); + } + for (i = 0; i < slice_config[mod_id]->n_ul; i++) { + flexran_agent_read_slice_ul_config(mod_id, i, slice_config[mod_id]->ul[i]); + } + + /********* write new config *********/ + /* check for removal (sc_update[X]->dl[Y].percentage == 0) + * and update sc_update & slice_config accordingly */ + flexran_check_and_remove_slices(mod_id); + + /* create new DL and UL slices if necessary */ + for (i = slice_config[mod_id]->n_dl; i < sc_update[mod_id]->n_dl; i++) { + flexran_create_dl_slice(mod_id, sc_update[mod_id]->dl[i]->id); + } + for (i = slice_config[mod_id]->n_ul; i < sc_update[mod_id]->n_ul; i++) { + flexran_create_ul_slice(mod_id, sc_update[mod_id]->ul[i]->id); + } + slice_config[mod_id]->n_dl = flexran_get_num_dl_slices(mod_id); + slice_config[mod_id]->n_ul = flexran_get_num_ul_slices(mod_id); + changes += apply_new_slice_config(mod_id, slice_config[mod_id], sc_update[mod_id]); + for (i = 0; i < slice_config[mod_id]->n_dl; i++) { + changes += apply_new_slice_dl_config(mod_id, + slice_config[mod_id]->dl[i], + sc_update[mod_id]->dl[i]); + flexran_agent_read_slice_dl_config(mod_id, i, 
slice_config[mod_id]->dl[i]); + } + for (i = 0; i < slice_config[mod_id]->n_ul; i++) { + changes += apply_new_slice_ul_config(mod_id, + slice_config[mod_id]->ul[i], + sc_update[mod_id]->ul[i]); + flexran_agent_read_slice_ul_config(mod_id, i, slice_config[mod_id]->ul[i]); + } + flexran_agent_read_slice_config(mod_id, slice_config[mod_id]); + if (n_ue_slice_assoc_updates > 0) { + changes += apply_ue_slice_assoc_update(mod_id); + } + if (changes > 0) + LOG_I(FLEXRAN_AGENT, "[%d] slice configuration: applied %d changes\n", mod_id, changes); + + pthread_mutex_unlock(&sc_update_mtx); +} + +Protocol__FlexSliceConfig *flexran_agent_get_slice_config(mid_t mod_id) +{ + if (!slice_config[mod_id]) return NULL; + Protocol__FlexSliceConfig *config = NULL; + + pthread_mutex_lock(&sc_update_mtx); + config = flexran_agent_create_slice_config(slice_config[mod_id]->n_dl, + slice_config[mod_id]->n_ul); + if (!config) { + pthread_mutex_unlock(&sc_update_mtx); + return NULL; + } + config->has_intraslice_share_active = 1; + config->intraslice_share_active = slice_config[mod_id]->intraslice_share_active; + config->has_interslice_share_active = 1; + config->interslice_share_active = slice_config[mod_id]->interslice_share_active; + for (int i = 0; i < slice_config[mod_id]->n_dl; ++i) { + if (!config->dl[i]) continue; + config->dl[i]->has_id = 1; + config->dl[i]->id = slice_config[mod_id]->dl[i]->id; + config->dl[i]->has_label = 1; + config->dl[i]->label = slice_config[mod_id]->dl[i]->label; + config->dl[i]->has_percentage = 1; + config->dl[i]->percentage = slice_config[mod_id]->dl[i]->percentage; + config->dl[i]->has_isolation = 1; + config->dl[i]->isolation = slice_config[mod_id]->dl[i]->isolation; + config->dl[i]->has_priority = 1; + config->dl[i]->priority = slice_config[mod_id]->dl[i]->priority; + config->dl[i]->has_position_low = 1; + config->dl[i]->position_low = slice_config[mod_id]->dl[i]->position_low; + config->dl[i]->has_position_high = 1; + config->dl[i]->position_high = 
slice_config[mod_id]->dl[i]->position_high; + config->dl[i]->has_maxmcs = 1; + config->dl[i]->maxmcs = slice_config[mod_id]->dl[i]->maxmcs; + config->dl[i]->n_sorting = slice_config[mod_id]->dl[i]->n_sorting; + config->dl[i]->sorting = calloc(config->dl[i]->n_sorting, sizeof(Protocol__FlexDlSorting)); + if (!config->dl[i]->sorting) config->dl[i]->n_sorting = 0; + for (int j = 0; j < config->dl[i]->n_sorting; ++j) + config->dl[i]->sorting[j] = slice_config[mod_id]->dl[i]->sorting[j]; + config->dl[i]->has_accounting = 1; + config->dl[i]->accounting = slice_config[mod_id]->dl[i]->accounting; + config->dl[i]->scheduler_name = strdup(slice_config[mod_id]->dl[i]->scheduler_name); + } + for (int i = 0; i < slice_config[mod_id]->n_ul; ++i) { + if (!config->ul[i]) continue; + config->ul[i]->has_id = 1; + config->ul[i]->id = slice_config[mod_id]->ul[i]->id; + config->ul[i]->has_label = 1; + config->ul[i]->label = slice_config[mod_id]->ul[i]->label; + config->ul[i]->has_percentage = 1; + config->ul[i]->percentage = slice_config[mod_id]->ul[i]->percentage; + config->ul[i]->has_isolation = 1; + config->ul[i]->isolation = slice_config[mod_id]->ul[i]->isolation; + config->ul[i]->has_priority = 1; + config->ul[i]->priority = slice_config[mod_id]->ul[i]->priority; + config->ul[i]->has_first_rb = 1; + config->ul[i]->first_rb = slice_config[mod_id]->ul[i]->first_rb; + config->ul[i]->has_maxmcs = 1; + config->ul[i]->maxmcs = slice_config[mod_id]->ul[i]->maxmcs; + config->ul[i]->n_sorting = slice_config[mod_id]->ul[i]->n_sorting; + config->ul[i]->sorting = calloc(config->ul[i]->n_sorting, sizeof(Protocol__FlexUlSorting)); + if (!config->ul[i]->sorting) config->ul[i]->n_sorting = 0; + for (int j = 0; j < config->ul[i]->n_sorting; ++j) + config->ul[i]->sorting[j] = slice_config[mod_id]->ul[i]->sorting[j]; + config->ul[i]->has_accounting = 1; + config->ul[i]->accounting = slice_config[mod_id]->ul[i]->accounting; + config->ul[i]->scheduler_name = 
strdup(slice_config[mod_id]->ul[i]->scheduler_name); + } + + pthread_mutex_unlock(&sc_update_mtx); + return config; +} diff --git a/openair2/ENB_APP/CONTROL_MODULES/MAC/flexran_agent_mac.h b/openair2/ENB_APP/CONTROL_MODULES/MAC/flexran_agent_mac.h index 03e7def95e04610e57503917c38dbafe9e6fb3d4..0d686a53eed6fa5c13fc39129e0aff4508b86056 100644 --- a/openair2/ENB_APP/CONTROL_MODULES/MAC/flexran_agent_mac.h +++ b/openair2/ENB_APP/CONTROL_MODULES/MAC/flexran_agent_mac.h @@ -87,4 +87,14 @@ int flexran_agent_register_mac_xface(mid_t mod_id, AGENT_MAC_xface *xface); /*Unregister technology specific callbacks*/ int flexran_agent_unregister_mac_xface(mid_t mod_id, AGENT_MAC_xface*xface); +/*************************************** + * FlexRAN agent - slice configuration * + ***************************************/ + +/* Inform controller about possibility to update slice configuration */ +void flexran_agent_slice_update(mid_t mod_id); + +/* return a pointer to the current config */ +Protocol__FlexSliceConfig *flexran_agent_get_slice_config(mid_t mod_id); + #endif diff --git a/openair2/ENB_APP/CONTROL_MODULES/MAC/flexran_agent_mac_internal.c b/openair2/ENB_APP/CONTROL_MODULES/MAC/flexran_agent_mac_internal.c index 2acee0686f62165881d256e96eb09500274d0a30..8e0dc5726979861f68ae8fc707447774059a86d9 100644 --- a/openair2/ENB_APP/CONTROL_MODULES/MAC/flexran_agent_mac_internal.c +++ b/openair2/ENB_APP/CONTROL_MODULES/MAC/flexran_agent_mac_internal.c @@ -31,6 +31,15 @@ #include "flexran_agent_common_internal.h" #include "flexran_agent_mac_internal.h" +#include "flexran_agent_mac_slice_verification.h" + +/* from flexran_agent_mac.c */ +extern Protocol__FlexSliceConfig *slice_config[MAX_NUM_SLICES]; +extern Protocol__FlexSliceConfig *sc_update[MAX_NUM_SLICES]; +extern int perform_slice_config_update_count; +extern Protocol__FlexUeConfig *ue_slice_assoc_update[MAX_NUM_SLICES]; +extern int n_ue_slice_assoc_updates; +extern pthread_mutex_t sc_update_mtx; Protocol__FlexranMessage * 
flexran_agent_generate_diff_mac_stats_report(Protocol__FlexranMessage *new_message, Protocol__FlexranMessage *old_message) { @@ -899,3 +908,721 @@ int load_dl_scheduler_function(mid_t mod_id, const char *function_name) { return -1; } + +Protocol__FlexSliceConfig *flexran_agent_create_slice_config(int n_dl, int m_ul) +{ + int i; + Protocol__FlexSliceConfig *fsc = malloc(sizeof(Protocol__FlexSliceConfig)); + if (!fsc) return NULL; + protocol__flex_slice_config__init(fsc); + + /* say there are n_dl slices but reserve memory for up to MAX_NUM_SLICES so + * we don't need to reserve again later */ + fsc->n_dl = n_dl; + fsc->dl = calloc(MAX_NUM_SLICES, sizeof(Protocol__FlexDlSlice *)); + if (!fsc->dl) fsc->n_dl = 0; + for (i = 0; i < MAX_NUM_SLICES; i++) { + fsc->dl[i] = malloc(sizeof(Protocol__FlexDlSlice)); + if (!fsc->dl[i]) continue; + protocol__flex_dl_slice__init(fsc->dl[i]); + } + + /* as above */ + fsc->n_ul = m_ul; + fsc->ul = calloc(MAX_NUM_SLICES, sizeof(Protocol__FlexUlSlice *)); + if (!fsc->ul) fsc->n_ul = 0; + for (i = 0; i < MAX_NUM_SLICES; i++) { + fsc->ul[i] = malloc(sizeof(Protocol__FlexUlSlice)); + if (!fsc->ul[i]) continue; + protocol__flex_ul_slice__init(fsc->ul[i]); + } + return fsc; +} + +void flexran_agent_read_slice_config(mid_t mod_id, Protocol__FlexSliceConfig *s) +{ + s->intraslice_share_active = flexran_get_intraslice_sharing_active(mod_id); + s->has_intraslice_share_active = 1; + s->interslice_share_active = flexran_get_interslice_sharing_active(mod_id); + s->has_interslice_share_active = 1; +} + +void flexran_agent_read_slice_dl_config(mid_t mod_id, int slice_idx, Protocol__FlexDlSlice *dl_slice) +{ + dl_slice->id = flexran_get_dl_slice_id(mod_id, slice_idx); + dl_slice->has_id = 1; + /* read label from corresponding sc_update entry or give default */ + dl_slice->label = PROTOCOL__FLEX_SLICE_LABEL__xMBB; + dl_slice->has_label = 1; + for (int i = 0; i < sc_update[mod_id]->n_dl; i++) { + if (sc_update[mod_id]->dl[i]->id == dl_slice->id + && 
sc_update[mod_id]->dl[i]->has_label) { + dl_slice->label = sc_update[mod_id]->dl[i]->label; + break; + } + } + dl_slice->percentage = flexran_get_dl_slice_percentage(mod_id, slice_idx); + dl_slice->has_percentage = 1; + dl_slice->isolation = flexran_get_dl_slice_isolation(mod_id, slice_idx); + dl_slice->has_isolation = 1; + dl_slice->priority = flexran_get_dl_slice_priority(mod_id, slice_idx); + dl_slice->has_priority = 1; + dl_slice->position_low = flexran_get_dl_slice_position_low(mod_id, slice_idx); + dl_slice->has_position_low = 1; + dl_slice->position_high = flexran_get_dl_slice_position_high(mod_id, slice_idx); + dl_slice->has_position_high = 1; + dl_slice->maxmcs = flexran_get_dl_slice_maxmcs(mod_id, slice_idx); + dl_slice->has_maxmcs = 1; + dl_slice->n_sorting = flexran_get_dl_slice_sorting(mod_id, slice_idx, &dl_slice->sorting); + if (dl_slice->n_sorting < 1) dl_slice->sorting = NULL; + dl_slice->accounting = flexran_get_dl_slice_accounting_policy(mod_id, slice_idx); + dl_slice->has_accounting = 1; + const char *s_name = flexran_get_dl_slice_scheduler(mod_id, slice_idx); + if (!dl_slice->scheduler_name + || strcmp(dl_slice->scheduler_name, s_name) != 0) { + dl_slice->scheduler_name = realloc(dl_slice->scheduler_name, strlen(s_name) + 1); + strcpy(dl_slice->scheduler_name, s_name); + } +} + +void flexran_agent_read_slice_ul_config(mid_t mod_id, int slice_idx, Protocol__FlexUlSlice *ul_slice) +{ + ul_slice->id = flexran_get_ul_slice_id(mod_id, slice_idx); + ul_slice->has_id = 1; + /* read label from corresponding sc_update entry or give default */ + ul_slice->label = PROTOCOL__FLEX_SLICE_LABEL__xMBB; + ul_slice->has_label = 1; + for (int i = 0; i < sc_update[mod_id]->n_ul; i++) { + if (sc_update[mod_id]->ul[i]->id == ul_slice->id + && sc_update[mod_id]->ul[i]->has_label) { + ul_slice->label = sc_update[mod_id]->ul[i]->label; + break; + } + } + ul_slice->percentage = flexran_get_ul_slice_percentage(mod_id, slice_idx); + ul_slice->has_percentage = 1; + 
/*ul_slice->isolation = flexran_get_ul_slice_isolation(mod_id, slice_idx);*/ + ul_slice->has_isolation = 0; + /*ul_slice->priority = flexran_get_ul_slice_priority(mod_id, slice_idx);*/ + ul_slice->has_priority = 0; + ul_slice->first_rb = flexran_get_ul_slice_first_rb(mod_id, slice_idx); + ul_slice->has_first_rb = 1; + /*ul_slice-> = flexran_get_ul_slice_length_rb(mod_id, slice_idx); + ul_slice->has_length_rb = 0;*/ + ul_slice->maxmcs = flexran_get_ul_slice_maxmcs(mod_id, slice_idx); + ul_slice->has_maxmcs = 1; + ul_slice->n_sorting = 0; + /*if (ul_slice->sorting) { + free(ul_slice->sorting); + ul_slice->sorting = NULL; + } + ul_slice->n_sorting = flexran_get_ul_slice_sorting(mod_id, slice_idx, &ul_slice->sorting); + if (ul_slice->n_sorting < 1) ul_slice->sorting = NULL;*/ + /*ul_slice->accounting = flexran_get_ul_slice_accounting_policy(mod_id, slice_idx);*/ + ul_slice->has_accounting = 0; + const char *s_name = flexran_get_ul_slice_scheduler(mod_id, slice_idx); + if (!ul_slice->scheduler_name + || strcmp(ul_slice->scheduler_name, s_name) != 0) { + ul_slice->scheduler_name = realloc(ul_slice->scheduler_name, strlen(s_name) + 1); + strcpy(ul_slice->scheduler_name, s_name); + } +} + +int check_dl_sorting_update(Protocol__FlexDlSlice *old, Protocol__FlexDlSlice *new) +{ + /* sorting_update => true when old->n_sorting == 0 or different numbers of + * elements; otherwise will check * element-wise */ + int sorting_update = old->n_sorting == 0 || (old->n_sorting != new->n_sorting); + for (int i = 0; i < old->n_sorting && !sorting_update; ++i) { + sorting_update = sorting_update || (new->sorting[i] != old->sorting[i]); + } + return sorting_update; +} + +int check_ul_sorting_update(Protocol__FlexUlSlice *old, Protocol__FlexUlSlice *new) +{ + /* sorting_update => true when old->n_sorting == 0 or different numbers of + * elements; otherwise will check * element-wise */ + int sorting_update = old->n_sorting == 0 || (old->n_sorting != new->n_sorting); + for (int i = 0; i < 
old->n_sorting && !sorting_update; ++i) { + sorting_update = sorting_update || (new->sorting[i] != old->sorting[i]); + } + return sorting_update; +} + +void overwrite_slice_config(mid_t mod_id, Protocol__FlexSliceConfig *exist, Protocol__FlexSliceConfig *update) +{ + if (update->has_intraslice_share_active + && exist->intraslice_share_active != update->intraslice_share_active) { + LOG_I(FLEXRAN_AGENT, "[%d] update intraslice_share_active: %d -> %d\n", + mod_id, exist->intraslice_share_active, update->intraslice_share_active); + exist->intraslice_share_active = update->intraslice_share_active; + exist->has_intraslice_share_active = 1; + } + if (update->has_interslice_share_active + && exist->interslice_share_active != update->interslice_share_active) { + LOG_I(FLEXRAN_AGENT, "[%d] update interslice_share_active: %d -> %d\n", + mod_id, exist->interslice_share_active, update->interslice_share_active); + exist->interslice_share_active = update->interslice_share_active; + exist->has_interslice_share_active = 1; + } +} + +void overwrite_slice_config_dl(mid_t mod_id, Protocol__FlexDlSlice *exist, Protocol__FlexDlSlice *update) +{ + if (update->label != exist->label) { + LOG_I(FLEXRAN_AGENT, "[%d][DL slice %d] update label: %d -> %d\n", + mod_id, update->id, exist->label, update->label); + exist->label = update->label; + exist->has_label = 1; + } + if (update->percentage != exist->percentage) { + LOG_I(FLEXRAN_AGENT, "[%d][DL slice %d] update percentage: %d -> %d\n", + mod_id, update->id, exist->percentage, update->percentage); + exist->percentage = update->percentage; + exist->has_percentage = 1; + } + if (update->isolation != exist->isolation) { + LOG_I(FLEXRAN_AGENT, "[%d][DL slice %d] update isolation: %d -> %d\n", + mod_id, update->id, exist->isolation, update->isolation); + exist->isolation = update->isolation; + exist->has_isolation = 1; + } + if (update->priority != exist->priority) { + LOG_I(FLEXRAN_AGENT, "[%d][DL slice %d] update priority: %d -> %d\n", + mod_id, 
update->id, exist->priority, update->priority); + exist->priority = update->priority; + exist->has_priority = 1; + } + if (update->position_low != exist->position_low) { + LOG_I(FLEXRAN_AGENT, "[%d][DL slice %d] update position_low: %d -> %d\n", + mod_id, update->id, exist->position_low, update->position_low); + exist->position_low = update->position_low; + exist->has_position_low = 1; + } + if (update->position_high != exist->position_high) { + LOG_I(FLEXRAN_AGENT, "[%d][DL slice %d] update position_high: %d -> %d\n", + mod_id, update->id, exist->position_high, update->position_high); + exist->position_high = update->position_high; + exist->has_position_high = 1; + } + if (update->maxmcs != exist->maxmcs) { + LOG_I(FLEXRAN_AGENT, "[%d][DL slice %d] update maxmcs: %d -> %d\n", + mod_id, update->id, exist->maxmcs, update->maxmcs); + exist->maxmcs = update->maxmcs; + exist->has_maxmcs = 1; + } + if (check_dl_sorting_update(exist, update)) { + LOG_I(FLEXRAN_AGENT, "[%d][DL slice %d] update sorting array\n", mod_id, update->id); + if (exist->n_sorting != update->n_sorting) { + exist->n_sorting = update->n_sorting; + exist->sorting = realloc(exist->sorting, exist->n_sorting * sizeof(Protocol__FlexDlSorting)); + if (!exist->sorting) exist->n_sorting = 0; + } + for (int i = 0; i < exist->n_sorting; i++) + exist->sorting[i] = update->sorting[i]; + } + if (update->accounting != exist->accounting) { + LOG_I(FLEXRAN_AGENT, "[%d][DL slice %d] update accounting: %d -> %d\n", + mod_id, update->id, exist->accounting, update->accounting); + exist->accounting = update->accounting; + exist->has_accounting = 1; + } + if (!exist->scheduler_name + || strcmp(update->scheduler_name, exist->scheduler_name) != 0) { + LOG_I(FLEXRAN_AGENT, "[%d][DL slice %d] update scheduler: %s -> %s\n", + mod_id, update->id, exist->scheduler_name, update->scheduler_name); + if (exist->scheduler_name) free(exist->scheduler_name); + exist->scheduler_name = strdup(update->scheduler_name); + } +} + +void 
overwrite_slice_config_ul(mid_t mod_id, Protocol__FlexUlSlice *exist, Protocol__FlexUlSlice *update) +{ + if (update->label != exist->label) { + LOG_I(FLEXRAN_AGENT, "[%d][UL slice %d] update label: %d -> %d\n", + mod_id, update->id, exist->label, update->label); + exist->label = update->label; + exist->has_label = 1; + } + if (update->percentage != exist->percentage) { + LOG_I(FLEXRAN_AGENT, "[%d][UL slice %d] update percentage: %d -> %d\n", + mod_id, update->id, exist->percentage, update->percentage); + exist->percentage = update->percentage; + exist->has_percentage = 1; + } + if (update->isolation != exist->isolation) { + LOG_I(FLEXRAN_AGENT, "[%d][UL slice %d] update isolation: %d -> %d\n", + mod_id, update->id, exist->isolation, update->isolation); + exist->isolation = update->isolation; + exist->has_isolation = 1; + } + if (update->priority != exist->priority) { + LOG_I(FLEXRAN_AGENT, "[%d][UL slice %d] update priority: %d -> %d\n", + mod_id, update->id, exist->priority, update->priority); + exist->priority = update->priority; + exist->has_priority = 1; + } + if (update->first_rb != exist->first_rb) { + LOG_I(FLEXRAN_AGENT, "[%d][UL slice %d] update first_rb: %d -> %d\n", + mod_id, update->id, exist->first_rb, update->first_rb); + exist->first_rb = update ->first_rb; + exist->has_first_rb = 1; + } + /*if (update->lenght_rb != exist->lenght_rb) { + LOG_I(FLEXRAN_AGENT, "[%d][UL slice %d] update lenght_rb: %d -> %d\n", + mod_id, update->id, exist->lenght_rb, update->lenght_rb); + exist->lenght_rb = update->lenght_rb; + }*/ + if (update->maxmcs != exist->maxmcs) { + LOG_I(FLEXRAN_AGENT, "[%d][UL slice %d] update maxmcs: %d -> %d\n", + mod_id, update->id, exist->maxmcs, update->maxmcs); + exist->maxmcs = update->maxmcs; + exist->has_maxmcs = 1; + } + /* TODO + int sorting_update = 0; + int n = min(exist->n_sorting, update->n_sorting); + int i = 0; + while (i < n && !sorting_update) { + sorting_update = sorting_update || (update->sorting[i] != exist->sorting[i]); 
+ i++; + } + if (sorting_update) { + LOG_I(FLEXRAN_AGENT, "[%d][UL slice %d] update sorting array\n", update->id, mod_id); + if (exist->n_sorting != update->n_sorting) + LOG_W(FLEXRAN_AGENT, "[%d][UL slice %d] only writing %d elements\n", + mod_id, update->id, n); + for (i = 0; i < n; i++) + exist->sorting[i] = update->sorting[i]; + } + */ + if (update->accounting != exist->accounting) { + LOG_I(FLEXRAN_AGENT, "[%d][UL slice %d] update accounting: %d -> %d\n", + mod_id, update->id, exist->accounting, update->accounting); + exist->accounting = update->accounting; + exist->has_accounting = 1; + } + if (!exist->scheduler_name + || strcmp(update->scheduler_name, exist->scheduler_name) != 0) { + LOG_I(FLEXRAN_AGENT, "[%d][UL slice %d] update scheduler: %s -> %s\n", + mod_id, update->id, exist->scheduler_name, update->scheduler_name); + if (exist->scheduler_name) free(exist->scheduler_name); + exist->scheduler_name = strdup(update->scheduler_name); + } +} + +void fill_dl_slice(mid_t mod_id, Protocol__FlexDlSlice *s, Protocol__FlexDlSlice *from) +{ + /* function fills slice with information from another slice or with default + * values (currently slice 0) if from is NULL */ + /* TODO fill the slice depending on the chosen label */ + if (!s->has_label) { + s->has_label = 1; + s->label = from ? from->label : sc_update[mod_id]->dl[0]->label; + } + if (!s->has_percentage) { + s->has_percentage = 1; + s->percentage = from ? from->percentage : sc_update[mod_id]->dl[0]->percentage; + } + if (!s->has_isolation) { + s->has_isolation = 1; + s->isolation = from ? from->isolation : sc_update[mod_id]->dl[0]->isolation; + } + if (!s->has_priority) { + s->has_priority = 1; + s->priority = from ? from->priority : sc_update[mod_id]->dl[0]->priority; + } + if (!s->has_position_low) { + s->has_position_low = 1; + s->position_low = from ? from->position_low : sc_update[mod_id]->dl[0]->position_low; + } + if (!s->has_position_high) { + s->has_position_high = 1; + s->position_high = from ? 
from->position_high : sc_update[mod_id]->dl[0]->position_high; + } + if (!s->has_maxmcs) { + s->has_maxmcs = 1; + s->maxmcs = from ? from->maxmcs : sc_update[mod_id]->dl[0]->maxmcs; + } + if (s->n_sorting == 0) { + s->n_sorting = from ? from->n_sorting : sc_update[mod_id]->dl[0]->n_sorting; + s->sorting = calloc(s->n_sorting, sizeof(Protocol__FlexDlSorting)); + if (!s->sorting) s->n_sorting = 0; + for (int i = 0; i < s->n_sorting; ++i) + s->sorting[i] = from ? from->sorting[i] : sc_update[mod_id]->dl[0]->sorting[i]; + } + if (!s->has_accounting) { + s->accounting = from ? from->accounting : sc_update[mod_id]->dl[0]->accounting; + } + if (!s->scheduler_name) { + s->scheduler_name = strdup(from ? from->scheduler_name : sc_update[mod_id]->dl[0]->scheduler_name); + } +} + +Protocol__FlexDlSlice *get_existing_dl_slice(mid_t mod_id, int id) +{ + for (int i = 0; i < sc_update[mod_id]->n_dl; ++i) { + if (id == sc_update[mod_id]->dl[i]->id) { + return sc_update[mod_id]->dl[i]; + } + } + return NULL; +} + +Protocol__FlexDlSlice *create_new_dl_slice(mid_t mod_id, int id) +{ + LOG_I(FLEXRAN_AGENT, + "[%d] Creating DL slice with ID %d, taking default values from DL slice 0\n", + mod_id, id); + Protocol__FlexDlSlice *to = sc_update[mod_id]->dl[sc_update[mod_id]->n_dl]; + sc_update[mod_id]->n_dl++; + AssertFatal(sc_update[mod_id]->n_dl <= MAX_NUM_SLICES, + "cannot create more than MAX_NUM_SLICES\n"); + to->id = id; + return to; +} + +void fill_ul_slice(mid_t mod_id, Protocol__FlexUlSlice *s, Protocol__FlexUlSlice *from) +{ + /* function fills slice with information from another slice or with default + * values (currently slice 0) if from is NULL */ + /* TODO fill the slice depending on the chosen label */ + if (!s->has_label) { + s->has_label = 1; + s->label = from ? from->label : sc_update[mod_id]->ul[0]->label; + } + if (!s->has_percentage) { + s->has_percentage = 1; + s->percentage = from ? 
from->percentage : sc_update[mod_id]->ul[0]->percentage; + } + if (!s->has_isolation) { + s->has_isolation = 1; + s->isolation = from ? from->isolation : sc_update[mod_id]->ul[0]->isolation; + } + if (!s->has_priority) { + s->has_priority = 1; + s->priority = from ? from->priority : sc_update[mod_id]->ul[0]->priority; + } + if (!s->has_first_rb) { + s->has_first_rb = 1; + s->first_rb = from ? from->first_rb : sc_update[mod_id]->ul[0]->first_rb; + } + if (!s->has_maxmcs) { + s->has_maxmcs = 1; + s->maxmcs = from ? from->maxmcs : sc_update[mod_id]->ul[0]->maxmcs; + } + if (s->n_sorting == 0) { + s->n_sorting = from ? from->n_sorting : sc_update[mod_id]->ul[0]->n_sorting; + s->sorting = calloc(s->n_sorting, sizeof(Protocol__FlexUlSorting)); + if (!s->sorting) s->n_sorting = 0; + for (int i = 0; i < s->n_sorting; ++i) + s->sorting[i] = from ? from->sorting[i] : sc_update[mod_id]->ul[0]->sorting[i]; + } + if (!s->has_accounting) { + s->accounting = from ? from->accounting : sc_update[mod_id]->ul[0]->accounting; + } + if (!s->scheduler_name) { + s->scheduler_name = strdup(from ? 
from->scheduler_name : sc_update[mod_id]->ul[0]->scheduler_name); + } +} + +Protocol__FlexUlSlice *get_existing_ul_slice(mid_t mod_id, int id) +{ + for (int i = 0; i < sc_update[mod_id]->n_ul; ++i) { + if (id == sc_update[mod_id]->ul[i]->id) { + return sc_update[mod_id]->ul[i]; + } + } + return NULL; +} + +Protocol__FlexUlSlice *create_new_ul_slice(mid_t mod_id, int id) +{ + LOG_I(FLEXRAN_AGENT, + "[%d] Creating UL slice with ID %d, taking default values from UL slice 0\n", + mod_id, id); + Protocol__FlexUlSlice *to = sc_update[mod_id]->ul[sc_update[mod_id]->n_ul]; + sc_update[mod_id]->n_ul++; + AssertFatal(sc_update[mod_id]->n_ul <= MAX_NUM_SLICES, + "cannot create more than MAX_NUM_SLICES\n"); + to->id = id; + return to; +} + +void prepare_update_slice_config(mid_t mod_id, Protocol__FlexSliceConfig *sup) +{ + int verified = 1; + if (!sc_update[mod_id]) { + LOG_E(FLEXRAN_AGENT, "Can not update slice policy (no existing slice profile)\n"); + return; + } + + pthread_mutex_lock(&sc_update_mtx); + /* no need for tests in the current state as there are only two protobuf + * bools for intra-/interslice sharing. The function applies new values if + * applicable */ + overwrite_slice_config(mod_id, sc_update[mod_id], sup); + + if (sup->n_dl == 0) { + LOG_I(FLEXRAN_AGENT, "[%d] no DL slice configuration in flex_slice_config message\n", mod_id); + } else { + /* verify slice parameters */ + for (int i = 0; i < sup->n_dl; i++) { + if (!sup->dl[i]->has_id) { + verified = 0; + break; + } + Protocol__FlexDlSlice *dls = get_existing_dl_slice(mod_id, sup->dl[i]->id); + /* fill up so that the slice is complete. This way, we don't need to + * worry about it later */ + fill_dl_slice(mod_id, sup->dl[i], dls); + verified = verified && flexran_verify_dl_slice(mod_id, sup->dl[i]); + if (!verified) break; + } + + /* verify group-based parameters (e.g. sum percentage should not exceed + * 100%). 
Can be used to perform admission control */ + verified = verified && flexran_verify_group_dl_slices(mod_id, + sc_update[mod_id]->dl, sc_update[mod_id]->n_dl, sup->dl, sup->n_dl); + + if (verified) { + for (int i = 0; i < sup->n_dl; i++) { + /* if all verifications were successful, get existing slice for ID or + * create new one and overwrite with the update */ + Protocol__FlexDlSlice *dls = get_existing_dl_slice(mod_id, sup->dl[i]->id); + if (!dls) dls = create_new_dl_slice(mod_id, sup->dl[i]->id); + overwrite_slice_config_dl(mod_id, dls, sup->dl[i]); + } + } else { + LOG_E(FLEXRAN_AGENT, "[%d] DL slice verification failed, refusing application\n", mod_id); + } + } + + verified = 1; + if (sup->n_ul == 0) { + LOG_I(FLEXRAN_AGENT, "[%d] no UL slice configuration in flex_slice_config message\n", mod_id); + } else { + /* verify slice parameters */ + for (int i = 0; i < sup->n_ul; i++) { + if (!sup->ul[i]->has_id) { + verified = 0; + break; + } + Protocol__FlexUlSlice *uls = get_existing_ul_slice(mod_id, sup->ul[i]->id); + /* fill up so that the slice is complete. This way, we don't need to + * worry about it later */ + fill_ul_slice(mod_id, sup->ul[i], uls); + verified = verified && flexran_verify_ul_slice(mod_id, sup->ul[i]); + if (!verified) break; + } + + /* verify group-based parameters (e.g. sum percentage should not exceed + * 100%). 
Can be used to perform admission control */ + verified = verified && flexran_verify_group_ul_slices(mod_id, + sc_update[mod_id]->ul, sc_update[mod_id]->n_ul, sup->ul, sup->n_ul); + + if (verified) { + for (int i = 0; i < sup->n_ul; i++) { + /* if all verifications were successful, get existing slice for ID or + * create new one and overwrite with the update */ + Protocol__FlexUlSlice *uls = get_existing_ul_slice(mod_id, sup->ul[i]->id); + if (!uls) uls = create_new_ul_slice(mod_id, sup->ul[i]->id); + overwrite_slice_config_ul(mod_id, uls, sup->ul[i]); + } + } else { + LOG_E(FLEXRAN_AGENT, "[%d] UL slice verification failed, refusing application\n", mod_id); + } + } + pthread_mutex_unlock(&sc_update_mtx); + + perform_slice_config_update_count = 1; +} + +int apply_new_slice_config(mid_t mod_id, Protocol__FlexSliceConfig *olds, Protocol__FlexSliceConfig *news) +{ + /* not setting the old configuration is intentional, as it will be picked up + * later when reading the configuration. There is thus a direct feedback + * whether it has been set. */ + int changes = 0; + if (olds->intraslice_share_active != news->intraslice_share_active) { + flexran_set_intraslice_sharing_active(mod_id, news->intraslice_share_active); + changes++; + } + if (olds->interslice_share_active != news->interslice_share_active) { + flexran_set_interslice_sharing_active(mod_id, news->interslice_share_active); + changes++; + } + return changes; +} + +int apply_new_slice_dl_config(mid_t mod_id, Protocol__FlexDlSlice *oldc, Protocol__FlexDlSlice *newc) +{ + /* not setting the old configuration is intentional, as it will be picked up + * later when reading the configuration. There is thus a direct feedback + * whether it has been set. 
*/ + int changes = 0; + int slice_idx = flexran_find_dl_slice(mod_id, newc->id); + if (slice_idx < 0) { + LOG_W(FLEXRAN_AGENT, "[%d] cannot find index for slice ID %d\n", mod_id, newc->id); + return 0; + } + if (oldc->percentage != newc->percentage) { + flexran_set_dl_slice_percentage(mod_id, slice_idx, newc->percentage); + changes++; + } + if (oldc->isolation != newc->isolation) { + flexran_set_dl_slice_isolation(mod_id, slice_idx, newc->isolation); + changes++; + } + if (oldc->priority != newc->priority) { + flexran_set_dl_slice_priority(mod_id, slice_idx, newc->priority); + changes++; + } + if (oldc->position_low != newc->position_low) { + flexran_set_dl_slice_position_low(mod_id, slice_idx, newc->position_low); + changes++; + } + if (oldc->position_high != newc->position_high) { + flexran_set_dl_slice_position_high(mod_id, slice_idx, newc->position_high); + changes++; + } + if (oldc->maxmcs != newc->maxmcs) { + flexran_set_dl_slice_maxmcs(mod_id, slice_idx, newc->maxmcs); + changes++; + } + if (check_dl_sorting_update(oldc, newc)) { + flexran_set_dl_slice_sorting(mod_id, slice_idx, newc->sorting, newc->n_sorting); + changes++; + } + if (oldc->accounting != newc->accounting) { + flexran_set_dl_slice_accounting_policy(mod_id, slice_idx, newc->accounting); + changes++; + } + if (!oldc->scheduler_name + || strcmp(oldc->scheduler_name, newc->scheduler_name) != 0) { + int ret = flexran_set_dl_slice_scheduler(mod_id, slice_idx, newc->scheduler_name); + AssertFatal(ret, "could not set DL slice scheduler for slice %d idx %d\n", + newc->id, slice_idx); + changes++; + } + return changes; +} + +int apply_new_slice_ul_config(mid_t mod_id, Protocol__FlexUlSlice *oldc, Protocol__FlexUlSlice *newc) +{ + /* not setting the old configuration is intentional, as it will be picked up + * later when reading the configuration. There is thus a direct feedback + * whether it has been set. 
*/ + int changes = 0; + int slice_idx = flexran_find_ul_slice(mod_id, newc->id); + if (slice_idx < 0) { + LOG_W(FLEXRAN_AGENT, "[%d] cannot find index for slice ID %d\n", mod_id, newc->id); + return 0; + } + if (oldc->percentage != newc->percentage) { + flexran_set_ul_slice_percentage(mod_id, slice_idx, newc->percentage); + changes++; + } + if (oldc->isolation != newc->isolation) { + /*flexran_set_ul_slice_isolation(mod_id, slice_idx, newc->isolation); + *changes++;*/ + LOG_W(FLEXRAN_AGENT, "[%d][UL slice %d] setting isolation is not supported\n", + mod_id, newc->id); + } + if (oldc->priority != newc->priority) { + /*flexran_set_ul_slice_priority(mod_id, slice_idx, newc->priority); + *changes++;*/ + LOG_W(FLEXRAN_AGENT, "[%d][UL slice %d] setting the priority is not supported\n", + mod_id, newc->id); + } + if (oldc->first_rb != newc->first_rb) { + flexran_set_ul_slice_first_rb(mod_id, slice_idx, newc->first_rb); + changes++; + } + /*if (oldc->length_rb != newc->length_rb) { + flexran_set_ul_slice_length_rb(mod_id, slice_idx, newc->length_rb); + changes++; + LOG_W(FLEXRAN_AGENT, "[%d][UL slice %d] setting length_rb is not supported\n", + mod_id, newc->id); + }*/ + if (oldc->maxmcs != newc->maxmcs) { + flexran_set_ul_slice_maxmcs(mod_id, slice_idx, newc->maxmcs); + changes++; + } + /*if (check_ul_sorting_update(oldc, newc)) { + flexran_set_ul_slice_sorting(mod_id, slice_idx, newc->sorting, n); + changes++; + LOG_W(FLEXRAN_AGENT, "[%d][UL slice %d] setting the sorting is not supported\n", + mod_id, newc->id); + }*/ + if (oldc->accounting != newc->accounting) { + /*flexran_set_ul_slice_accounting_policy(mod_id, slice_idx, newc->accounting); + *changes++;*/ + LOG_W(FLEXRAN_AGENT, "[%d][UL slice %d] setting the accounting is not supported\n", + mod_id, newc->id); + } + if (!oldc->scheduler_name + || strcmp(oldc->scheduler_name, newc->scheduler_name) != 0) { + int ret = flexran_set_ul_slice_scheduler(mod_id, slice_idx, newc->scheduler_name); + AssertFatal(ret, "could not 
set DL slice scheduler for slice %d idx %d\n", + newc->id, slice_idx); + changes++; + } + return changes; +} + +void prepare_ue_slice_assoc_update(mid_t mod_id, Protocol__FlexUeConfig *ue_config) +{ + if (n_ue_slice_assoc_updates == MAX_NUM_SLICES) { + LOG_E(FLEXRAN_AGENT, + "[%d] can not handle flex_ue_config message, buffer is full; try again later\n", + mod_id); + return; + } + if (!ue_config->has_rnti) { + LOG_E(FLEXRAN_AGENT, + "[%d] cannot update UE to slice association, no RNTI in flex_ue_config message\n", + mod_id); + return; + } + if (ue_config->has_dl_slice_id) + LOG_I(FLEXRAN_AGENT, "[%d] associating UE RNTI %#x to DL slice ID %d\n", + mod_id, ue_config->rnti, ue_config->dl_slice_id); + if (ue_config->has_ul_slice_id) + LOG_I(FLEXRAN_AGENT, "[%d] associating UE RNTI %#x to UL slice ID %d\n", + mod_id, ue_config->rnti, ue_config->ul_slice_id); + ue_slice_assoc_update[n_ue_slice_assoc_updates++] = ue_config; + perform_slice_config_update_count = 2; +} + +int apply_ue_slice_assoc_update(mid_t mod_id) +{ + int i; + int changes = 0; + for (i = 0; i < n_ue_slice_assoc_updates; i++) { + int ue_id = find_UE_id(mod_id, ue_slice_assoc_update[i]->rnti); + if (ue_slice_assoc_update[i]->has_dl_slice_id) { + int slice_idx = flexran_find_dl_slice(mod_id, ue_slice_assoc_update[i]->dl_slice_id); + if (flexran_dl_slice_exists(mod_id, slice_idx)) { + flexran_set_ue_dl_slice_idx(mod_id, ue_id, slice_idx); + changes++; + } else { + LOG_W(FLEXRAN_AGENT, "[%d] DL slice %d does not exist, refusing change\n", + mod_id, ue_slice_assoc_update[i]->dl_slice_id); + } + } + if (ue_slice_assoc_update[i]->has_ul_slice_id) { + int slice_idx = flexran_find_ul_slice(mod_id, ue_slice_assoc_update[i]->ul_slice_id); + if (flexran_ul_slice_exists(mod_id, slice_idx)) { + flexran_set_ue_ul_slice_idx(mod_id, ue_id, slice_idx); + changes++; + } else { + LOG_W(FLEXRAN_AGENT, "[%d] UL slice %d does not exist, refusing change\n", + mod_id, ue_slice_assoc_update[i]->ul_slice_id); + } + } + } + 
n_ue_slice_assoc_updates = 0; + return changes; +} diff --git a/openair2/ENB_APP/CONTROL_MODULES/MAC/flexran_agent_mac_internal.h b/openair2/ENB_APP/CONTROL_MODULES/MAC/flexran_agent_mac_internal.h index f69e2cde4098ad5d035a522c9815632bffb37312..01fc51f6a6c9200def3d6df20e1c7554088a622f 100644 --- a/openair2/ENB_APP/CONTROL_MODULES/MAC/flexran_agent_mac_internal.h +++ b/openair2/ENB_APP/CONTROL_MODULES/MAC/flexran_agent_mac_internal.h @@ -107,4 +107,45 @@ int parse_ul_scheduler_parameters(mid_t mod_id, yaml_parser_t *parser); int load_dl_scheduler_function(mid_t mod_id, const char *function_name); +/*** Functions for handling a slice config ***/ + +/* allocate memory for a Protocol__FlexSliceConfig structure with n_dl DL slice + * configs and m_ul UL slice configs */ +Protocol__FlexSliceConfig *flexran_agent_create_slice_config(int n_dl, int m_ul); + +/* read the general slice parameters via RAN into the given + * Protocol__FlexSliceConfig struct */ +void flexran_agent_read_slice_config(mid_t mod_id, Protocol__FlexSliceConfig *s); + +/* read the DL slice config via the RAN into a given Protocol__FlexDlSlice + * struct */ +void flexran_agent_read_slice_dl_config(mid_t mod_id, int slice_idx, Protocol__FlexDlSlice *dl_slice); + +/* read the UL slice config via the RAN into a given Protocol__FlexUlSlice + * struct */ +void flexran_agent_read_slice_ul_config(mid_t mod_id, int slice_idx, Protocol__FlexUlSlice *ul_slice); + +/* reads content of slice over the sc_update structure, so that it can be + * applied later by performing a diff between slice_config and sc_update */ +void prepare_update_slice_config(mid_t mod_id, Protocol__FlexSliceConfig *slice); + +/* apply generic slice parameters (e.g. intra-/interslice sharing activated or + * not) if there are changes. Returns the number of changed parameters. 
*/ +int apply_new_slice_config(mid_t mod_id, Protocol__FlexSliceConfig *olds, Protocol__FlexSliceConfig *news); + +/* apply new configuration of slice in DL if there are changes between the + * parameters. Returns the number of changed parameters. */ +int apply_new_slice_dl_config(mid_t mod_id, Protocol__FlexDlSlice *oldc, Protocol__FlexDlSlice *newc); + +/* apply new configuration of slice in UL if there are changes between the + * parameters. Returns the number of changed parameters. */ +int apply_new_slice_ul_config(mid_t mod_id, Protocol__FlexUlSlice *oldc, Protocol__FlexUlSlice *newc); + +/* inserts a new ue_config into the structure keeping ue to slice association + * updates and marks so it can be applied */ +void prepare_ue_slice_assoc_update(mid_t mod_id, Protocol__FlexUeConfig *ue_config); + +/* apply a new association between a UE and a slice (both DL and UL) */ +int apply_ue_slice_assoc_update(mid_t mod_id); + #endif /*FLEXRAN_AGENT_MAC_INTERNAL_H_*/ diff --git a/openair2/ENB_APP/CONTROL_MODULES/MAC/flexran_agent_mac_slice_verification.c b/openair2/ENB_APP/CONTROL_MODULES/MAC/flexran_agent_mac_slice_verification.c new file mode 100644 index 0000000000000000000000000000000000000000..67329e91be0d9508c5943dd527dfb990ef654201 --- /dev/null +++ b/openair2/ENB_APP/CONTROL_MODULES/MAC/flexran_agent_mac_slice_verification.c @@ -0,0 +1,373 @@ +/* + * Licensed to the OpenAirInterface (OAI) Software Alliance under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The OpenAirInterface Software Alliance licenses this file to You under + * the OAI Public License, Version 1.1 (the "License"); you may not use this file + * except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.openairinterface.org/?page_id=698 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + *------------------------------------------------------------------------------- + * For more information about the OpenAirInterface (OAI) Software Alliance: + * contact@openairinterface.org + */ + +/*! \file flexran_agent_mac_slice_verification.c + * \brief MAC Agent slice verification helper functions + * \author Robert Schmidt + * \date 2018 + * \version 0.1 + */ + + +#include "flexran_agent_mac_slice_verification.h" + +/* overlap check for UL slices, helper type */ +struct sregion_s { + int start; + int length; +}; + +/* forward declaration of locally-used verification functions */ +int flexran_dl_slice_verify_pct(int pct); +int flexran_dl_slice_verify_priority(int prio); +int flexran_dl_slice_verify_position(int pos_low, int pos_high); +int flexran_dl_slice_verify_maxmcs(int maxmcs); +int flexran_ul_slice_verify_pct(int pct); +int flexran_ul_slice_verify_priority(int prio); +int flexran_ul_slice_verify_first_rb(int first_rb); +int flexran_ul_slice_verify_maxmcs(int maxmcs); +int check_ul_slice_overlap(mid_t mod_id, struct sregion_s *sr, int n); + +int flexran_verify_dl_slice(mid_t mod_id, Protocol__FlexDlSlice *dls) +{ + /* check mandatory parameters */ + if (!dls->has_id) { + LOG_E(FLEXRAN_AGENT, "[%d] Incoming DL slice configuration has no ID\n", mod_id); + return 0; + } + + /* verify parameters individualy */ + /* label is enum */ + if (!flexran_dl_slice_verify_pct(dls->percentage)) { + LOG_E(FLEXRAN_AGENT, "[%d][DL slice %d] illegal DL slice percentage (%d)\n", + mod_id, dls->id, dls->percentage); + return 0; + } + /* isolation is a 
protobuf bool */ + if (!flexran_dl_slice_verify_priority(dls->priority)) { + LOG_E(FLEXRAN_AGENT, "[%d][DL slice %d] illegal DL slice priority (%d)\n", + mod_id, dls->id, dls->priority); + return 0; + } + if (!flexran_dl_slice_verify_position(dls->position_low, dls->position_high)) { + LOG_E(FLEXRAN_AGENT, + "[%d][DL slice %d] illegal DL slice position low (%d) and/or high (%d)\n", + mod_id, dls->id, dls->position_low, dls->position_high); + return 0; + } + if (!flexran_dl_slice_verify_maxmcs(dls->maxmcs)) { + LOG_E(FLEXRAN_AGENT, "[%d][DL slice %d] illegal DL slice max mcs %d\n", + mod_id, dls->id, dls->maxmcs); + return 0; + } + if (dls->n_sorting == 0) { + LOG_E(FLEXRAN_AGENT, "[%d][DL slice %d] no sorting in DL slice\n", + mod_id, dls->id); + return 0; + } + if (!dls->sorting) { + LOG_E(FLEXRAN_AGENT, "[%d][DL slice %d] no sorting found in DL slice\n", + mod_id, dls->id); + return 0; + } + /* sorting is an enum */ + /* accounting is an enum */ + if (!dls->scheduler_name) { + LOG_E(FLEXRAN_AGENT, "[%d][DL slice %d] no scheduler name found\n", + mod_id, dls->id); + return 0; + } + if (strcmp(dls->scheduler_name, "schedule_ue_spec") != 0) { + LOG_E(FLEXRAN_AGENT, "[%d][DL slice %d] setting the scheduler to something " + "different than schedule_ue_spec is currently not allowed\n", + mod_id, dls->id); + return 0; + } + + return 1; +} + +int flexran_verify_group_dl_slices(mid_t mod_id, Protocol__FlexDlSlice **existing, + int n_ex, Protocol__FlexDlSlice **update, int n_up) +{ + int i, j, n; + int pct, pct_orig; + /* for every update, array points to existing slice, or NULL if update + * creates new slice */ + Protocol__FlexDlSlice *s[n_up]; + for (i = 0; i < n_up; i++) { + s[i] = NULL; + for (j = 0; j < n_ex; j++) { + if (existing[j]->id == update[i]->id) + s[i] = existing[j]; + } + } + + /* check that number of created and number of added slices in total matches + * [1,10] */ + n = n_ex; + for (i = 0; i < n_up; i++) { + /* new slice */ + if (!s[i]) n += 1; + /* 
slice will be deleted */ + else if (s[i]->percentage == 0) n -= 1; + /* else "only" an update */ + } + + if (n < 1 || n > MAX_NUM_SLICES) { + LOG_E(FLEXRAN_AGENT, "[%d] Illegal number of resulting DL slices (%d -> %d)\n", + mod_id, n_ex, n); + return 0; + } + + /* check that the sum of all slices percentages (including removed/added + * slices) matches [1,100] */ + pct = 0; + for (i = 0; i < n_ex; i++) { + pct += existing[i]->percentage; + } + pct_orig = pct; + for (i = 0; i < n_up; i++) { + /* if there is an existing slice, subtract its percentage and add the + * update's percentage */ + if (s[i]) + pct -= s[i]->percentage; + pct += update[i]->percentage; + } + if (pct < 1 || pct > 100) { + LOG_E(FLEXRAN_AGENT, + "[%d] invalid total RB share for DL slices (%d%% -> %d%%)\n", + mod_id, pct_orig, pct); + return 0; + } + + return 1; +} + +int flexran_verify_ul_slice(mid_t mod_id, Protocol__FlexUlSlice *uls) +{ + /* check mandatory parameters */ + if (!uls->has_id) { + LOG_E(FLEXRAN_AGENT, "[%d] Incoming UL slice configuration has no ID\n", mod_id); + return 0; + } + + /* verify parameters individually */ + /* label is enum */ + if (!flexran_ul_slice_verify_pct(uls->percentage)) { + LOG_E(FLEXRAN_AGENT, "[%d][UL slice %d] illegal UL slice percentage (%d)\n", + mod_id, uls->id, uls->percentage); + return 0; + } + /* isolation is a protobuf bool */ + if (!flexran_ul_slice_verify_priority(uls->priority)) { + LOG_E(FLEXRAN_AGENT, "[%d][UL slice %d] illegal UL slice percentage (%d)\n", + mod_id, uls->id, uls->priority); + return 0; + } + if (!flexran_ul_slice_verify_first_rb(uls->first_rb)) { + LOG_E(FLEXRAN_AGENT, "[%d][UL slice %d] illegal UL slice first RB (%d)\n", + mod_id, uls->id, uls->first_rb); + return 0; + } + if (!flexran_ul_slice_verify_maxmcs(uls->maxmcs)) { + LOG_E(FLEXRAN_AGENT, "[%d][UL slice %d] illegal UL slice max mcs (%d)\n", + mod_id, uls->id, uls->maxmcs); + return 0; + } + /* TODO + if (uls->n_sorting == 0) { + LOG_E(FLEXRAN_AGENT, "[%d][UL slice %d] 
no sorting in UL slice\n", + mod_id, uls->id); + return 0; + } + if (!uls->sorting) { + LOG_E(FLEXRAN_AGENT, "[%d][UL slice %d] no sorting found in UL slice\n", + mod_id, uls->id); + return 0; + } + */ + /* sorting is an enum */ + /* accounting is an enum */ + if (!uls->scheduler_name) { + LOG_E(FLEXRAN_AGENT, "[%d][UL slice %d] no scheduler name found\n", + mod_id, uls->id); + return 0; + } + if (strcmp(uls->scheduler_name, "schedule_ulsch_rnti") != 0) { + LOG_E(FLEXRAN_AGENT, "[%d][UL slice %d] setting the scheduler to something " + "different than schedule_ulsch_rnti is currently not allowed\n", + mod_id, uls->id); + return 0; + } + + return 1; +} + +int flexran_verify_group_ul_slices(mid_t mod_id, Protocol__FlexUlSlice **existing, + int n_ex, Protocol__FlexUlSlice **update, int n_up) +{ + int i, j, n; + int pct, pct_orig; + /* for every update, array "s" points to existing slice, or NULL if update + * creates new slice; array "offs" gives the offset of this slice */ + Protocol__FlexUlSlice *s[n_up]; + int offs[n_up]; + for (i = 0; i < n_up; i++) { + s[i] = NULL; + offs[i] = 0; + for (j = 0; j < n_ex; j++) { + if (existing[j]->id == update[i]->id) { + s[i] = existing[j]; + offs[i] = j; + } + } + } + + /* check that number of created and number of added slices in total matches + * [1,10] */ + n = n_ex; + for (i = 0; i < n_up; i++) { + /* new slice */ + if (!s[i]) n += 1; + /* slice will be deleted */ + else if (s[i]->percentage == 0) n -= 1; + /* else "only" an update */ + } + + if (n < 1 || n > MAX_NUM_SLICES) { + LOG_E(FLEXRAN_AGENT, "[%d] Illegal number of resulting UL slices (%d -> %d)\n", + mod_id, n_ex, n); + return 0; + } + + /* check that the sum of all slices percentages (including removed/added + * slices) matches [1,100] */ + pct = 0; + for (i = 0; i < n_ex; i++) { + pct += existing[i]->percentage; + } + pct_orig = pct; + for (i = 0; i < n_up; i++) { + /* if there is an existing slice, subtract its percentage and add the + * update's percentage */ + if 
(s[i]) + pct -= s[i]->percentage; + pct += update[i]->percentage; + } + if (pct < 1 || pct > 100) { + LOG_E(FLEXRAN_AGENT, "[%d] invalid total RB share (%d%% -> %d%%)\n", + mod_id, pct_orig, pct); + return 0; + } + + /* check that there is no overlap in slices resulting as the combination of + * first_rb and percentage */ + struct sregion_s sregion[n]; + const int N_RB = flexran_get_N_RB_UL(mod_id, 0); /* assume PCC */ + int k = n_ex; + for (i = 0; i < n_ex; i++) { + sregion[i].start = existing[i]->first_rb; + sregion[i].length = existing[i]->percentage * N_RB / 100; + } + for (i = 0; i < n_up; i++) { + ptrdiff_t d = s[i] ? offs[i] : k++; + AssertFatal(d >= 0 && d < k, "illegal pointer offset (%ld, k=%d)\n", d, k); + sregion[d].start = update[i]->first_rb; + sregion[d].length = update[i]->percentage * N_RB / 100; + } + AssertFatal(k == n, "illegal number of slices while calculating overlap\n"); + if (!check_ul_slice_overlap(mod_id, sregion, k)) { + LOG_E(FLEXRAN_AGENT, "[%d] UL slices are overlapping\n", mod_id); + return 0; + } + + return 1; +} + +int flexran_dl_slice_verify_pct(int pct) +{ + return pct >= 0 && pct <= 100; +} + +int flexran_dl_slice_verify_priority(int prio) +{ + return prio >= 0; +} + +int flexran_dl_slice_verify_position(int pos_low, int pos_high) +{ + return pos_low < pos_high && pos_low >= 0 && pos_high <= N_RBG_MAX; +} + +int flexran_dl_slice_verify_maxmcs(int maxmcs) +{ + return maxmcs >= 0 && maxmcs <= 28; +} + +int flexran_ul_slice_verify_pct(int pct) +{ + return pct >= 0 && pct <= 100; +} + +int flexran_ul_slice_verify_priority(int prio) +{ + return prio >= 0; +} + +int flexran_ul_slice_verify_first_rb(int first_rb) +{ + return first_rb >= 0 && first_rb < 100; +} + +int flexran_ul_slice_verify_maxmcs(int maxmcs) +{ + return maxmcs >= 0 && maxmcs <= 20; +} + +int sregion_compare(const void *_a, const void *_b) +{ + const struct sregion_s *a = (const struct sregion_s *)_a; + const struct sregion_s *b = (const struct sregion_s *)_b; + const 
int res = a->start - b->start; + if (res < 0) return -1; + else if (res == 0) return 0; + else return 1; +} + +int check_ul_slice_overlap(mid_t mod_id, struct sregion_s *sr, int n) +{ + int i; + int overlap, op, u; + const int N_RB = flexran_get_N_RB_UL(mod_id, 0); /* assume PCC */ + qsort(sr, n, sizeof(sr[0]), sregion_compare); + for (i = 0; i < n; i++) { + u = i == n-1 ? N_RB : sr[i+1].start; + AssertFatal(sr[i].start <= u, "unsorted slice list\n"); + overlap = sr[i].start + sr[i].length - u; + if (overlap <= 0) continue; + op = overlap * 100 / sr[i].length; + LOG_W(FLEXRAN_AGENT, "[%d] slice overlap of %d%% detected\n", mod_id, op); + if (op >= 10) /* more than 10% overlap -> refuse */ + return 0; + } + return 1; +} diff --git a/openair2/ENB_APP/CONTROL_MODULES/MAC/flexran_agent_mac_slice_verification.h b/openair2/ENB_APP/CONTROL_MODULES/MAC/flexran_agent_mac_slice_verification.h new file mode 100644 index 0000000000000000000000000000000000000000..a8f4030d3664be1e1388d8d42cae8feed9a4dd90 --- /dev/null +++ b/openair2/ENB_APP/CONTROL_MODULES/MAC/flexran_agent_mac_slice_verification.h @@ -0,0 +1,37 @@ +/* + * Licensed to the OpenAirInterface (OAI) Software Alliance under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The OpenAirInterface Software Alliance licenses this file to You under + * the OAI Public License, Version 1.1 (the "License"); you may not use this file + * except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.openairinterface.org/?page_id=698 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ *------------------------------------------------------------------------------- + * For more information about the OpenAirInterface (OAI) Software Alliance: + * contact@openairinterface.org + */ + +/*! \file flexran_agent_mac_slice_verification.h + * \brief MAC Agent slice verification helper functions + * \author Robert Schmidt + * \date 2018 + * \version 0.1 + */ + +#include "flexran_agent_common_internal.h" +#include "flexran_agent_mac_internal.h" + +int flexran_verify_dl_slice(mid_t mod_id, Protocol__FlexDlSlice *dls); +int flexran_verify_group_dl_slices(mid_t mod_id, Protocol__FlexDlSlice **existing, + int n_ex, Protocol__FlexDlSlice **update, int n_up); +int flexran_verify_ul_slice(mid_t mod_id, Protocol__FlexUlSlice *uls); +int flexran_verify_group_ul_slices(mid_t mod_id, Protocol__FlexUlSlice **existing, + int n_ex, Protocol__FlexUlSlice **update, int n_up); diff --git a/openair2/ENB_APP/CONTROL_MODULES/PDCP/flexran_agent_pdcp.c b/openair2/ENB_APP/CONTROL_MODULES/PDCP/flexran_agent_pdcp.c index b2723f718126973f96d972a3284b995c5e99c3e2..7cb39fb7239a17af935fa026ab0d20c12b866dbb 100644 --- a/openair2/ENB_APP/CONTROL_MODULES/PDCP/flexran_agent_pdcp.c +++ b/openair2/ENB_APP/CONTROL_MODULES/PDCP/flexran_agent_pdcp.c @@ -78,6 +78,7 @@ int flexran_agent_pdcp_stats_reply(mid_t mod_id, // Protocol__FlexHeader *header; int i; + int UE_id; // int cc_id = 0; @@ -85,6 +86,7 @@ int flexran_agent_pdcp_stats_reply(mid_t mod_id, if (report_config->nr_ue > 0) { for (i = 0; i < report_config->nr_ue; i++) { + UE_id = flexran_get_ue_id(mod_id, i); /* Check flag for creation of buffer status report */ if (report_config->ue_report_type[i].ue_report_flags & PROTOCOL__FLEX_UE_STATS_TYPE__FLUST_PDCP_STATS) { @@ -95,7 +97,7 @@ int flexran_agent_pdcp_stats_reply(mid_t mod_id, goto error; protocol__flex_pdcp_stats__init(pdcp_aggr_stats); - flexran_agent_pdcp_aggregate_stats(mod_id, i, pdcp_aggr_stats); + flexran_agent_pdcp_aggregate_stats(mod_id, UE_id, pdcp_aggr_stats); 
pdcp_aggr_stats->has_pkt_tx=1; pdcp_aggr_stats->has_pkt_tx_bytes =1; @@ -104,7 +106,7 @@ int flexran_agent_pdcp_stats_reply(mid_t mod_id, pdcp_aggr_stats->has_pkt_tx_aiat =1; pdcp_aggr_stats->has_pkt_tx_aiat_w =1; - pdcp_aggr_stats->pkt_tx_sn = flexran_get_pdcp_tx_sn(mod_id, i, DEFAULT_DRB); + pdcp_aggr_stats->pkt_tx_sn = flexran_get_pdcp_tx_sn(mod_id, UE_id, DEFAULT_DRB); pdcp_aggr_stats->has_pkt_tx_sn =1; pdcp_aggr_stats->has_pkt_rx =1; @@ -115,7 +117,7 @@ int flexran_agent_pdcp_stats_reply(mid_t mod_id, pdcp_aggr_stats->has_pkt_rx_aiat_w =1; pdcp_aggr_stats->has_pkt_rx_oo =1; - pdcp_aggr_stats->pkt_rx_sn = flexran_get_pdcp_rx_sn(mod_id, i, DEFAULT_DRB); + pdcp_aggr_stats->pkt_rx_sn = flexran_get_pdcp_rx_sn(mod_id, UE_id, DEFAULT_DRB); pdcp_aggr_stats->has_pkt_rx_sn =1; pdcp_aggr_stats->sfn = flexran_get_pdcp_sfn(mod_id); diff --git a/openair2/ENB_APP/CONTROL_MODULES/RRC/flexran_agent_rrc.c b/openair2/ENB_APP/CONTROL_MODULES/RRC/flexran_agent_rrc.c index b4bf0293731620731fc25ffb03268e3b17a1fcfc..b79ad8841715dfcfb227f52ba9f58b9c8c30725f 100644 --- a/openair2/ENB_APP/CONTROL_MODULES/RRC/flexran_agent_rrc.c +++ b/openair2/ENB_APP/CONTROL_MODULES/RRC/flexran_agent_rrc.c @@ -76,147 +76,147 @@ void flexran_agent_ue_state_change(mid_t mod_id, uint32_t rnti, uint8_t state_ch config->has_rnti = 1; config->rnti = rnti; } else if (state_change == PROTOCOL__FLEX_UE_STATE_CHANGE_TYPE__FLUESC_UPDATED - || state_change == PROTOCOL__FLEX_UE_STATE_CHANGE_TYPE__FLUESC_ACTIVATED) { - int i = find_UE_id(mod_id, rnti); - config->has_rnti = 1; - config->rnti = rnti; - if(flexran_get_time_alignment_timer(mod_id,i) != -1) { - config->time_alignment_timer = flexran_get_time_alignment_timer(mod_id,i); - config->has_time_alignment_timer = 1; - } - if(flexran_get_meas_gap_config(mod_id,i) != -1){ - config->meas_gap_config_pattern = flexran_get_meas_gap_config(mod_id,i); - config->has_meas_gap_config_pattern = 1; - } - if(config->has_meas_gap_config_pattern == 1 && - 
config->meas_gap_config_pattern != PROTOCOL__FLEX_MEAS_GAP_CONFIG_PATTERN__FLMGCP_OFF) { - config->meas_gap_config_sf_offset = flexran_get_meas_gap_config_offset(mod_id,i); - config->has_meas_gap_config_sf_offset = 1; - } - //TODO: Set the SPS configuration (Optional) - //Not supported for now, so we do not set it - - //TODO: Set the SR configuration (Optional) - //We do not set it for now - - //TODO: Set the CQI configuration (Optional) - //We do not set it for now - - if(flexran_get_ue_transmission_mode(mod_id,i) != -1) { - config->transmission_mode = flexran_get_ue_transmission_mode(mod_id,i); - config->has_transmission_mode = 1; - } - - config->ue_aggregated_max_bitrate_ul = flexran_get_ue_aggregated_max_bitrate_ul(mod_id,i); - config->has_ue_aggregated_max_bitrate_ul = 1; - - config->ue_aggregated_max_bitrate_dl = flexran_get_ue_aggregated_max_bitrate_dl(mod_id,i); - config->has_ue_aggregated_max_bitrate_dl = 1; - - //TODO: Set the UE capabilities - Protocol__FlexUeCapabilities *c_capabilities; - c_capabilities = malloc(sizeof(Protocol__FlexUeCapabilities)); - protocol__flex_ue_capabilities__init(c_capabilities); - //TODO: Set half duplex (FDD operation) - c_capabilities->has_half_duplex = 0; - c_capabilities->half_duplex = 1;//flexran_get_half_duplex(i); - //TODO: Set intra-frame hopping flag - c_capabilities->has_intra_sf_hopping = 0; - c_capabilities->intra_sf_hopping = 1;//flexran_get_intra_sf_hopping(i); - //TODO: Set support for type 2 hopping with n_sb > 1 - c_capabilities->has_type2_sb_1 = 0; - c_capabilities->type2_sb_1 = 1;//flexran_get_type2_sb_1(i); - //TODO: Set ue category - c_capabilities->has_ue_category = 0; - c_capabilities->ue_category = 1;//flexran_get_ue_category(i); - //TODO: Set UE support for resource allocation type 1 - c_capabilities->has_res_alloc_type1 = 0; - c_capabilities->res_alloc_type1 = 1;//flexran_get_res_alloc_type1(i); - //Set the capabilites to the message - config->capabilities = c_capabilities; - - 
if(flexran_get_ue_transmission_antenna(mod_id,i) != -1) { - config->has_ue_transmission_antenna = 1; - config->ue_transmission_antenna = flexran_get_ue_transmission_antenna(mod_id,i); - } - - if(flexran_get_tti_bundling(mod_id,i) != -1) { - config->has_tti_bundling = 1; - config->tti_bundling = flexran_get_tti_bundling(mod_id,i); - } - - if(flexran_get_maxHARQ_TX(mod_id,i) != -1){ - config->has_max_harq_tx = 1; - config->max_harq_tx = flexran_get_maxHARQ_TX(mod_id,i); - } - - if(flexran_get_beta_offset_ack_index(mod_id,i) != -1) { - config->has_beta_offset_ack_index = 1; - config->beta_offset_ack_index = flexran_get_beta_offset_ack_index(mod_id,i); - } - - if(flexran_get_beta_offset_ri_index(mod_id,i) != -1) { - config->has_beta_offset_ri_index = 1; - config->beta_offset_ri_index = flexran_get_beta_offset_ri_index(mod_id,i); - } - - if(flexran_get_beta_offset_cqi_index(mod_id,i) != -1) { - config->has_beta_offset_cqi_index = 1; - config->beta_offset_cqi_index = flexran_get_beta_offset_cqi_index(mod_id,i); - } - - /* assume primary carrier */ - if(flexran_get_ack_nack_simultaneous_trans(mod_id,i,0) != -1) { - config->has_ack_nack_simultaneous_trans = 1; - config->ack_nack_simultaneous_trans = flexran_get_ack_nack_simultaneous_trans(mod_id,i,0); - } - - if(flexran_get_simultaneous_ack_nack_cqi(mod_id,i) != -1) { - config->has_simultaneous_ack_nack_cqi = 1; - config->simultaneous_ack_nack_cqi = flexran_get_simultaneous_ack_nack_cqi(mod_id,i); - } - - if(flexran_get_aperiodic_cqi_rep_mode(mod_id,i) != -1) { - config->has_aperiodic_cqi_rep_mode = 1; - int mode = flexran_get_aperiodic_cqi_rep_mode(mod_id,i); - if (mode > 4) { - config->aperiodic_cqi_rep_mode = PROTOCOL__FLEX_APERIODIC_CQI_REPORT_MODE__FLACRM_NONE; - } else { - config->aperiodic_cqi_rep_mode = mode; - } - } - - if(flexran_get_tdd_ack_nack_feedback_mode(mod_id, i) != -1) { - config->has_tdd_ack_nack_feedback = 1; - config->tdd_ack_nack_feedback = flexran_get_tdd_ack_nack_feedback_mode(mod_id,i); - } - - 
if(flexran_get_ack_nack_repetition_factor(mod_id, i) != -1) { - config->has_ack_nack_repetition_factor = 1; - config->ack_nack_repetition_factor = flexran_get_ack_nack_repetition_factor(mod_id,i); - } - - if(flexran_get_extended_bsr_size(mod_id, i) != -1) { - config->has_extended_bsr_size = 1; - config->extended_bsr_size = flexran_get_extended_bsr_size(mod_id,i); - } - - config->has_pcell_carrier_index = 1; - config->pcell_carrier_index = UE_PCCID(mod_id, i); - //TODO: Set carrier aggregation support (boolean) - config->has_ca_support = 0; - config->ca_support = 0; - if(config->has_ca_support){ - //TODO: Set cross carrier scheduling support (boolean) - config->has_cross_carrier_sched_support = 1; - config->cross_carrier_sched_support = 0; - //TODO: Set secondary cells configuration - // We do not set it for now. No carrier aggregation support - - //TODO: Set deactivation timer for secondary cell - config->has_scell_deactivation_timer = 0; - config->scell_deactivation_timer = 0; - } + || state_change == PROTOCOL__FLEX_UE_STATE_CHANGE_TYPE__FLUESC_ACTIVATED) { + int i = find_UE_id(mod_id, rnti); + config->has_rnti = 1; + config->rnti = rnti; + config->imsi = flexran_get_ue_imsi(mod_id, i); + config->has_imsi = 1; + config->dl_slice_id = flexran_get_ue_dl_slice_id(mod_id, i); + config->has_dl_slice_id = 1; + config->ul_slice_id = flexran_get_ue_ul_slice_id(mod_id, i); + config->has_ul_slice_id = 1; + if(flexran_get_time_alignment_timer(mod_id,i) != -1) { + config->time_alignment_timer = flexran_get_time_alignment_timer(mod_id,i); + config->has_time_alignment_timer = 1; + } + if(flexran_get_meas_gap_config(mod_id,i) != -1){ + config->meas_gap_config_pattern = flexran_get_meas_gap_config(mod_id,i); + config->has_meas_gap_config_pattern = 1; + } + if(config->has_meas_gap_config_pattern == 1 && + config->meas_gap_config_pattern != PROTOCOL__FLEX_MEAS_GAP_CONFIG_PATTERN__FLMGCP_OFF) { + config->meas_gap_config_sf_offset = flexran_get_meas_gap_config_offset(mod_id,i); + 
config->has_meas_gap_config_sf_offset = 1; + } + //TODO: Set the SPS configuration (Optional) + //Not supported for now, so we do not set it + + //TODO: Set the SR configuration (Optional) + //We do not set it for now + + //TODO: Set the CQI configuration (Optional) + //We do not set it for now + + if(flexran_get_ue_transmission_mode(mod_id,i) != -1) { + config->transmission_mode = flexran_get_ue_transmission_mode(mod_id,i); + config->has_transmission_mode = 1; + } + + config->ue_aggregated_max_bitrate_ul = flexran_get_ue_aggregated_max_bitrate_ul(mod_id,i); + config->has_ue_aggregated_max_bitrate_ul = 1; + + config->ue_aggregated_max_bitrate_dl = flexran_get_ue_aggregated_max_bitrate_dl(mod_id,i); + config->has_ue_aggregated_max_bitrate_dl = 1; + + Protocol__FlexUeCapabilities *c_capabilities; + c_capabilities = malloc(sizeof(Protocol__FlexUeCapabilities)); + protocol__flex_ue_capabilities__init(c_capabilities); + c_capabilities->has_half_duplex = 1; + c_capabilities->half_duplex = flexran_get_half_duplex(mod_id, i); + c_capabilities->has_intra_sf_hopping = 1; + c_capabilities->intra_sf_hopping = flexran_get_intra_sf_hopping(mod_id, i); + c_capabilities->has_type2_sb_1 = 1; + c_capabilities->type2_sb_1 = flexran_get_type2_sb_1(mod_id, i); + c_capabilities->has_ue_category = 1; + c_capabilities->ue_category = flexran_get_ue_category(mod_id, i); + c_capabilities->has_res_alloc_type1 = 1; + c_capabilities->res_alloc_type1 = flexran_get_res_alloc_type1(mod_id, i); + //Set the capabilites to the message + config->capabilities = c_capabilities; + + if(flexran_get_ue_transmission_antenna(mod_id,i) != -1) { + config->has_ue_transmission_antenna = 1; + config->ue_transmission_antenna = flexran_get_ue_transmission_antenna(mod_id,i); + } + + if(flexran_get_tti_bundling(mod_id,i) != -1) { + config->has_tti_bundling = 1; + config->tti_bundling = flexran_get_tti_bundling(mod_id,i); + } + + if(flexran_get_maxHARQ_TX(mod_id,i) != -1){ + config->has_max_harq_tx = 1; + 
config->max_harq_tx = flexran_get_maxHARQ_TX(mod_id,i); + } + + if(flexran_get_beta_offset_ack_index(mod_id,i) != -1) { + config->has_beta_offset_ack_index = 1; + config->beta_offset_ack_index = flexran_get_beta_offset_ack_index(mod_id,i); + } + + if(flexran_get_beta_offset_ri_index(mod_id,i) != -1) { + config->has_beta_offset_ri_index = 1; + config->beta_offset_ri_index = flexran_get_beta_offset_ri_index(mod_id,i); + } + + if(flexran_get_beta_offset_cqi_index(mod_id,i) != -1) { + config->has_beta_offset_cqi_index = 1; + config->beta_offset_cqi_index = flexran_get_beta_offset_cqi_index(mod_id,i); + } + + /* assume primary carrier */ + if(flexran_get_ack_nack_simultaneous_trans(mod_id,i,0) != -1) { + config->has_ack_nack_simultaneous_trans = 1; + config->ack_nack_simultaneous_trans = flexran_get_ack_nack_simultaneous_trans(mod_id,i,0); + } + + if(flexran_get_simultaneous_ack_nack_cqi(mod_id,i) != -1) { + config->has_simultaneous_ack_nack_cqi = 1; + config->simultaneous_ack_nack_cqi = flexran_get_simultaneous_ack_nack_cqi(mod_id,i); + } + + if(flexran_get_aperiodic_cqi_rep_mode(mod_id,i) != -1) { + config->has_aperiodic_cqi_rep_mode = 1; + int mode = flexran_get_aperiodic_cqi_rep_mode(mod_id,i); + if (mode > 4) { + config->aperiodic_cqi_rep_mode = PROTOCOL__FLEX_APERIODIC_CQI_REPORT_MODE__FLACRM_NONE; + } else { + config->aperiodic_cqi_rep_mode = mode; + } + } + + if(flexran_get_tdd_ack_nack_feedback_mode(mod_id, i) != -1) { + config->has_tdd_ack_nack_feedback = 1; + config->tdd_ack_nack_feedback = flexran_get_tdd_ack_nack_feedback_mode(mod_id,i); + } + + if(flexran_get_ack_nack_repetition_factor(mod_id, i) != -1) { + config->has_ack_nack_repetition_factor = 1; + config->ack_nack_repetition_factor = flexran_get_ack_nack_repetition_factor(mod_id,i); + } + + if(flexran_get_extended_bsr_size(mod_id, i) != -1) { + config->has_extended_bsr_size = 1; + config->extended_bsr_size = flexran_get_extended_bsr_size(mod_id,i); + } + + config->has_pcell_carrier_index = 1; + 
config->pcell_carrier_index = UE_PCCID(mod_id, i); + //TODO: Set carrier aggregation support (boolean) + config->has_ca_support = 0; + config->ca_support = 0; + if(config->has_ca_support){ + //TODO: Set cross carrier scheduling support (boolean) + config->has_cross_carrier_sched_support = 1; + config->cross_carrier_sched_support = 0; + //TODO: Set secondary cells configuration + // We do not set it for now. No carrier aggregation support + + //TODO: Set deactivation timer for secondary cell + config->has_scell_deactivation_timer = 0; + config->scell_deactivation_timer = 0; + } } else if (state_change == PROTOCOL__FLEX_UE_STATE_CHANGE_TYPE__FLUESC_MOVED) { // TODO: Not supported for now. Leave blank } @@ -264,27 +264,31 @@ int flexran_agent_destroy_ue_state_change(Protocol__FlexranMessage *msg) { /* this is called by RRC as a part of rrc xface . The controller previously requested this*/ void flexran_trigger_rrc_measurements (mid_t mod_id, MeasResults_t* measResults) { - int i; + //int i; // int priority = 0; // Warning Preventing // void *data; // int size; // err_code_t err_code = -100; triggered_rrc = true; - int num; + //int num; + /* TODO do we need this at the current state? meas_stats is never put into a + * protobuf message?! 
num = flexran_get_num_ues (mod_id); meas_stats = malloc(sizeof(rrc_meas_stats) * num); for (i = 0; i < num; i++){ - meas_stats[i].rnti = flexran_get_ue_crnti(mod_id, i); - meas_stats[i].meas_id = flexran_get_rrc_pcell_measid(mod_id,i); - meas_stats[i].rsrp = flexran_get_rrc_pcell_rsrp(mod_id,i) - 140; + UE_id = flexran_get_ue_id(mod_id, i); + meas_stats[i].rnti = flexran_get_ue_crnti(mod_id, UE_id); + meas_stats[i].meas_id = flexran_get_rrc_pcell_measid(mod_id, UE_id); + meas_stats[i].rsrp = flexran_get_rrc_pcell_rsrp(mod_id, UE_id) - 140; // measResults->measResultPCell.rsrpResult - 140; - meas_stats[i].rsrq = flexran_get_rrc_pcell_rsrq(mod_id,i)/2 - 20; + meas_stats[i].rsrq = flexran_get_rrc_pcell_rsrq(mod_id, UE_id)/2 - 20; // (measResults->measResultPCell.rsrqResult)/2 - 20; } + */ // repl->neigh_meas = NULL; // if (meas->measResultNeighCells != NULL) { @@ -495,11 +499,14 @@ int flexran_agent_rrc_stats_reply(mid_t mod_id, // Protocol__FlexHeader *header; int i,j; + int UE_id; /* Allocate memory for list of UE reports */ if (report_config->nr_ue > 0) { for (i = 0; i < report_config->nr_ue; i++) { + + UE_id = flexran_get_ue_id(mod_id, i); /* Check flag for creation of buffer status report */ if (report_config->ue_report_type[i].ue_report_flags & PROTOCOL__FLEX_UE_STATS_TYPE__FLUST_RRC_MEASUREMENTS) { @@ -511,14 +518,14 @@ int flexran_agent_rrc_stats_reply(mid_t mod_id, goto error; protocol__flex_rrc_measurements__init(rrc_measurements); - rrc_measurements->measid = flexran_get_rrc_pcell_measid(mod_id,i); - rrc_measurements->has_measid = 1; - - rrc_measurements->pcell_rsrp = flexran_get_rrc_pcell_rsrp(mod_id,i); - rrc_measurements->has_pcell_rsrp = 1; - - rrc_measurements->pcell_rsrq = flexran_get_rrc_pcell_rsrq(mod_id,i); - rrc_measurements->has_pcell_rsrq = 1 ; + rrc_measurements->measid = flexran_get_rrc_pcell_measid(mod_id, UE_id); + rrc_measurements->has_measid = 1; + + rrc_measurements->pcell_rsrp = flexran_get_rrc_pcell_rsrp(mod_id, UE_id); + 
rrc_measurements->has_pcell_rsrp = 1; + + rrc_measurements->pcell_rsrq = flexran_get_rrc_pcell_rsrq(mod_id, UE_id); + rrc_measurements->has_pcell_rsrq = 1 ; /* Target Cell, Neghibouring*/ @@ -529,7 +536,7 @@ int flexran_agent_rrc_stats_reply(mid_t mod_id, protocol__flex_neigh_cells_measurements__init(neigh_meas); - neigh_meas->n_eutra_meas = flexran_get_rrc_num_ncell(mod_id, i); + neigh_meas->n_eutra_meas = flexran_get_rrc_num_ncell(mod_id, UE_id); Protocol__FlexEutraMeasurements **eutra_meas = NULL; @@ -547,7 +554,7 @@ int flexran_agent_rrc_stats_reply(mid_t mod_id, protocol__flex_eutra_measurements__init(eutra_meas[j]); - eutra_meas[j]->phys_cell_id = flexran_get_rrc_neigh_phy_cell_id(mod_id, i, j); + eutra_meas[j]->phys_cell_id = flexran_get_rrc_neigh_phy_cell_id(mod_id, UE_id, j); eutra_meas[j]->has_phys_cell_id = 1; @@ -558,10 +565,10 @@ int flexran_agent_rrc_stats_reply(mid_t mod_id, protocol__flex_eutra_ref_signal_meas__init(meas_result); - meas_result->rsrp = flexran_get_rrc_neigh_rsrp(mod_id, i, eutra_meas[j]->phys_cell_id); + meas_result->rsrp = flexran_get_rrc_neigh_rsrp(mod_id, UE_id, eutra_meas[j]->phys_cell_id); meas_result->has_rsrp = 1; - meas_result->rsrq = flexran_get_rrc_neigh_rsrq(mod_id, i, eutra_meas[j]->phys_cell_id); + meas_result->rsrq = flexran_get_rrc_neigh_rsrq(mod_id, UE_id, eutra_meas[j]->phys_cell_id); meas_result->has_rsrq = 1; eutra_meas[j]->meas_result = meas_result; @@ -625,8 +632,10 @@ int flexran_agent_rrc_stats_reply(mid_t mod_id, for (i = 0; i < report_config->nr_ue; i++){ + UE_id = flexran_get_ue_id(mod_id, i); + if (ue_report[i]->rrc_measurements->neigh_meas != NULL){ - for (j = 0; j < flexran_get_rrc_num_ncell(mod_id, i); j++){ + for (j = 0; j < flexran_get_rrc_num_ncell(mod_id, UE_id); j++){ free(ue_report[i]->rrc_measurements->neigh_meas->eutra_meas[j]); } diff --git a/openair2/ENB_APP/MESSAGES/V2/config_common.proto b/openair2/ENB_APP/MESSAGES/V2/config_common.proto index 
7b392b6b3bbc7a89049fc5ae5708026d68064dc4..3af59c1537691b5ac40104cfab714106a770a257 100644 --- a/openair2/ENB_APP/MESSAGES/V2/config_common.proto +++ b/openair2/ENB_APP/MESSAGES/V2/config_common.proto @@ -58,6 +58,87 @@ enum flex_qam { FLEQ_MOD_64QAM = 1; } +// +// Slice config related structures and enums +// +enum flex_dl_sorting { + + CR_ROUND = 0; // Highest HARQ first + CR_SRB12 = 1; // Highest SRB1+2 first + CR_HOL = 2; // Highest HOL first + CR_LC = 3; // Greatest RLC buffer first + CR_CQI = 4; // Highest CQI first + CR_LCP = 5; // Highest LC priority first +} + +enum flex_ul_sorting { + CRU_ROUND = 0; // Highest HARQ first + CRU_BUF = 1; // Highest BSR first + CRU_BTS = 2; // More bytes to schedule first + CRU_MCS = 3; // Highest MCS first + CRU_LCP = 4; // Highest LC priority first + CRU_HOL = 5; // Highest HOL first +} + +enum flex_dl_accounting_policy { + POL_FAIR = 0; + POL_GREEDY = 1; + POL_NUM = 2; +} + +enum flex_ul_accounting_policy { + POLU_FAIR = 0; + POLU_GREEDY = 1; + POLU_NUM = 2; +} + +enum flex_slice_label { + xMBB = 0; + URLLC = 1; + mMTC = 2; + xMTC = 3; + Other = 4; +} + +message flex_dl_slice { + optional uint32 id = 1; + optional flex_slice_label label = 2; + // should be between 0 and 100 + optional uint32 percentage = 3; + // whether this slice should be exempted from interslice sharing + optional bool isolation = 4; + // increasing value means increasing prio + optional uint32 priority = 5; + // min and max RB to use (in frequency) in the range [0, N_RBG_MAX] + optional uint32 position_low = 6; + optional uint32 position_high = 7; + // maximum MCS to be allowed in this slice + optional uint32 maxmcs = 8; + repeated flex_dl_sorting sorting = 9; + optional flex_dl_accounting_policy accounting = 10; + optional string scheduler_name = 11; +} + +message flex_ul_slice { + optional uint32 id = 1; + optional flex_slice_label label = 2; + // should be between 0 and 100 + optional uint32 percentage = 3; + // whether this slice should be exempted 
from interslice sharing + optional bool isolation = 4; + // increasing value means increasing prio + optional uint32 priority = 5; + // RB start to use (in frequency) in the range [0, N_RB_MAX] + optional uint32 first_rb = 6; + // TODO RB number + //optional uint32 length_rb = 7; + // maximum MCS to be allowed in this slice + optional uint32 maxmcs = 8; + repeated flex_ul_sorting sorting = 9; + optional flex_ul_accounting_policy accounting = 10; + optional string scheduler_name = 11; +} + // // UE config related structures and enums // @@ -177,4 +258,4 @@ enum flex_ue_state_change_type { FLUESC_ACTIVATED = 1; FLUESC_DEACTIVATED = 2; FLUESC_MOVED = 3; -} \ No newline at end of file +} diff --git a/openair2/ENB_APP/MESSAGES/V2/config_messages.proto b/openair2/ENB_APP/MESSAGES/V2/config_messages.proto index f734686f011dcb1899a3bb95a52a92f4c6076572..1d5da8dd2abc0e756615096aee666e54125053dc 100644 --- a/openair2/ENB_APP/MESSAGES/V2/config_messages.proto +++ b/openair2/ENB_APP/MESSAGES/V2/config_messages.proto @@ -43,6 +43,19 @@ message flex_cell_config { optional uint32 eutra_band= 37; // operating band optional int32 dl_pdsch_power = 38; // operating downlink power optional int32 ul_pusch_power = 39; // operating uplink power + + optional flex_slice_config slice_config = 42; +} + +message flex_slice_config { + // whether remaining RBs after first intra-slice allocation will + // be allocated to UEs of the same slice + optional bool intraslice_share_active = 3; + // whether remaining RBs after slice allocation will be allocated + // to UEs of another slice. Isolated slices will be ignored. 
+ optional bool interslice_share_active = 4; + repeated flex_dl_slice dl = 1; + repeated flex_ul_slice ul = 2; } message flex_ue_config { @@ -82,6 +95,8 @@ message flex_ue_config { repeated flex_scell_config scell_config = 28; // Secondary cells configuration optional uint32 scell_deactivation_timer = 29;// Deactivation timer for secondary cell optional uint64 imsi = 30; + optional uint32 dl_slice_id = 31; + optional uint32 ul_slice_id = 32; } message flex_lc_ue_config { diff --git a/openair2/ENB_APP/enb_app.c b/openair2/ENB_APP/enb_app.c index ad3f5fd6ed7d063f0ddb36a8cb7e5709d3ac216a..bda6d480a979cd6f7ee7cc0c0ee910a340983a33 100644 --- a/openair2/ENB_APP/enb_app.c +++ b/openair2/ENB_APP/enb_app.c @@ -64,6 +64,8 @@ extern RAN_CONTEXT_t RC; # define ENB_REGISTER_RETRY_DELAY 10 # endif +#include "targets/RT/USER/lte-softmodem.h" + /*------------------------------------------------------------------------------*/ /* @@ -244,6 +246,10 @@ void *eNB_app_task(void *args_p) LOG_I(ENB_APP, "Received %s\n", ITTI_MSG_NAME(msg_p)); break; + case SOFT_RESTART_MESSAGE: + handle_reconfiguration(instance); + break; + case S1AP_REGISTER_ENB_CNF: # if defined(ENABLE_USE_MME) LOG_I(ENB_APP, "[eNB %d] Received %s: associated MME %d\n", instance, ITTI_MSG_NAME (msg_p), @@ -321,3 +327,53 @@ void *eNB_app_task(void *args_p) return NULL; } + +void handle_reconfiguration(module_id_t mod_id) +{ + struct timespec start, end; + clock_gettime(CLOCK_MONOTONIC, &start); + flexran_agent_info_t *flexran = RC.flexran[mod_id]; + + LOG_I(ENB_APP, "lte-softmodem soft-restart requested\n"); + + if (ENB_WAIT == flexran->node_ctrl_state) { + /* this is already waiting, just release */ + pthread_mutex_lock(&flexran->mutex_node_ctrl); + flexran->node_ctrl_state = ENB_NORMAL_OPERATION; + pthread_mutex_unlock(&flexran->mutex_node_ctrl); + pthread_cond_signal(&flexran->cond_node_ctrl); + return; + } + + if (stop_L1L2(mod_id) < 0) { + LOG_E(ENB_APP, "can not stop lte-softmodem, aborting restart\n"); + return; 
+ } + + /* node_ctrl_state should have value ENB_MAKE_WAIT only if this method is not + * executed by the FlexRAN thread */ + if (ENB_MAKE_WAIT == flexran->node_ctrl_state) { + LOG_I(ENB_APP, " * eNB %d: Waiting for FlexRAN RTController command *\n", mod_id); + pthread_mutex_lock(&flexran->mutex_node_ctrl); + flexran->node_ctrl_state = ENB_WAIT; + while (ENB_NORMAL_OPERATION != flexran->node_ctrl_state) + pthread_cond_wait(&flexran->cond_node_ctrl, &flexran->mutex_node_ctrl); + pthread_mutex_unlock(&flexran->mutex_node_ctrl); + } + + if (restart_L1L2(mod_id) < 0) { + LOG_E(ENB_APP, "can not restart, killing lte-softmodem\n"); + itti_terminate_tasks(TASK_PHY_ENB); + return; + } + + clock_gettime(CLOCK_MONOTONIC, &end); + end.tv_sec -= start.tv_sec; + if (end.tv_nsec >= start.tv_nsec) { + end.tv_nsec -= start.tv_nsec; + } else { + end.tv_sec -= 1; + end.tv_nsec = end.tv_nsec - start.tv_nsec + 1000000000; + } + LOG_I(ENB_APP, "lte-softmodem restart succeeded in %ld.%ld s\n", end.tv_sec, end.tv_nsec / 1000000); +} diff --git a/openair2/ENB_APP/enb_app.h b/openair2/ENB_APP/enb_app.h index 4dfea72eefbbbf6bc951f562b17f91b869e3ed4a..9eb5ea2400fee9a964fbd8a3fc2d026b612d437f 100644 --- a/openair2/ENB_APP/enb_app.h +++ b/openair2/ENB_APP/enb_app.h @@ -31,11 +31,11 @@ #define ENB_APP_H_ #include <stdint.h> +#include "platform_types.h" void *eNB_app_task(void *args_p); -/* needed for flexran: start PHY and RRC when restarting */ -void enb_app_start_phy_rrc(uint32_t enb_id_start, uint32_t enb_id_end); +void handle_reconfiguration(module_id_t mod_id); #endif /* ENB_APP_H_ */ diff --git a/openair2/ENB_APP/enb_config.c b/openair2/ENB_APP/enb_config.c index 012375e66888995bfc4ded98d7a99e5307022455..6d9b2edf82ab3cfc2263b824bef33a63065a8212 100644 --- a/openair2/ENB_APP/enb_config.c +++ b/openair2/ENB_APP/enb_config.c @@ -101,54 +101,51 @@ void RCconfig_flexran() ue_TimersAndConstants_t300, ue_TimersAndConstants_t301, ue_TimersAndConstants_t310, ue_TimersAndConstants_t311, 
ue_TimersAndConstants_n310, ue_TimersAndConstants_n311, - ue_TransmissionMode; - - int32_t ue_multiple_max = 0; - - e_SL_CP_Len_r12 rxPool_sc_CP_Len; - e_SL_PeriodComm_r12 rxPool_sc_Period; - e_SL_CP_Len_r12 rxPool_data_CP_Len; - long rxPool_ResourceConfig_prb_Num; - long rxPool_ResourceConfig_prb_Start; - long rxPool_ResourceConfig_prb_End; - SL_OffsetIndicator_r12_PR rxPool_ResourceConfig_offsetIndicator_present; - long rxPool_ResourceConfig_offsetIndicator_choice; - SubframeBitmapSL_r12_PR rxPool_ResourceConfig_subframeBitmap_present; - char* rxPool_ResourceConfig_subframeBitmap_choice_bs_buf; - long rxPool_ResourceConfig_subframeBitmap_choice_bs_size; - long rxPool_ResourceConfig_subframeBitmap_choice_bs_bits_unused; - - //SIB19 - //for discRxPool - SL_CP_Len_r12_t discRxPool_cp_Len; - e_SL_DiscResourcePool_r12__discPeriod_r12 discRxPool_discPeriod; - long discRxPool_numRetx; - long discRxPool_numRepetition; - long discRxPool_ResourceConfig_prb_Num; - long discRxPool_ResourceConfig_prb_Start; - long discRxPool_ResourceConfig_prb_End; - SL_OffsetIndicator_r12_PR discRxPool_ResourceConfig_offsetIndicator_present; - long discRxPool_ResourceConfig_offsetIndicator_choice; - SubframeBitmapSL_r12_PR discRxPool_ResourceConfig_subframeBitmap_present; - char* discRxPool_ResourceConfig_subframeBitmap_choice_bs_buf; - long discRxPool_ResourceConfig_subframeBitmap_choice_bs_size; - long discRxPool_ResourceConfig_subframeBitmap_choice_bs_bits_unused; - //for discRxPoolPS - SL_CP_Len_r12_t discRxPoolPS_cp_Len; - e_SL_DiscResourcePool_r12__discPeriod_r12 discRxPoolPS_discPeriod; - long discRxPoolPS_numRetx; - long discRxPoolPS_numRepetition; - long discRxPoolPS_ResourceConfig_prb_Num; - long discRxPoolPS_ResourceConfig_prb_Start; - long discRxPoolPS_ResourceConfig_prb_End; - SL_OffsetIndicator_r12_PR discRxPoolPS_ResourceConfig_offsetIndicator_present; - long discRxPoolPS_ResourceConfig_offsetIndicator_choice; - SubframeBitmapSL_r12_PR 
discRxPoolPS_ResourceConfig_subframeBitmap_present; - char* discRxPoolPS_ResourceConfig_subframeBitmap_choice_bs_buf; - long discRxPoolPS_ResourceConfig_subframeBitmap_choice_bs_size; - long discRxPoolPS_ResourceConfig_subframeBitmap_choice_bs_bits_unused; - + ue_TransmissionMode, ue_multiple_max; + + const char* rxPool_sc_CP_Len; + const char* rxPool_sc_Period; + const char* rxPool_data_CP_Len; + libconfig_int rxPool_ResourceConfig_prb_Num; + libconfig_int rxPool_ResourceConfig_prb_Start; + libconfig_int rxPool_ResourceConfig_prb_End; + const char* rxPool_ResourceConfig_offsetIndicator_present; + libconfig_int rxPool_ResourceConfig_offsetIndicator_choice; + const char* rxPool_ResourceConfig_subframeBitmap_present; + char* rxPool_ResourceConfig_subframeBitmap_choice_bs_buf; + libconfig_int rxPool_ResourceConfig_subframeBitmap_choice_bs_size; + libconfig_int rxPool_ResourceConfig_subframeBitmap_choice_bs_bits_unused; + //SIB19 + //for discRxPool + const char* discRxPool_cp_Len; + const char* discRxPool_discPeriod; + libconfig_int discRxPool_numRetx; + libconfig_int discRxPool_numRepetition; + libconfig_int discRxPool_ResourceConfig_prb_Num; + libconfig_int discRxPool_ResourceConfig_prb_Start; + libconfig_int discRxPool_ResourceConfig_prb_End; + const char* discRxPool_ResourceConfig_offsetIndicator_present; + libconfig_int discRxPool_ResourceConfig_offsetIndicator_choice; + const char* discRxPool_ResourceConfig_subframeBitmap_present; + char* discRxPool_ResourceConfig_subframeBitmap_choice_bs_buf; + libconfig_int discRxPool_ResourceConfig_subframeBitmap_choice_bs_size; + libconfig_int discRxPool_ResourceConfig_subframeBitmap_choice_bs_bits_unused; + + //for discRxPoolPS + const char* discRxPoolPS_cp_Len; + const char* discRxPoolPS_discPeriod; + libconfig_int discRxPoolPS_numRetx; + libconfig_int discRxPoolPS_numRepetition; + libconfig_int discRxPoolPS_ResourceConfig_prb_Num; + libconfig_int discRxPoolPS_ResourceConfig_prb_Start; + libconfig_int 
discRxPoolPS_ResourceConfig_prb_End; + const char* discRxPoolPS_ResourceConfig_offsetIndicator_present; + libconfig_int discRxPoolPS_ResourceConfig_offsetIndicator_choice; + const char* discRxPoolPS_ResourceConfig_subframeBitmap_present; + char* discRxPoolPS_ResourceConfig_subframeBitmap_choice_bs_buf; + libconfig_int discRxPoolPS_ResourceConfig_subframeBitmap_choice_bs_size; + libconfig_int discRxPoolPS_ResourceConfig_subframeBitmap_choice_bs_bits_unused; /* get number of eNBs */ paramdef_t ENBSParams[] = ENBSPARAMS_DESC; @@ -292,7 +289,7 @@ void RCconfig_L1(void) { LOG_I(PHY,"%s() NFAPI PNF mode - RC.nb_CC[0]=%d for init_eNB_afterRU()\n", __FUNCTION__, RC.nb_CC[0]); LOG_I(PHY,"%s() NFAPI PNF mode - RC.nb_macrlc_inst:%d because used by mac_top_init_eNB()\n", __FUNCTION__, RC.nb_macrlc_inst); - mac_top_init_eNB(); + //mac_top_init_eNB(); configure_nfapi_pnf(RC.eNB[j][0]->eth_params_n.remote_addr, RC.eNB[j][0]->eth_params_n.remote_portc, RC.eNB[j][0]->eth_params_n.my_addr, RC.eNB[j][0]->eth_params_n.my_portd, RC.eNB[j][0]->eth_params_n .remote_portd); } diff --git a/openair2/ENB_APP/flexran_agent_common.c b/openair2/ENB_APP/flexran_agent_common.c index db1943c9451f15304902cefd1d5ee080897280bf..1a92c9e2621e37ae028e1ceba277d7357527db7f 100644 --- a/openair2/ENB_APP/flexran_agent_common.c +++ b/openair2/ENB_APP/flexran_agent_common.c @@ -37,6 +37,7 @@ #include "flexran_agent_ran_api.h" //#include "PHY/extern.h" #include "common/utils/LOG/log.h" +#include "flexran_agent_mac_internal.h" //#include "SCHED/defs.h" #include "RRC/LTE/rrc_extern.h" @@ -287,6 +288,23 @@ int flexran_agent_destroy_enb_config_reply(Protocol__FlexranMessage *msg) { free(reply->cell_config[i]->si_config->si_message); free(reply->cell_config[i]->si_config); } + if (reply->cell_config[i]->slice_config != NULL) { + for (j = 0; j < reply->cell_config[i]->slice_config->n_dl; ++j) { + if (reply->cell_config[i]->slice_config->dl[j]->n_sorting > 0) + 
free(reply->cell_config[i]->slice_config->dl[j]->sorting); + free(reply->cell_config[i]->slice_config->dl[j]->scheduler_name); + free(reply->cell_config[i]->slice_config->dl[j]); + } + free(reply->cell_config[i]->slice_config->dl); + for (j = 0; j < reply->cell_config[i]->slice_config->n_ul; ++j) { + if (reply->cell_config[i]->slice_config->ul[j]->n_sorting > 0) + free(reply->cell_config[i]->slice_config->ul[j]->sorting); + free(reply->cell_config[i]->slice_config->ul[j]->scheduler_name); + free(reply->cell_config[i]->slice_config->ul[j]); + } + free(reply->cell_config[i]->slice_config->ul); + free(reply->cell_config[i]->slice_config); + } free(reply->cell_config[i]); } free(reply->cell_config); @@ -396,8 +414,14 @@ int flexran_agent_control_delegation(mid_t mod_id, const void *params, Protocol_ FILE *f; f = fopen(target, "wb"); - fwrite(control_delegation_msg->payload.data, control_delegation_msg->payload.len, 1, f); - fclose(f); + if (f) { + fwrite(control_delegation_msg->payload.data, control_delegation_msg->payload.len, 1, f); + fclose(f); + } + else { + LOG_W(FLEXRAN_AGENT, "[%d] can not write control delegation data to %s\n", + mod_id, target); + } // long time_elapsed_nanos = timer_end(vartime); *msg = NULL; @@ -435,6 +459,7 @@ int flexran_agent_lc_config_reply(mid_t mod_id, const void *params, Protocol__Fl xid = (lc_config_request_msg->header)->xid; int i, j; + int UE_id; Protocol__FlexLcConfigReply *lc_config_reply_msg; lc_config_reply_msg = malloc(sizeof(Protocol__FlexLcConfigReply)); @@ -460,13 +485,15 @@ int flexran_agent_lc_config_reply(mid_t mod_id, const void *params, Protocol__Fl lc_ue_config[i] = malloc(sizeof(Protocol__FlexLcUeConfig)); protocol__flex_lc_ue_config__init(lc_ue_config[i]); + UE_id = flexran_get_ue_id(mod_id, i); + lc_ue_config[i]->has_rnti = 1; - lc_ue_config[i]->rnti = flexran_get_ue_crnti(mod_id,i); + lc_ue_config[i]->rnti = flexran_get_ue_crnti(mod_id, UE_id); //TODO: Set the number of LC configurations that will be reported for 
this UE //Set this according to the current state of the UE. This is only a temporary fix int status = 0; - status = mac_eNB_get_rrc_status(mod_id, flexran_get_ue_crnti(mod_id, i)); + status = mac_eNB_get_rrc_status(mod_id, flexran_get_ue_crnti(mod_id, UE_id)); /* TODO needs to be revised and appropriate API to be implemented */ if (status < RRC_CONNECTED) { lc_ue_config[i]->n_lc_config = 0; @@ -489,14 +516,14 @@ int flexran_agent_lc_config_reply(mid_t mod_id, const void *params, Protocol__Fl lc_config[j]->has_lcid = 1; lc_config[j]->lcid = j+1; - int lcg = flexran_get_lcg(mod_id, i, j+1); + int lcg = flexran_get_lcg(mod_id, UE_id, j+1); if (lcg >= 0 && lcg <= 3) { lc_config[j]->has_lcg = 1; - lc_config[j]->lcg = flexran_get_lcg(mod_id, i,j+1); + lc_config[j]->lcg = flexran_get_lcg(mod_id, UE_id, j+1); } lc_config[j]->has_direction = 1; - lc_config[j]->direction = flexran_get_direction(i,j+1); + lc_config[j]->direction = flexran_get_direction(UE_id, j+1); //TODO: Bearer type. One of FLQBT_* values. 
Currently only default bearer supported lc_config[j]->has_qos_bearer_type = 1; lc_config[j]->qos_bearer_type = PROTOCOL__FLEX_QOS_BEARER_TYPE__FLQBT_NON_GBR; @@ -563,6 +590,7 @@ int flexran_agent_ue_config_reply(mid_t mod_id, const void *params, Protocol__Fl xid = (ue_config_request_msg->header)->xid; int i; + int UE_id; Protocol__FlexUeConfigReply *ue_config_reply_msg; ue_config_reply_msg = malloc(sizeof(Protocol__FlexUeConfigReply)); @@ -587,26 +615,31 @@ int flexran_agent_ue_config_reply(mid_t mod_id, const void *params, Protocol__Fl ue_config[i] = malloc(sizeof(Protocol__FlexUeConfig)); protocol__flex_ue_config__init(ue_config[i]); - ue_config[i]->rnti = flexran_get_ue_crnti(mod_id,i); + UE_id = flexran_get_ue_id(mod_id, i); + ue_config[i]->rnti = flexran_get_ue_crnti(mod_id, UE_id); ue_config[i]->has_rnti = 1; - ue_config[i]->imsi = flexran_get_ue_imsi(mod_id, i); + ue_config[i]->imsi = flexran_get_ue_imsi(mod_id, UE_id); ue_config[i]->has_imsi = 1; + ue_config[i]->dl_slice_id = flexran_get_ue_dl_slice_id(mod_id, UE_id); + ue_config[i]->has_dl_slice_id = 1; + ue_config[i]->ul_slice_id = flexran_get_ue_ul_slice_id(mod_id, UE_id); + ue_config[i]->has_ul_slice_id = 1; //TODO: Set the DRX configuration (optional) //Not supported for now, so we do not set it - if (flexran_get_time_alignment_timer(mod_id,i) != -1) { - ue_config[i]->time_alignment_timer = flexran_get_time_alignment_timer(mod_id,i); - ue_config[i]->has_time_alignment_timer = 1; + if (flexran_get_time_alignment_timer(mod_id, UE_id) != -1) { + ue_config[i]->time_alignment_timer = flexran_get_time_alignment_timer(mod_id, UE_id); + ue_config[i]->has_time_alignment_timer = 1; } - if (flexran_get_meas_gap_config(mod_id,i) != -1) { - ue_config[i]->meas_gap_config_pattern = flexran_get_meas_gap_config(mod_id,i); - ue_config[i]->has_meas_gap_config_pattern = 1; + if (flexran_get_meas_gap_config(mod_id, UE_id) != -1) { + ue_config[i]->meas_gap_config_pattern = flexran_get_meas_gap_config(mod_id, UE_id); + 
ue_config[i]->has_meas_gap_config_pattern = 1; } if (ue_config[i]->has_meas_gap_config_pattern == 1 && - ue_config[i]->meas_gap_config_pattern != PROTOCOL__FLEX_MEAS_GAP_CONFIG_PATTERN__FLMGCP_OFF) { - ue_config[i]->meas_gap_config_sf_offset = flexran_get_meas_gap_config_offset(mod_id,i); + ue_config[i]->meas_gap_config_pattern != PROTOCOL__FLEX_MEAS_GAP_CONFIG_PATTERN__FLMGCP_OFF) { + ue_config[i]->meas_gap_config_sf_offset = flexran_get_meas_gap_config_offset(mod_id, UE_id); ue_config[i]->has_meas_gap_config_sf_offset = 1; } //TODO: Set the SPS configuration (Optional) @@ -618,77 +651,77 @@ int flexran_agent_ue_config_reply(mid_t mod_id, const void *params, Protocol__Fl //TODO: Set the CQI configuration (Optional) //We do not set it for now - if (flexran_get_ue_transmission_mode(mod_id,i) != -1) { - ue_config[i]->transmission_mode = flexran_get_ue_transmission_mode(mod_id,i); + if (flexran_get_ue_transmission_mode(mod_id, UE_id) != -1) { + ue_config[i]->transmission_mode = flexran_get_ue_transmission_mode(mod_id, UE_id); ue_config[i]->has_transmission_mode = 1; } - ue_config[i]->ue_aggregated_max_bitrate_ul = flexran_get_ue_aggregated_max_bitrate_ul(mod_id,i); + ue_config[i]->ue_aggregated_max_bitrate_ul = flexran_get_ue_aggregated_max_bitrate_ul(mod_id, UE_id); ue_config[i]->has_ue_aggregated_max_bitrate_ul = 1; - ue_config[i]->ue_aggregated_max_bitrate_dl = flexran_get_ue_aggregated_max_bitrate_dl(mod_id,i); + ue_config[i]->ue_aggregated_max_bitrate_dl = flexran_get_ue_aggregated_max_bitrate_dl(mod_id, UE_id); ue_config[i]->has_ue_aggregated_max_bitrate_dl = 1; Protocol__FlexUeCapabilities *capabilities; capabilities = malloc(sizeof(Protocol__FlexUeCapabilities)); protocol__flex_ue_capabilities__init(capabilities); capabilities->has_half_duplex = 1; - capabilities->half_duplex = flexran_get_half_duplex(mod_id, i); + capabilities->half_duplex = flexran_get_half_duplex(mod_id, UE_id); capabilities->has_intra_sf_hopping = 1; - capabilities->intra_sf_hopping = 
flexran_get_intra_sf_hopping(mod_id, i); + capabilities->intra_sf_hopping = flexran_get_intra_sf_hopping(mod_id, UE_id); capabilities->has_type2_sb_1 = 1; - capabilities->type2_sb_1 = flexran_get_type2_sb_1(mod_id, i); + capabilities->type2_sb_1 = flexran_get_type2_sb_1(mod_id, UE_id); capabilities->has_ue_category = 1; - capabilities->ue_category = flexran_get_ue_category(mod_id, i); + capabilities->ue_category = flexran_get_ue_category(mod_id, UE_id); capabilities->has_res_alloc_type1 = 1; - capabilities->res_alloc_type1 = flexran_get_res_alloc_type1(mod_id, i); + capabilities->res_alloc_type1 = flexran_get_res_alloc_type1(mod_id, UE_id); //Set the capabilites to the message ue_config[i]->capabilities = capabilities; - if (flexran_get_ue_transmission_antenna(mod_id,i) != -1) { + if (flexran_get_ue_transmission_antenna(mod_id, UE_id) != -1) { ue_config[i]->has_ue_transmission_antenna = 1; - ue_config[i]->ue_transmission_antenna = flexran_get_ue_transmission_antenna(mod_id,i); + ue_config[i]->ue_transmission_antenna = flexran_get_ue_transmission_antenna(mod_id, UE_id); } - if (flexran_get_tti_bundling(mod_id,i) != -1) { + if (flexran_get_tti_bundling(mod_id, UE_id) != -1) { ue_config[i]->has_tti_bundling = 1; - ue_config[i]->tti_bundling = flexran_get_tti_bundling(mod_id,i); + ue_config[i]->tti_bundling = flexran_get_tti_bundling(mod_id, UE_id); } - if (flexran_get_maxHARQ_TX(mod_id,i) != -1) { + if (flexran_get_maxHARQ_TX(mod_id, UE_id) != -1) { ue_config[i]->has_max_harq_tx = 1; - ue_config[i]->max_harq_tx = flexran_get_maxHARQ_TX(mod_id,i); + ue_config[i]->max_harq_tx = flexran_get_maxHARQ_TX(mod_id, UE_id); } - if (flexran_get_beta_offset_ack_index(mod_id,i) != -1) { - ue_config[i]->has_beta_offset_ack_index = 1; - ue_config[i]->beta_offset_ack_index = flexran_get_beta_offset_ack_index(mod_id,i); + if (flexran_get_beta_offset_ack_index(mod_id, UE_id) != -1) { + ue_config[i]->has_beta_offset_ack_index = 1; + ue_config[i]->beta_offset_ack_index = 
flexran_get_beta_offset_ack_index(mod_id, UE_id); } - if (flexran_get_beta_offset_ri_index(mod_id,i) != -1) { + if (flexran_get_beta_offset_ri_index(mod_id, UE_id) != -1) { ue_config[i]->has_beta_offset_ri_index = 1; - ue_config[i]->beta_offset_ri_index = flexran_get_beta_offset_ri_index(mod_id,i); + ue_config[i]->beta_offset_ri_index = flexran_get_beta_offset_ri_index(mod_id, UE_id); } - if (flexran_get_beta_offset_cqi_index(mod_id,i) != -1) { + if (flexran_get_beta_offset_cqi_index(mod_id, UE_id) != -1) { ue_config[i]->has_beta_offset_cqi_index = 1; - ue_config[i]->beta_offset_cqi_index = flexran_get_beta_offset_cqi_index(mod_id,i); + ue_config[i]->beta_offset_cqi_index = flexran_get_beta_offset_cqi_index(mod_id, UE_id); } /* assume primary carrier */ - if (flexran_get_ack_nack_simultaneous_trans(mod_id, i, 0) != -1) { + if (flexran_get_ack_nack_simultaneous_trans(mod_id, UE_id, 0) != -1) { ue_config[i]->has_ack_nack_simultaneous_trans = 1; - ue_config[i]->ack_nack_simultaneous_trans = flexran_get_ack_nack_simultaneous_trans(mod_id, i, 0); + ue_config[i]->ack_nack_simultaneous_trans = flexran_get_ack_nack_simultaneous_trans(mod_id, UE_id, 0); } - if (flexran_get_simultaneous_ack_nack_cqi(mod_id,i) != -1) { + if (flexran_get_simultaneous_ack_nack_cqi(mod_id, UE_id) != -1) { ue_config[i]->has_simultaneous_ack_nack_cqi = 1; - ue_config[i]->simultaneous_ack_nack_cqi = flexran_get_simultaneous_ack_nack_cqi(mod_id,i); + ue_config[i]->simultaneous_ack_nack_cqi = flexran_get_simultaneous_ack_nack_cqi(mod_id, UE_id); } - if (flexran_get_aperiodic_cqi_rep_mode(mod_id,i) != -1) { + if (flexran_get_aperiodic_cqi_rep_mode(mod_id, UE_id) != -1) { ue_config[i]->has_aperiodic_cqi_rep_mode = 1; - int mode = flexran_get_aperiodic_cqi_rep_mode(mod_id,i); + int mode = flexran_get_aperiodic_cqi_rep_mode(mod_id, UE_id); if (mode > 4) { ue_config[i]->aperiodic_cqi_rep_mode = PROTOCOL__FLEX_APERIODIC_CQI_REPORT_MODE__FLACRM_NONE; } else { @@ -696,26 +729,26 @@ int 
flexran_agent_ue_config_reply(mid_t mod_id, const void *params, Protocol__Fl } } - if (flexran_get_tdd_ack_nack_feedback_mode(mod_id, i) != -1) { + if (flexran_get_tdd_ack_nack_feedback_mode(mod_id, UE_id) != -1) { ue_config[i]->has_tdd_ack_nack_feedback = 1; - ue_config[i]->tdd_ack_nack_feedback = flexran_get_tdd_ack_nack_feedback_mode(mod_id,i); + ue_config[i]->tdd_ack_nack_feedback = flexran_get_tdd_ack_nack_feedback_mode(mod_id, UE_id); } - if(flexran_get_ack_nack_repetition_factor(mod_id, i) != -1) { + if(flexran_get_ack_nack_repetition_factor(mod_id, UE_id) != -1) { ue_config[i]->has_ack_nack_repetition_factor = 1; - ue_config[i]->ack_nack_repetition_factor = flexran_get_ack_nack_repetition_factor(mod_id,i); + ue_config[i]->ack_nack_repetition_factor = flexran_get_ack_nack_repetition_factor(mod_id, UE_id); } - if (flexran_get_extended_bsr_size(mod_id, i) != -1) { + if (flexran_get_extended_bsr_size(mod_id, UE_id) != -1) { ue_config[i]->has_extended_bsr_size = 1; - ue_config[i]->extended_bsr_size = flexran_get_extended_bsr_size(mod_id,i); + ue_config[i]->extended_bsr_size = flexran_get_extended_bsr_size(mod_id, UE_id); } //TODO: Set carrier aggregation support (boolean) ue_config[i]->has_ca_support = 0; ue_config[i]->ca_support = 0; ue_config[i]->has_pcell_carrier_index = 1; - ue_config[i]->pcell_carrier_index = UE_PCCID(mod_id, i); + ue_config[i]->pcell_carrier_index = UE_PCCID(mod_id, UE_id); if(ue_config[i]->has_ca_support){ //TODO: Set cross carrier scheduling support (boolean) ue_config[i]->has_cross_carrier_sched_support = 0; @@ -830,8 +863,8 @@ int flexran_agent_enb_config_reply(mid_t mod_id, const void *params, Protocol__F cell_conf[i] = malloc(sizeof(Protocol__FlexCellConfig)); protocol__flex_cell_config__init(cell_conf[i]); - cell_conf[i]->phy_cell_id = 1; - cell_conf[i]->has_phy_cell_id = flexran_get_cell_id(mod_id,i); + cell_conf[i]->phy_cell_id = flexran_get_cell_id(mod_id,i); + cell_conf[i]->has_phy_cell_id = 1; cell_conf[i]->cell_id = i; 
cell_conf[i]->has_cell_id = 1; @@ -1030,6 +1063,10 @@ int flexran_agent_enb_config_reply(mid_t mod_id, const void *params, Protocol__F cell_conf[i]->carrier_index = i; cell_conf[i]->has_carrier_index = 1; + + /* get a pointer to the config which is maintained in the agent throughout + * its lifetime */ + cell_conf[i]->slice_config = flexran_agent_get_slice_config(mod_id); } enb_config_reply_msg->cell_config=cell_conf; } @@ -1090,5 +1127,39 @@ int flexran_agent_destroy_rrc_measurement(Protocol__FlexranMessage *msg){ return 0; } +int flexran_agent_handle_enb_config_reply(mid_t mod_id, const void *params, Protocol__FlexranMessage **msg) +{ + Protocol__FlexranMessage *input = (Protocol__FlexranMessage *)params; + Protocol__FlexEnbConfigReply *enb_config = input->enb_config_reply_msg; + + if (enb_config->n_cell_config == 0) { + LOG_W(FLEXRAN_AGENT, + "received enb_config_reply message does not contain a cell_config\n"); + *msg = NULL; + return 0; + } + + if (enb_config->n_cell_config > 1) + LOG_W(FLEXRAN_AGENT, "ignoring slice configs for other cell except cell 0\n"); + if (enb_config->cell_config[0]->slice_config) { + prepare_update_slice_config(mod_id, enb_config->cell_config[0]->slice_config); + } else { + initiate_soft_restart(mod_id, enb_config->cell_config[0]); + } + + *msg = NULL; + return 0; +} + +int flexran_agent_handle_ue_config_reply(mid_t mod_id, const void *params, Protocol__FlexranMessage **msg) +{ + int i; + Protocol__FlexranMessage *input = (Protocol__FlexranMessage *)params; + Protocol__FlexUeConfigReply *ue_config_reply = input->ue_config_reply_msg; + for (i = 0; i < ue_config_reply->n_ue_config; i++) + prepare_ue_slice_assoc_update(mod_id, ue_config_reply->ue_config[i]); + *msg = NULL; + return 0; +} diff --git a/openair2/ENB_APP/flexran_agent_common.h b/openair2/ENB_APP/flexran_agent_common.h index a43476fe4f08d1342011c92228dfdfed3793e419..c83d3c24819ad30e0dad650a17d8c7c3d7a1c620 100644 --- a/openair2/ENB_APP/flexran_agent_common.h +++ 
b/openair2/ENB_APP/flexran_agent_common.h @@ -166,4 +166,19 @@ void flexran_agent_send_update_stats(mid_t mod_id); err_code_t flexran_agent_enable_cont_stats_update(mid_t mod_id, xid_t xid, stats_request_config_t *stats_req) ; err_code_t flexran_agent_disable_cont_stats_update(mid_t mod_id); +/* Handle a received eNB config reply message as an "order" to reconfigure. It + * does not come as a reconfiguration message as this is a "structured" + * ProtoBuf message (as opposed to "unstructured" YAML). There is no destructor + * since we do not reply to this message (yet). Instead, the controller has to + * issue another eNB config request message. */ +int flexran_agent_handle_enb_config_reply(mid_t mod_id, const void* params, Protocol__FlexranMessage **msg); + +/* Handle a received UE config reply message as an "order" to reconfigure the + * association of a UE to a slice. It does not come as a reconfiguration + * message as this is a "structured" ProtoBuf message (as opposed to + * "unstructured" YAML). There is no destructor since we do not reply to this + * message (yet). Instead, the controller has to issue another eNB config + * request message. 
*/ +int flexran_agent_handle_ue_config_reply(mid_t mod_id, const void* params, Protocol__FlexranMessage **msg); + #endif diff --git a/openair2/ENB_APP/flexran_agent_common_internal.c b/openair2/ENB_APP/flexran_agent_common_internal.c index e9fc39044d11dbdbd79974f4d5a52d7f56891af6..d288ff5a1813bd6a64c6b736de3685acd9e5fbc8 100644 --- a/openair2/ENB_APP/flexran_agent_common_internal.c +++ b/openair2/ENB_APP/flexran_agent_common_internal.c @@ -31,9 +31,9 @@ #include "flexran_agent_common_internal.h" #include "flexran_agent_mac_internal.h" - /* needed to soft-restart the lte-softmodem */ #include "targets/RT/USER/lte-softmodem.h" +#include "enb_app.h" void handle_reconfiguration(mid_t mod_id) { @@ -589,3 +589,32 @@ int apply_parameter_modification(void *parameter, yaml_parser_t *parser) { return -1; } + +void initiate_soft_restart(module_id_t mod_id, Protocol__FlexCellConfig *c) +{ + uint8_t cc_id = c->has_cell_id ? c->cell_id : 0; + if (c->has_eutra_band) { + flexran_agent_set_operating_eutra_band(mod_id, cc_id, c->eutra_band); + LOG_I(ENB_APP, "Setting eutra_band to %d\n", c->eutra_band); + } + if (c->has_dl_freq && c->has_ul_freq) { + flexran_agent_set_operating_dl_freq(mod_id, cc_id, c->dl_freq); + LOG_I(ENB_APP, "Setting dl_freq to %d\n", c->dl_freq); + int32_t ul_freq_offset = c->ul_freq - c->dl_freq; + flexran_agent_set_operating_ul_freq(mod_id, cc_id, ul_freq_offset); + LOG_I(ENB_APP, "Setting ul_freq to %d\n", c->ul_freq); + } + if (c->has_dl_bandwidth) { + flexran_agent_set_operating_bandwidth(mod_id, cc_id, c->dl_bandwidth); + LOG_I(ENB_APP, "Setting bandwidth to %d\n", c->dl_bandwidth); + if (c->has_ul_bandwidth && c->ul_bandwidth != c->dl_bandwidth) + LOG_W(ENB_APP, "UL/DL bandwidth mismatch, applied DL bandwidth\n"); + } else if (c->has_ul_bandwidth) { + flexran_agent_set_operating_bandwidth(mod_id, cc_id, c->ul_bandwidth); + LOG_I(ENB_APP, "Setting bandwidth to %d\n", c->ul_bandwidth); + } + + MessageDef *msg; + msg = 
itti_alloc_new_message(TASK_FLEXRAN_AGENT, SOFT_RESTART_MESSAGE); + itti_send_msg_to_task(TASK_ENB_APP, ENB_MODULE_ID_TO_INSTANCE(mod_id), msg); +} diff --git a/openair2/ENB_APP/flexran_agent_common_internal.h b/openair2/ENB_APP/flexran_agent_common_internal.h index bf908ac13232b8743c72411481a8be649d884519..544321e228017900d5334501be1da0d327e0a464 100644 --- a/openair2/ENB_APP/flexran_agent_common_internal.h +++ b/openair2/ENB_APP/flexran_agent_common_internal.h @@ -32,6 +32,7 @@ #include <yaml.h> #include "flexran_agent_defs.h" +#include "flexran.pb-c.h" int apply_reconfiguration_policy(mid_t mod_id, const char *policy, size_t policy_length); @@ -57,4 +58,7 @@ int skip_subsystem_parameters_config(yaml_parser_t *parser); //that is not yet implmeneted in order to skip its configuration, without affecting the rest int skip_parameter_modification(yaml_parser_t *parser); +// applies reconfiguration parameters and notifies ENB APP +void initiate_soft_restart(mid_t mod_id, Protocol__FlexCellConfig *c); + #endif diff --git a/openair2/ENB_APP/flexran_agent_handler.c b/openair2/ENB_APP/flexran_agent_handler.c index 66123fd71c60197ec9c40297bce7dc59f1e66343..74ea5bfa1fa80fa69d59a0af958bb7985f56aaa3 100644 --- a/openair2/ENB_APP/flexran_agent_handler.c +++ b/openair2/ENB_APP/flexran_agent_handler.c @@ -46,9 +46,9 @@ flexran_agent_message_decoded_callback agent_messages_callback[][3] = { {0, 0, 0}, /*PROTOCOK__FLEXRAN_MESSAGE__MSG_SF_TRIGGER_MSG*/ {0, 0, 0}, /*PROTOCOL__FLEXRAN_MESSAGE__MSG_UL_SR_INFO_MSG*/ {flexran_agent_enb_config_reply, 0, 0}, /*PROTOCOL__FLEXRAN_MESSAGE__MSG_ENB_CONFIG_REQUEST_MSG*/ - {0, 0, 0}, /*PROTOCOL__FLEXRAN_MESSAGE__MSG_ENB_CONFIG_REPLY_MSG*/ + {flexran_agent_handle_enb_config_reply, 0, 0}, /*PROTOCOL__FLEXRAN_MESSAGE__MSG_ENB_CONFIG_REPLY_MSG*/ {flexran_agent_ue_config_reply, 0, 0}, /*PROTOCOL__FLEXRAN_MESSAGE__MSG_UE_CONFIG_REQUEST_MSG*/ - {0, 0, 0}, /*PROTOCOL__FLEXRAN_MESSAGE__MSG_UE_CONFIG_REPLY_MSG*/ + {flexran_agent_handle_ue_config_reply, 0, 
0}, /*PROTOCOL__FLEXRAN_MESSAGE__MSG_UE_CONFIG_REPLY_MSG*/ {flexran_agent_lc_config_reply, 0, 0}, /*PROTOCOL__FLEXRAN_MESSAGE__MSG_LC_CONFIG_REQUEST_MSG*/ {0, 0, 0}, /*PROTOCOL__FLEXRAN_MESSAGE__MSG_LC_CONFIG_REPLY_MSG*/ {flexran_agent_mac_handle_dl_mac_config, 0, 0}, /*PROTOCOL__FLEXRAN_MESSAGE__MSG_DL_MAC_CONFIG_MSG*/ @@ -210,6 +210,7 @@ int flexran_agent_handle_stats(mid_t mod_id, const void *params, Protocol__Flexr //TODO: We do not deal with multiple CCs at the moment and eNB id is 0 int enb_id = mod_id; + int UE_id; //eNB_MAC_INST *eNB = &eNB_mac_inst[enb_id]; //UE_list_t *eNB_UE_list= &eNB->UE_list; @@ -249,8 +250,9 @@ int flexran_agent_handle_stats(mid_t mod_id, const void *params, Protocol__Flexr goto error; } for (i = 0; i < report_config.nr_ue; i++) { - report_config.ue_report_type[i].ue_rnti = flexran_get_ue_crnti(enb_id, i); //eNB_UE_list->eNB_UE_stats[UE_PCCID(enb_id,i)][i].crnti; - report_config.ue_report_type[i].ue_report_flags = ue_flags; + UE_id = flexran_get_ue_id(mod_id, i); + report_config.ue_report_type[i].ue_rnti = flexran_get_ue_crnti(enb_id, UE_id); //eNB_UE_list->eNB_UE_stats[UE_PCCID(enb_id,i)][i].crnti; + report_config.ue_report_type[i].ue_report_flags = ue_flags; } //Set the number of CCs and create a list with the cell stats configs report_config.nr_cc = MAX_NUM_CCs; @@ -359,7 +361,8 @@ int flexran_agent_handle_stats(mid_t mod_id, const void *params, Protocol__Flexr goto error; } for (i = 0; i < report_config.nr_ue; i++) { - report_config.ue_report_type[i].ue_rnti = ue_req->rnti[i]; + UE_id = flexran_get_ue_id(mod_id, i); + report_config.ue_report_type[i].ue_rnti = ue_req->rnti[UE_id]; report_config.ue_report_type[i].ue_report_flags = ue_req->flags; } break; diff --git a/openair2/ENB_APP/flexran_agent_ran_api.c b/openair2/ENB_APP/flexran_agent_ran_api.c index e39d676061f5e38a47078ee38001c785790811be..973d322a5f30177871bad5ac6984411c447d49dd 100644 --- a/openair2/ENB_APP/flexran_agent_ran_api.c +++ 
b/openair2/ENB_APP/flexran_agent_ran_api.c @@ -26,6 +26,7 @@ * \version 0.1 */ +#include <dlfcn.h> #include "flexran_agent_ran_api.h" static inline int phy_is_present(mid_t mod_id, uint8_t cc_id) @@ -112,6 +113,21 @@ int flexran_get_num_ues(mid_t mod_id) return RC.mac[mod_id]->UE_list.num_UEs; } +int flexran_get_ue_id(mid_t mod_id, int i) +{ + int n; + if (!mac_is_present(mod_id)) return 0; + /* get the (i+1)'th active UE */ + for (n = 0; n < MAX_MOBILES_PER_ENB; ++n) { + if (RC.mac[mod_id]->UE_list.active[n] == TRUE) { + if (i == 0) + return n; + --i; + } + } + return 0; +} + rnti_t flexran_get_ue_crnti(mid_t mod_id, mid_t ue_id) { return UE_RNTI(mod_id, ue_id); @@ -1193,106 +1209,158 @@ void flexran_agent_set_operating_frame_type(mid_t mod_id, uint8_t cc_id, lte_fra /*********** PDCP *************/ /*PDCP super frame counter flexRAN*/ -uint32_t flexran_get_pdcp_sfn(const mid_t mod_id){ + +/* TODO the following is a hack. all the functions below should instead already + * receive the PDCP's uid and operate on it and the caller has the obligation + * to get the ID for this layer. 
+ */ +static inline uint16_t flexran_get_pdcp_uid(mid_t mod_id, mid_t ue_id) +{ + rnti_t rnti = flexran_get_ue_crnti(mod_id, ue_id); + if (rnti == NOT_A_RNTI) return 0; + + for (uint16_t pdcp_uid = 0; pdcp_uid < MAX_MOBILES_PER_ENB; ++pdcp_uid) { + if (pdcp_enb[mod_id].rnti[pdcp_uid] == rnti) + return pdcp_uid; + } + return 0; +} + +uint32_t flexran_get_pdcp_sfn(mid_t mod_id) +{ return pdcp_enb[mod_id].sfn; } /*PDCP super frame counter flexRAN*/ -void flexran_set_pdcp_tx_stat_window(const mid_t mod_id, const mid_t ue_id, uint16_t obs_window){ +void flexran_set_pdcp_tx_stat_window(mid_t mod_id, mid_t ue_id, uint16_t obs_window) +{ + uint16_t uid = flexran_get_pdcp_uid(mod_id, ue_id); if (obs_window > 0 ){ - Pdcp_stats_tx_window_ms[mod_id][ue_id]=obs_window; + Pdcp_stats_tx_window_ms[mod_id][uid]=obs_window; } else{ - Pdcp_stats_tx_window_ms[mod_id][ue_id]=1000; + Pdcp_stats_tx_window_ms[mod_id][uid]=1000; } } /*PDCP super frame counter flexRAN*/ -void flexran_set_pdcp_rx_stat_window(const mid_t mod_id, const mid_t ue_id, uint16_t obs_window){ +void flexran_set_pdcp_rx_stat_window(mid_t mod_id, mid_t ue_id, uint16_t obs_window) +{ + uint16_t uid = flexran_get_pdcp_uid(mod_id, ue_id); if (obs_window > 0 ){ - Pdcp_stats_rx_window_ms[mod_id][ue_id]=obs_window; + Pdcp_stats_rx_window_ms[mod_id][uid]=obs_window; } else{ - Pdcp_stats_rx_window_ms[mod_id][ue_id]=1000; + Pdcp_stats_rx_window_ms[mod_id][uid]=1000; } } /*PDCP num tx pdu status flexRAN*/ -uint32_t flexran_get_pdcp_tx(const mid_t mod_id, const mid_t ue_id, const lcid_t lcid){ - if (mod_id <0 || mod_id> MAX_NUM_CCs || ue_id<0 || ue_id> MAX_MOBILES_PER_ENB || lcid<0 || lcid>NB_RB_MAX) +uint32_t flexran_get_pdcp_tx(mid_t mod_id, mid_t ue_id, lcid_t lcid) +{ + if (mod_id < 0 || mod_id > MAX_NUM_CCs || ue_id < 0 || ue_id > MAX_MOBILES_PER_ENB + || lcid < 0 || lcid > NB_RB_MAX) return -1; - return Pdcp_stats_tx[mod_id][ue_id][lcid]; + uint16_t uid = flexran_get_pdcp_uid(mod_id, ue_id); + return 
Pdcp_stats_tx[mod_id][uid][lcid]; } /*PDCP num tx bytes status flexRAN*/ -uint32_t flexran_get_pdcp_tx_bytes(const mid_t mod_id, const mid_t ue_id, const lcid_t lcid){ - return Pdcp_stats_tx_bytes[mod_id][ue_id][lcid]; +uint32_t flexran_get_pdcp_tx_bytes(mid_t mod_id, mid_t ue_id, lcid_t lcid) +{ + uint16_t uid = flexran_get_pdcp_uid(mod_id, ue_id); + return Pdcp_stats_tx_bytes[mod_id][uid][lcid]; } /*PDCP number of transmit packet / second status flexRAN*/ -uint32_t flexran_get_pdcp_tx_w(const mid_t mod_id, const mid_t ue_id, const lcid_t lcid){ - return Pdcp_stats_tx_w[mod_id][ue_id][lcid]; +uint32_t flexran_get_pdcp_tx_w(mid_t mod_id, mid_t ue_id, lcid_t lcid) +{ + uint16_t uid = flexran_get_pdcp_uid(mod_id, ue_id); + return Pdcp_stats_tx_w[mod_id][uid][lcid]; } /*PDCP throughput (bit/s) status flexRAN*/ -uint32_t flexran_get_pdcp_tx_bytes_w(const mid_t mod_id, const mid_t ue_id, const lcid_t lcid){ - return Pdcp_stats_tx_bytes_w[mod_id][ue_id][lcid]; +uint32_t flexran_get_pdcp_tx_bytes_w(mid_t mod_id, mid_t ue_id, lcid_t lcid) +{ + uint16_t uid = flexran_get_pdcp_uid(mod_id, ue_id); + return Pdcp_stats_tx_bytes_w[mod_id][uid][lcid]; } /*PDCP tx sequence number flexRAN*/ -uint32_t flexran_get_pdcp_tx_sn(const mid_t mod_id, const mid_t ue_id, const lcid_t lcid){ - return Pdcp_stats_tx_sn[mod_id][ue_id][lcid]; +uint32_t flexran_get_pdcp_tx_sn(mid_t mod_id, mid_t ue_id, lcid_t lcid) +{ + uint16_t uid = flexran_get_pdcp_uid(mod_id, ue_id); + return Pdcp_stats_tx_sn[mod_id][uid][lcid]; } /*PDCP tx aggregated packet arrival flexRAN*/ -uint32_t flexran_get_pdcp_tx_aiat(const mid_t mod_id, const mid_t ue_id, const lcid_t lcid){ - return Pdcp_stats_tx_aiat[mod_id][ue_id][lcid]; +uint32_t flexran_get_pdcp_tx_aiat(mid_t mod_id, mid_t ue_id, lcid_t lcid) +{ + uint16_t uid = flexran_get_pdcp_uid(mod_id, ue_id); + return Pdcp_stats_tx_aiat[mod_id][uid][lcid]; } /*PDCP tx aggregated packet arrival flexRAN*/ -uint32_t flexran_get_pdcp_tx_aiat_w(const mid_t mod_id, const mid_t 
ue_id, const lcid_t lcid){ - return Pdcp_stats_tx_aiat_w[mod_id][ue_id][lcid]; +uint32_t flexran_get_pdcp_tx_aiat_w(mid_t mod_id, mid_t ue_id, lcid_t lcid) +{ + uint16_t uid = flexran_get_pdcp_uid(mod_id, ue_id); + return Pdcp_stats_tx_aiat_w[mod_id][uid][lcid]; } - /*PDCP num rx pdu status flexRAN*/ -uint32_t flexran_get_pdcp_rx(const mid_t mod_id, const mid_t ue_id, const lcid_t lcid){ - return Pdcp_stats_rx[mod_id][ue_id][lcid]; +uint32_t flexran_get_pdcp_rx(mid_t mod_id, mid_t ue_id, lcid_t lcid) +{ + uint16_t uid = flexran_get_pdcp_uid(mod_id, ue_id); + return Pdcp_stats_rx[mod_id][uid][lcid]; } /*PDCP num rx bytes status flexRAN*/ -uint32_t flexran_get_pdcp_rx_bytes(const mid_t mod_id, const mid_t ue_id, const lcid_t lcid){ - return Pdcp_stats_rx_bytes[mod_id][ue_id][lcid]; +uint32_t flexran_get_pdcp_rx_bytes(mid_t mod_id, mid_t ue_id, lcid_t lcid) +{ + uint16_t uid = flexran_get_pdcp_uid(mod_id, ue_id); + return Pdcp_stats_rx_bytes[mod_id][uid][lcid]; } /*PDCP number of received packet / second flexRAN*/ -uint32_t flexran_get_pdcp_rx_w(const mid_t mod_id, const mid_t ue_id, const lcid_t lcid){ - return Pdcp_stats_rx_w[mod_id][ue_id][lcid]; +uint32_t flexran_get_pdcp_rx_w(mid_t mod_id, mid_t ue_id, lcid_t lcid) +{ + uint16_t uid = flexran_get_pdcp_uid(mod_id, ue_id); + return Pdcp_stats_rx_w[mod_id][uid][lcid]; } /*PDCP gootput (bit/s) status flexRAN*/ -uint32_t flexran_get_pdcp_rx_bytes_w(const mid_t mod_id, const mid_t ue_id, const lcid_t lcid){ - return Pdcp_stats_rx_bytes_w[mod_id][ue_id][lcid]; +uint32_t flexran_get_pdcp_rx_bytes_w(mid_t mod_id, mid_t ue_id, lcid_t lcid) +{ + uint16_t uid = flexran_get_pdcp_uid(mod_id, ue_id); + return Pdcp_stats_rx_bytes_w[mod_id][uid][lcid]; } /*PDCP rx sequence number flexRAN*/ -uint32_t flexran_get_pdcp_rx_sn(const mid_t mod_id, const mid_t ue_id, const lcid_t lcid){ - return Pdcp_stats_rx_sn[mod_id][ue_id][lcid]; +uint32_t flexran_get_pdcp_rx_sn(mid_t mod_id, mid_t ue_id, lcid_t lcid) +{ + uint16_t uid = 
flexran_get_pdcp_uid(mod_id, ue_id); + return Pdcp_stats_rx_sn[mod_id][uid][lcid]; } /*PDCP rx aggregated packet arrival flexRAN*/ -uint32_t flexran_get_pdcp_rx_aiat(const mid_t mod_id, const mid_t ue_id, const lcid_t lcid){ - return Pdcp_stats_rx_aiat[mod_id][ue_id][lcid]; +uint32_t flexran_get_pdcp_rx_aiat(mid_t mod_id, mid_t ue_id, lcid_t lcid) +{ + uint16_t uid = flexran_get_pdcp_uid(mod_id, ue_id); + return Pdcp_stats_rx_aiat[mod_id][uid][lcid]; } /*PDCP rx aggregated packet arrival flexRAN*/ -uint32_t flexran_get_pdcp_rx_aiat_w(const mid_t mod_id, const mid_t ue_id, const lcid_t lcid){ - return Pdcp_stats_rx_aiat_w[mod_id][ue_id][lcid]; +uint32_t flexran_get_pdcp_rx_aiat_w(mid_t mod_id, mid_t ue_id, lcid_t lcid) +{ + uint16_t uid = flexran_get_pdcp_uid(mod_id, ue_id); + return Pdcp_stats_rx_aiat_w[mod_id][uid][lcid]; } /*PDCP num of received outoforder pdu status flexRAN*/ -uint32_t flexran_get_pdcp_rx_oo(const mid_t mod_id, const mid_t ue_id, const lcid_t lcid){ - return Pdcp_stats_rx_outoforder[mod_id][ue_id][lcid]; +uint32_t flexran_get_pdcp_rx_oo(mid_t mod_id, mid_t ue_id, lcid_t lcid) +{ + uint16_t uid = flexran_get_pdcp_uid(mod_id, ue_id); + return Pdcp_stats_rx_outoforder[mod_id][uid][lcid]; } /******************** RRC *****************************/ @@ -1393,3 +1461,419 @@ float flexran_get_rrc_neigh_rsrq(mid_t mod_id, mid_t ue_id, int cell_id) if (!ue_context_p->ue_context.measResults->measResultNeighCells->choice.measResultListEUTRA.list.array[cell_id]->measResult.rsrqResult) return 0; return RSRQ_meas_mapping[*(ue_context_p->ue_context.measResults->measResultNeighCells->choice.measResultListEUTRA.list.array[cell_id]->measResult.rsrqResult)]; } + +int flexran_get_ue_dl_slice_id(mid_t mod_id, mid_t ue_id) +{ + if (!mac_is_present(mod_id)) return -1; + int slice_idx = RC.mac[mod_id]->UE_list.assoc_dl_slice_idx[ue_id]; + if (slice_idx >= 0 && slice_idx < RC.mac[mod_id]->slice_info.n_dl) + return RC.mac[mod_id]->slice_info.dl[slice_idx].id; + return 0; 
+} + +void flexran_set_ue_dl_slice_idx(mid_t mod_id, mid_t ue_id, int slice_idx) +{ + if (!mac_is_present(mod_id)) return; + if (flexran_get_ue_crnti(mod_id, ue_id) == NOT_A_RNTI) return; + if (!flexran_dl_slice_exists(mod_id, slice_idx)) return; + RC.mac[mod_id]->UE_list.assoc_dl_slice_idx[ue_id] = slice_idx; +} + +int flexran_get_ue_ul_slice_id(mid_t mod_id, mid_t ue_id) +{ + if (!mac_is_present(mod_id)) return -1; + int slice_idx = RC.mac[mod_id]->UE_list.assoc_ul_slice_idx[ue_id]; + if (slice_idx >= 0 && slice_idx < RC.mac[mod_id]->slice_info.n_ul) + return RC.mac[mod_id]->slice_info.ul[slice_idx].id; + return 0; +} + +void flexran_set_ue_ul_slice_idx(mid_t mod_id, mid_t ue_id, int slice_idx) +{ + if (!mac_is_present(mod_id)) return; + if (flexran_get_ue_crnti(mod_id, ue_id) == NOT_A_RNTI) return; + if (!flexran_ul_slice_exists(mod_id, slice_idx)) return; + RC.mac[mod_id]->UE_list.assoc_ul_slice_idx[ue_id] = slice_idx; +} + +int flexran_dl_slice_exists(mid_t mod_id, int slice_idx) +{ + if (!mac_is_present(mod_id)) return -1; + return slice_idx >= 0 && slice_idx < RC.mac[mod_id]->slice_info.n_dl; +} + +int flexran_create_dl_slice(mid_t mod_id, slice_id_t slice_id) +{ + if (!mac_is_present(mod_id)) return -1; + int newidx = RC.mac[mod_id]->slice_info.n_dl; + if (newidx >= MAX_NUM_SLICES) return -1; + ++RC.mac[mod_id]->slice_info.n_dl; + flexran_set_dl_slice_id(mod_id, newidx, slice_id); + return newidx; +} + +int flexran_find_dl_slice(mid_t mod_id, slice_id_t slice_id) +{ + if (!mac_is_present(mod_id)) return -1; + slice_info_t *sli = &RC.mac[mod_id]->slice_info; + int n = sli->n_dl; + for (int i = 0; i < n; i++) { + if (sli->dl[i].id == slice_id) return i; + } + return -1; +} + +int flexran_remove_dl_slice(mid_t mod_id, int slice_idx) +{ + if (!mac_is_present(mod_id)) return -1; + slice_info_t *sli = &RC.mac[mod_id]->slice_info; + if (sli->n_dl <= 1) return -1; + + if (sli->dl[slice_idx].sched_name) free(sli->dl[slice_idx].sched_name); + --sli->n_dl; + /* move 
last element to the position of the removed one */ + if (slice_idx != sli->n_dl) + memcpy(&sli->dl[slice_idx], &sli->dl[sli->n_dl], sizeof(sli->dl[sli->n_dl])); + memset(&sli->dl[sli->n_dl], 0, sizeof(sli->dl[sli->n_dl])); + + /* all UEs that have been in the old slice are put into slice index 0 */ + int *assoc_list = RC.mac[mod_id]->UE_list.assoc_dl_slice_idx; + for (int i = 0; i < MAX_MOBILES_PER_ENB; ++i) { + if (assoc_list[i] == slice_idx) + assoc_list[i] = 0; + } + return sli->n_dl; +} + +int flexran_get_num_dl_slices(mid_t mod_id) +{ + if (!mac_is_present(mod_id)) return -1; + return RC.mac[mod_id]->slice_info.n_dl; +} + +int flexran_get_intraslice_sharing_active(mid_t mod_id) +{ + if (!mac_is_present(mod_id)) return -1; + return RC.mac[mod_id]->slice_info.intraslice_share_active; +} +void flexran_set_intraslice_sharing_active(mid_t mod_id, int intraslice_active) +{ + if (!mac_is_present(mod_id)) return; + RC.mac[mod_id]->slice_info.intraslice_share_active = intraslice_active; +} + +int flexran_get_interslice_sharing_active(mid_t mod_id) +{ + if (!mac_is_present(mod_id)) return -1; + return RC.mac[mod_id]->slice_info.interslice_share_active; +} +void flexran_set_interslice_sharing_active(mid_t mod_id, int interslice_active) +{ + if (!mac_is_present(mod_id)) return; + RC.mac[mod_id]->slice_info.interslice_share_active = interslice_active; +} + +slice_id_t flexran_get_dl_slice_id(mid_t mod_id, int slice_idx) +{ + if (!mac_is_present(mod_id)) return -1; + return RC.mac[mod_id]->slice_info.dl[slice_idx].id; +} +void flexran_set_dl_slice_id(mid_t mod_id, int slice_idx, slice_id_t slice_id) +{ + if (!mac_is_present(mod_id)) return; + RC.mac[mod_id]->slice_info.dl[slice_idx].id = slice_id; +} + +int flexran_get_dl_slice_percentage(mid_t mod_id, int slice_idx) +{ + if (!mac_is_present(mod_id)) return -1; + return RC.mac[mod_id]->slice_info.dl[slice_idx].pct * 100.0f; +} +void flexran_set_dl_slice_percentage(mid_t mod_id, int slice_idx, int percentage) +{ + if 
(!mac_is_present(mod_id)) return; + RC.mac[mod_id]->slice_info.dl[slice_idx].pct = percentage / 100.0f; +} + +int flexran_get_dl_slice_isolation(mid_t mod_id, int slice_idx) +{ + if (!mac_is_present(mod_id)) return -1; + return RC.mac[mod_id]->slice_info.dl[slice_idx].isol; +} +void flexran_set_dl_slice_isolation(mid_t mod_id, int slice_idx, int is_isolated) +{ + if (!mac_is_present(mod_id)) return; + RC.mac[mod_id]->slice_info.dl[slice_idx].isol = is_isolated; +} + +int flexran_get_dl_slice_priority(mid_t mod_id, int slice_idx) +{ + if (!mac_is_present(mod_id)) return -1; + return RC.mac[mod_id]->slice_info.dl[slice_idx].prio; +} +void flexran_set_dl_slice_priority(mid_t mod_id, int slice_idx, int priority) +{ + if (!mac_is_present(mod_id)) return; + RC.mac[mod_id]->slice_info.dl[slice_idx].prio = priority; +} + +int flexran_get_dl_slice_position_low(mid_t mod_id, int slice_idx) +{ + if (!mac_is_present(mod_id)) return -1; + return RC.mac[mod_id]->slice_info.dl[slice_idx].pos_low; +} +void flexran_set_dl_slice_position_low(mid_t mod_id, int slice_idx, int poslow) +{ + if (!mac_is_present(mod_id)) return; + RC.mac[mod_id]->slice_info.dl[slice_idx].pos_low = poslow; +} + +int flexran_get_dl_slice_position_high(mid_t mod_id, int slice_idx) +{ + if (!mac_is_present(mod_id)) return -1; + return RC.mac[mod_id]->slice_info.dl[slice_idx].pos_high; +} +void flexran_set_dl_slice_position_high(mid_t mod_id, int slice_idx, int poshigh) +{ + if (!mac_is_present(mod_id)) return; + RC.mac[mod_id]->slice_info.dl[slice_idx].pos_high = poshigh; +} + +int flexran_get_dl_slice_maxmcs(mid_t mod_id, int slice_idx) +{ + if (!mac_is_present(mod_id)) return -1; + return RC.mac[mod_id]->slice_info.dl[slice_idx].maxmcs; +} +void flexran_set_dl_slice_maxmcs(mid_t mod_id, int slice_idx, int maxmcs) +{ + if (!mac_is_present(mod_id)) return; + RC.mac[mod_id]->slice_info.dl[slice_idx].maxmcs = maxmcs; +} + +int flexran_get_dl_slice_sorting(mid_t mod_id, int slice_idx, Protocol__FlexDlSorting 
**sorting_list) +{ + if (!mac_is_present(mod_id)) return -1; + if (!(*sorting_list)) { + *sorting_list = calloc(CR_NUM, sizeof(Protocol__FlexDlSorting)); + if (!(*sorting_list)) return -1; + } + uint32_t policy = RC.mac[mod_id]->slice_info.dl[slice_idx].sorting; + for (int i = 0; i < CR_NUM; i++) { + switch (policy >> 4 * (CR_NUM - 1 - i) & 0xF) { + case CR_ROUND: + (*sorting_list)[i] = PROTOCOL__FLEX_DL_SORTING__CR_ROUND; + break; + case CR_SRB12: + (*sorting_list)[i] = PROTOCOL__FLEX_DL_SORTING__CR_SRB12; + break; + case CR_HOL: + (*sorting_list)[i] = PROTOCOL__FLEX_DL_SORTING__CR_HOL; + break; + case CR_LC: + (*sorting_list)[i] = PROTOCOL__FLEX_DL_SORTING__CR_LC; + break; + case CR_CQI: + (*sorting_list)[i] = PROTOCOL__FLEX_DL_SORTING__CR_CQI; + break; + case CR_LCP: + (*sorting_list)[i] = PROTOCOL__FLEX_DL_SORTING__CR_LCP; + break; + default: + /* this should not happen, but a "default" */ + (*sorting_list)[i] = PROTOCOL__FLEX_DL_SORTING__CR_ROUND; + break; + } + } + return CR_NUM; +} +void flexran_set_dl_slice_sorting(mid_t mod_id, int slice_idx, Protocol__FlexDlSorting *sorting_list, int n) +{ + if (!mac_is_present(mod_id)) return; + uint32_t policy = 0; + for (int i = 0; i < n && i < CR_NUM; i++) { + switch (sorting_list[i]) { + case PROTOCOL__FLEX_DL_SORTING__CR_ROUND: + policy = policy << 4 | CR_ROUND; + break; + case PROTOCOL__FLEX_DL_SORTING__CR_SRB12: + policy = policy << 4 | CR_SRB12; + break; + case PROTOCOL__FLEX_DL_SORTING__CR_HOL: + policy = policy << 4 | CR_HOL; + break; + case PROTOCOL__FLEX_DL_SORTING__CR_LC: + policy = policy << 4 | CR_LC; + break; + case PROTOCOL__FLEX_DL_SORTING__CR_CQI: + policy = policy << 4 | CR_CQI; + break; + case PROTOCOL__FLEX_DL_SORTING__CR_LCP: + policy = policy << 4 | CR_LCP; + break; + default: /* suppresses warnings */ + policy = policy << 4 | CR_ROUND; + break; + } + } + /* fill up with 0 == CR_ROUND */ + if (CR_NUM > n) policy = policy << 4 * (CR_NUM - n); + RC.mac[mod_id]->slice_info.dl[slice_idx].sorting = 
policy; +} + +Protocol__FlexDlAccountingPolicy flexran_get_dl_slice_accounting_policy(mid_t mod_id, int slice_idx) +{ + if (!mac_is_present(mod_id)) return PROTOCOL__FLEX_DL_ACCOUNTING_POLICY__POL_FAIR; + switch (RC.mac[mod_id]->slice_info.dl[slice_idx].accounting) { + case POL_FAIR: + return PROTOCOL__FLEX_DL_ACCOUNTING_POLICY__POL_FAIR; + case POL_GREEDY: + return PROTOCOL__FLEX_DL_ACCOUNTING_POLICY__POL_GREEDY; + default: + return PROTOCOL__FLEX_DL_ACCOUNTING_POLICY__POL_FAIR; + } +} +void flexran_set_dl_slice_accounting_policy(mid_t mod_id, int slice_idx, Protocol__FlexDlAccountingPolicy accounting) +{ + if (!mac_is_present(mod_id)) return; + switch (accounting) { + case PROTOCOL__FLEX_DL_ACCOUNTING_POLICY__POL_FAIR: + RC.mac[mod_id]->slice_info.dl[slice_idx].accounting = POL_FAIR; + return; + case PROTOCOL__FLEX_DL_ACCOUNTING_POLICY__POL_GREEDY: + RC.mac[mod_id]->slice_info.dl[slice_idx].accounting = POL_GREEDY; + return; + default: + RC.mac[mod_id]->slice_info.dl[slice_idx].accounting = POL_FAIR; + return; + } +} + +char *flexran_get_dl_slice_scheduler(mid_t mod_id, int slice_idx) +{ + if (!mac_is_present(mod_id)) return NULL; + return RC.mac[mod_id]->slice_info.dl[slice_idx].sched_name; +} +int flexran_set_dl_slice_scheduler(mid_t mod_id, int slice_idx, char *name) +{ + if (!mac_is_present(mod_id)) return 0; + if (RC.mac[mod_id]->slice_info.dl[slice_idx].sched_name) + free(RC.mac[mod_id]->slice_info.dl[slice_idx].sched_name); + RC.mac[mod_id]->slice_info.dl[slice_idx].sched_name = strdup(name); + RC.mac[mod_id]->slice_info.dl[slice_idx].sched_cb = dlsym(NULL, name); + return RC.mac[mod_id]->slice_info.dl[slice_idx].sched_cb != NULL; +} + +int flexran_create_ul_slice(mid_t mod_id, slice_id_t slice_id) +{ + if (!mac_is_present(mod_id)) return -1; + int newidx = RC.mac[mod_id]->slice_info.n_ul; + if (newidx >= MAX_NUM_SLICES) return -1; + ++RC.mac[mod_id]->slice_info.n_ul; + flexran_set_ul_slice_id(mod_id, newidx, slice_id); + return newidx; +} + +int 
flexran_find_ul_slice(mid_t mod_id, slice_id_t slice_id) +{ + if (!mac_is_present(mod_id)) return -1; + slice_info_t *sli = &RC.mac[mod_id]->slice_info; + int n = sli->n_ul; + for (int i = 0; i < n; i++) { + if (sli->ul[i].id == slice_id) return i; + } + return -1; +} + +int flexran_remove_ul_slice(mid_t mod_id, int slice_idx) +{ + if (!mac_is_present(mod_id)) return -1; + slice_info_t *sli = &RC.mac[mod_id]->slice_info; + if (sli->n_ul <= 1) return -1; + + if (sli->ul[slice_idx].sched_name) free(sli->ul[slice_idx].sched_name); + --sli->n_ul; + /* move last element to the position of the removed one */ + if (slice_idx != sli->n_ul) + memcpy(&sli->ul[slice_idx], &sli->ul[sli->n_ul], sizeof(sli->ul[sli->n_ul])); + memset(&sli->ul[sli->n_ul], 0, sizeof(sli->ul[sli->n_ul])); + + /* all UEs that have been in the old slice are put into slice index 0 */ + int *assoc_list = RC.mac[mod_id]->UE_list.assoc_ul_slice_idx; + for (int i = 0; i < MAX_MOBILES_PER_ENB; ++i) { + if (assoc_list[i] == slice_idx) + assoc_list[i] = 0; + } + return sli->n_ul; +} + +int flexran_get_num_ul_slices(mid_t mod_id) +{ + if (!mac_is_present(mod_id)) return -1; + return RC.mac[mod_id]->slice_info.n_ul; +} + +int flexran_ul_slice_exists(mid_t mod_id, int slice_idx) +{ + if (!mac_is_present(mod_id)) return -1; + return slice_idx >= 0 && slice_idx < RC.mac[mod_id]->slice_info.n_ul; +} + +slice_id_t flexran_get_ul_slice_id(mid_t mod_id, int slice_idx) +{ + if (!mac_is_present(mod_id)) return -1; + return RC.mac[mod_id]->slice_info.ul[slice_idx].id; +} +void flexran_set_ul_slice_id(mid_t mod_id, int slice_idx, slice_id_t slice_id) +{ + if (!mac_is_present(mod_id)) return; + RC.mac[mod_id]->slice_info.ul[slice_idx].id = slice_id; +} + +int flexran_get_ul_slice_percentage(mid_t mod_id, int slice_idx) +{ + if (!mac_is_present(mod_id)) return -1; + return RC.mac[mod_id]->slice_info.ul[slice_idx].pct * 100.0f; +} +void flexran_set_ul_slice_percentage(mid_t mod_id, int slice_idx, int percentage) +{ + if 
(!mac_is_present(mod_id)) return; + RC.mac[mod_id]->slice_info.ul[slice_idx].pct = percentage / 100.0f; +} + +int flexran_get_ul_slice_first_rb(mid_t mod_id, int slice_idx) +{ + if (!mac_is_present(mod_id)) return -1; + return RC.mac[mod_id]->slice_info.ul[slice_idx].first_rb; +} + +void flexran_set_ul_slice_first_rb(mid_t mod_id, int slice_idx, int first_rb) +{ + if (!mac_is_present(mod_id)) return; + RC.mac[mod_id]->slice_info.ul[slice_idx].first_rb = first_rb; +} + +int flexran_get_ul_slice_maxmcs(mid_t mod_id, int slice_idx) +{ + if (!mac_is_present(mod_id)) return -1; + return RC.mac[mod_id]->slice_info.ul[slice_idx].maxmcs; +} +void flexran_set_ul_slice_maxmcs(mid_t mod_id, int slice_idx, int maxmcs) +{ + if (!mac_is_present(mod_id)) return; + RC.mac[mod_id]->slice_info.ul[slice_idx].maxmcs = maxmcs; +} + +char *flexran_get_ul_slice_scheduler(mid_t mod_id, int slice_idx) +{ + if (!mac_is_present(mod_id)) return NULL; + return RC.mac[mod_id]->slice_info.ul[slice_idx].sched_name; +} +int flexran_set_ul_slice_scheduler(mid_t mod_id, int slice_idx, char *name) +{ + if (!mac_is_present(mod_id)) return 0; + if (RC.mac[mod_id]->slice_info.ul[slice_idx].sched_name) + free(RC.mac[mod_id]->slice_info.ul[slice_idx].sched_name); + RC.mac[mod_id]->slice_info.ul[slice_idx].sched_name = strdup(name); + RC.mac[mod_id]->slice_info.ul[slice_idx].sched_cb = dlsym(NULL, name); + return RC.mac[mod_id]->slice_info.ul[slice_idx].sched_cb != NULL; +} diff --git a/openair2/ENB_APP/flexran_agent_ran_api.h b/openair2/ENB_APP/flexran_agent_ran_api.h index dacca55ab78a2bba6f642c25bc7e5d710429ba3a..37bdaae170d856064f33b3b4bf8c87851a59d42b 100644 --- a/openair2/ENB_APP/flexran_agent_ran_api.h +++ b/openair2/ENB_APP/flexran_agent_ran_api.h @@ -73,6 +73,10 @@ uint16_t flexran_get_future_sfn_sf(mid_t mod_id, int ahead_of_time); /* Return the number of attached UEs */ int flexran_get_num_ues(mid_t mod_id); +/* Return the UE id of attached UE as opposed to the index [0,NUM UEs] (i.e., + * the 
i'th active UE). Returns 0 if the i'th active UE could not be found. */ +int flexran_get_ue_id(mid_t mod_id, int i); + /* Get the rnti of a UE with id ue_id */ rnti_t flexran_get_ue_crnti(mid_t mod_id, mid_t ue_id); @@ -421,59 +425,59 @@ uint8_t flexran_get_rrc_status(mid_t mod_id, mid_t ue_id); /***************************** PDCP ***********************/ /*PDCP superframe numberflexRAN*/ -uint32_t flexran_get_pdcp_sfn(const mid_t mod_id); +uint32_t flexran_get_pdcp_sfn(mid_t mod_id); /*PDCP pdcp tx stats window*/ -void flexran_set_pdcp_tx_stat_window(const mid_t mod_id, const mid_t ue_id, uint16_t obs_window); +void flexran_set_pdcp_tx_stat_window(mid_t mod_id, mid_t ue_id, uint16_t obs_window); /*PDCP pdcp rx stats window*/ -void flexran_set_pdcp_rx_stat_window(const mid_t mod_id, const mid_t ue_id, uint16_t obs_window); +void flexran_set_pdcp_rx_stat_window(mid_t mod_id, mid_t ue_id, uint16_t obs_window); /*PDCP num tx pdu status flexRAN*/ -uint32_t flexran_get_pdcp_tx(const mid_t mod_id, const mid_t ue_id, const lcid_t lcid); +uint32_t flexran_get_pdcp_tx(mid_t mod_id, mid_t ue_id, lcid_t lcid); /*PDCP num tx bytes status flexRAN*/ -uint32_t flexran_get_pdcp_tx_bytes(const mid_t mod_id, const mid_t ue_id, const lcid_t lcid); +uint32_t flexran_get_pdcp_tx_bytes(mid_t mod_id, mid_t ue_id, lcid_t lcid); /*PDCP number of transmit packet / second status flexRAN*/ -uint32_t flexran_get_pdcp_tx_w(const mid_t mod_id, const mid_t ue_id, const lcid_t lcid); +uint32_t flexran_get_pdcp_tx_w(mid_t mod_id, mid_t ue_id, lcid_t lcid); /*PDCP pdcp tx bytes in a given window flexRAN*/ -uint32_t flexran_get_pdcp_tx_bytes_w(const mid_t mod_id, const mid_t ue_id, const lcid_t lcid); +uint32_t flexran_get_pdcp_tx_bytes_w(mid_t mod_id, mid_t ue_id, lcid_t lcid); /*PDCP tx sequence number flexRAN*/ -uint32_t flexran_get_pdcp_tx_sn(const mid_t mod_id, const mid_t ue_id, const lcid_t lcid); +uint32_t flexran_get_pdcp_tx_sn(mid_t mod_id, mid_t ue_id, lcid_t lcid); /*PDCP tx aggregated 
packet arrival flexRAN*/ -uint32_t flexran_get_pdcp_tx_aiat(const mid_t mod_id, const mid_t ue_id, const lcid_t lcid); +uint32_t flexran_get_pdcp_tx_aiat(mid_t mod_id, mid_t ue_id, lcid_t lcid); /*PDCP tx aggregated packet arrival per second flexRAN*/ -uint32_t flexran_get_pdcp_tx_aiat_w(const mid_t mod_id, const mid_t ue_id, const lcid_t lcid); +uint32_t flexran_get_pdcp_tx_aiat_w(mid_t mod_id, mid_t ue_id, lcid_t lcid); /*PDCP num rx pdu status flexRAN*/ -uint32_t flexran_get_pdcp_rx(const mid_t mod_id, const mid_t ue_id, const lcid_t lcid); +uint32_t flexran_get_pdcp_rx(mid_t mod_id, mid_t ue_id, lcid_t lcid); /*PDCP num rx bytes status flexRAN*/ -uint32_t flexran_get_pdcp_rx_bytes(const mid_t mod_id, const mid_t ue_id, const lcid_t lcid); +uint32_t flexran_get_pdcp_rx_bytes(mid_t mod_id, mid_t ue_id, lcid_t lcid); /*PDCP number of received packet / second flexRAN*/ -uint32_t flexran_get_pdcp_rx_w(const mid_t mod_id, const mid_t ue_id, const lcid_t lcid); +uint32_t flexran_get_pdcp_rx_w(mid_t mod_id, mid_t ue_id, lcid_t lcid); /*PDCP gootput (bit/s) status flexRAN*/ -uint32_t flexran_get_pdcp_rx_bytes_w(const mid_t mod_id, const mid_t ue_id, const lcid_t lcid); +uint32_t flexran_get_pdcp_rx_bytes_w(mid_t mod_id, mid_t ue_id, lcid_t lcid); /*PDCP rx sequence number flexRAN*/ -uint32_t flexran_get_pdcp_rx_sn(const mid_t mod_id, const mid_t ue_id, const lcid_t lcid); +uint32_t flexran_get_pdcp_rx_sn(mid_t mod_id, mid_t ue_id, lcid_t lcid); /*PDCP rx aggregated packet arrival flexRAN*/ -uint32_t flexran_get_pdcp_rx_aiat(const mid_t mod_id, const mid_t ue_id, const lcid_t lcid); +uint32_t flexran_get_pdcp_rx_aiat(mid_t mod_id, mid_t ue_id, lcid_t lcid); /*PDCP rx aggregated packet arrival per second flexRAN*/ -uint32_t flexran_get_pdcp_rx_aiat_w(const mid_t mod_id, const mid_t ue_id, const lcid_t lcid); +uint32_t flexran_get_pdcp_rx_aiat_w(mid_t mod_id, mid_t ue_id, lcid_t lcid); /*PDCP num of received outoforder pdu status flexRAN*/ -uint32_t 
flexran_get_pdcp_rx_oo(const mid_t mod_id, const mid_t ue_id, const lcid_t lcid); +uint32_t flexran_get_pdcp_rx_oo(mid_t mod_id, mid_t ue_id, lcid_t lcid); /*********************RRC**********************/ /*Get primary cell measuremeant id flexRAN*/ @@ -504,3 +508,155 @@ int flexran_get_rrc_neigh_plmn_mcc(mid_t mod_id, mid_t ue_id, int cell_id); */ /*Get MNC PLMN identity neighbouring Cell*/ /* currently not implemented int flexran_get_rrc_neigh_plmn_mnc(mid_t mod_id, mid_t ue_id, int cell_id); */ + +/************************** Slice configuration **************************/ + +/* Get the DL slice ID for a UE */ +int flexran_get_ue_dl_slice_id(mid_t mod_id, mid_t ue_id); + +/* Set the DL slice index(!) for a UE */ +void flexran_set_ue_dl_slice_idx(mid_t mod_id, mid_t ue_id, int slice_idx); + +/* Get the UL slice ID for a UE */ +int flexran_get_ue_ul_slice_id(mid_t mod_id, mid_t ue_id); + +/* Set the UL slice index(!) for a UE */ +void flexran_set_ue_ul_slice_idx(mid_t mod_id, mid_t ue_id, int slice_idx); + +/* Whether intraslice sharing is active, return boolean */ +int flexran_get_intraslice_sharing_active(mid_t mod_id); +/* Set whether intraslice sharing is active */ +void flexran_set_intraslice_sharing_active(mid_t mod_id, int intraslice_active); + +/* Whether intraslice sharing is active, return boolean */ +int flexran_get_interslice_sharing_active(mid_t mod_id); +/* Set whether intraslice sharing is active */ +void flexran_set_interslice_sharing_active(mid_t mod_id, int interslice_active); + +/* Get the number of slices in DL */ +int flexran_get_num_dl_slices(mid_t mod_id); + +/* Query slice existence in DL. 
Return is boolean value */ +int flexran_dl_slice_exists(mid_t mod_id, int slice_idx); + +/* Create slice in DL, returns the new slice index */ +int flexran_create_dl_slice(mid_t mod_id, slice_id_t slice_id); +/* Finds slice in DL with given slice_id and returns slice index */ +int flexran_find_dl_slice(mid_t mod_id, slice_id_t slice_id); +/* Remove slice in DL, returns new number of slices or -1 on error */ +int flexran_remove_dl_slice(mid_t mod_id, int slice_idx); + +/* Get the ID of a slice in DL */ +slice_id_t flexran_get_dl_slice_id(mid_t mod_id, int slice_idx); +/* Set the ID of a slice in DL */ +void flexran_set_dl_slice_id(mid_t mod_id, int slice_idx, slice_id_t slice_id); + +/* Get the RB share a slice in DL, value 0-100 */ +int flexran_get_dl_slice_percentage(mid_t mod_id, int slice_idx); +/* Set the RB share a slice in DL, value 0-100 */ +void flexran_set_dl_slice_percentage(mid_t mod_id, int slice_idx, int percentage); + +/* Whether a slice in DL is isolated */ +int flexran_get_dl_slice_isolation(mid_t mod_id, int slice_idx); +/* Set whether a slice in DL is isolated */ +void flexran_set_dl_slice_isolation(mid_t mod_id, int slice_idx, int is_isolated); + +/* Get the priority of a slice in DL */ +int flexran_get_dl_slice_priority(mid_t mod_id, int slice_idx); +/* Get the priority of a slice in DL */ +void flexran_set_dl_slice_priority(mid_t mod_id, int slice_idx, int priority); + +/* Get the lower end of the frequency range for the slice positioning in DL */ +int flexran_get_dl_slice_position_low(mid_t mod_id, int slice_idx); +/* Set the lower end of the frequency range for the slice positioning in DL */ +void flexran_set_dl_slice_position_low(mid_t mod_id, int slice_idx, int poslow); + +/* Get the higher end of the frequency range for the slice positioning in DL */ +int flexran_get_dl_slice_position_high(mid_t mod_id, int slice_idx); +/* Set the higher end of the frequency range for the slice positioning in DL */ +void 
flexran_set_dl_slice_position_high(mid_t mod_id, int slice_idx, int poshigh); + +/* Get the maximum MCS for slice in DL */ +int flexran_get_dl_slice_maxmcs(mid_t mod_id, int slice_idx); +/* Set the maximum MCS for slice in DL */ +void flexran_set_dl_slice_maxmcs(mid_t mod_id, int slice_idx, int maxmcs); + +/* Get the sorting order of a slice in DL, return value is number of elements + * in sorting_list */ +int flexran_get_dl_slice_sorting(mid_t mod_id, int slice_idx, Protocol__FlexDlSorting **sorting_list); +/* Set the sorting order of a slice in DL */ +void flexran_set_dl_slice_sorting(mid_t mod_id, int slice_idx, Protocol__FlexDlSorting *sorting_list, int n); + +/* Get the accounting policy for a slice in DL */ +Protocol__FlexDlAccountingPolicy flexran_get_dl_slice_accounting_policy(mid_t mod_id, int slice_idx); +/* Set the accounting policy for a slice in DL */ +void flexran_set_dl_slice_accounting_policy(mid_t mod_id, int slice_idx, Protocol__FlexDlAccountingPolicy accounting); + +/* Get the scheduler name for a slice in DL */ +char *flexran_get_dl_slice_scheduler(mid_t mod_id, int slice_idx); +/* Set the scheduler name for a slice in DL */ +int flexran_set_dl_slice_scheduler(mid_t mod_id, int slice_idx, char *name); + +/* Get the number of slices in UL */ +int flexran_get_num_ul_slices(mid_t mod_id); + +/* Query slice existence in UL. 
Return is boolean value */ +int flexran_ul_slice_exists(mid_t mod_id, int slice_idx); + +/* Create slice in UL, returns the new slice index */ +int flexran_create_ul_slice(mid_t mod_id, slice_id_t slice_id); +/* Finds slice in UL with given slice_id and returns slice index */ +int flexran_find_ul_slice(mid_t mod_id, slice_id_t slice_id); +/* Remove slice in UL */ +int flexran_remove_ul_slice(mid_t mod_id, int slice_idx); + +/* Get the ID of a slice in UL */ +slice_id_t flexran_get_ul_slice_id(mid_t mod_id, int slice_idx); +/* Set the ID of a slice in UL */ +void flexran_set_ul_slice_id(mid_t mod_id, int slice_idx, slice_id_t slice_id); + +/* Get the RB share a slice in UL, value 0-100 */ +int flexran_get_ul_slice_percentage(mid_t mod_id, int slice_idx); +/* Set the RB share a slice in UL, value 0-100 */ +void flexran_set_ul_slice_percentage(mid_t mod_id, int slice_idx, int percentage); + +/* TODO Whether a slice in UL is isolated */ +/*int flexran_get_ul_slice_isolation(mid_t mod_id, int slice_idx);*/ +/* TODO Set whether a slice in UL is isolated */ +/*void flexran_set_ul_slice_isolation(mid_t mod_id, int slice_idx, int is_isolated);*/ + +/* TODO Get the priority of a slice in UL */ +/*int flexran_get_ul_slice_priority(mid_t mod_id, int slice_idx);*/ +/* TODO Set the priority of a slice in UL */ +/*void flexran_set_ul_slice_priority(mid_t mod_id, int slice_idx, int priority);*/ + +/* Get the first RB for allocation in a slice in UL */ +int flexran_get_ul_slice_first_rb(mid_t mod_id, int slice_idx); +/* Set the first RB for allocation in a slice in UL */ +void flexran_set_ul_slice_first_rb(mid_t mod_id, int slice_idx, int first_rb); + +/* TODO Get the number of RB for the allocation in a slice in UL */ +/*int flexran_get_ul_slice_length_rb(mid_t mod_id, int slice_idx);*/ +/* TODO Set the of number of RB for the allocation in a slice in UL */ +/*void flexran_set_ul_slice_length_rb(mid_t mod_id, int slice_idx, int poshigh);*/ + +/* Get the maximum MCS for slice in UL 
*/ +int flexran_get_ul_slice_maxmcs(mid_t mod_id, int slice_idx); +/* Set the maximum MCS for slice in UL */ +void flexran_set_ul_slice_maxmcs(mid_t mod_id, int slice_idx, int maxmcs); + +/* TODO Get the sorting order of a slice in UL, return value is number of elements + * in sorting_list */ +/*int flexran_get_ul_slice_sorting(mid_t mod_id, int slice_idx, Protocol__FlexUlSorting **sorting_list);*/ +/* TODO Set the sorting order of a slice in UL */ +/*void flexran_set_ul_slice_sorting(mid_t mod_id, int slice_idx, Protocol__FlexUlSorting *sorting_list, int n);*/ + +/* TODO Get the accounting policy for a slice in UL */ +/*Protocol__UlAccountingPolicy flexran_get_ul_slice_accounting_policy(mid_t mod_id, int slice_idx);*/ +/* TODO Set the accounting policy for a slice in UL */ +/*void flexran_get_ul_slice_accounting_policy(mid_t mod_id, int slice_idx, Protocol__UlAccountingPolicy accountin);*/ + +/* Get the scheduler name for a slice in UL */ +char *flexran_get_ul_slice_scheduler(mid_t mod_id, int slice_idx); +/* Set the scheduler name for a slice in UL */ +int flexran_set_ul_slice_scheduler(mid_t mod_id, int slice_idx, char *name); diff --git a/openair2/LAYER2/MAC/config.c b/openair2/LAYER2/MAC/config.c index 8fd85e78118f497a215eff748b1ffbbadb975d34..5331f07a6883a7ebbbb0b0bf4f432711a3660e38 100644 --- a/openair2/LAYER2/MAC/config.c +++ b/openair2/LAYER2/MAC/config.c @@ -733,7 +733,7 @@ rrc_mac_config_req_eNB(module_id_t Mod_idP, if (RC.mac == NULL) l2_init_eNB(); - mac_top_init_eNB(); + //mac_top_init_eNB(); RC.mac[Mod_idP]->common_channels[CC_idP].mib = mib; RC.mac[Mod_idP]->common_channels[CC_idP].physCellId = physCellId; @@ -852,16 +852,21 @@ rrc_mac_config_req_eNB(module_id_t Mod_idP, LOG_E(MAC, "%s:%d:%s: ERROR, UE_id == -1\n", __FILE__, __LINE__, __FUNCTION__); } else { - if (logicalChannelConfig) + if (logicalChannelConfig) { UE_list-> UE_template[CC_idP][UE_id].lcgidmap [logicalChannelIdentity] = *logicalChannelConfig-> - 
ul_SpecificParameters->logicalChannelGroup; - else - UE_list-> - UE_template[CC_idP][UE_id].lcgidmap - [logicalChannelIdentity] = 0; + ul_SpecificParameters->logicalChannelGroup; + UE_list-> + UE_template[CC_idP][UE_id].lcgidpriority + [logicalChannelIdentity]= + logicalChannelConfig->ul_SpecificParameters->priority; + + } else + UE_list-> + UE_template[CC_idP][UE_id].lcgidmap + [logicalChannelIdentity] = 0; } } diff --git a/openair2/LAYER2/MAC/eNB_scheduler.c b/openair2/LAYER2/MAC/eNB_scheduler.c index a2988f26d8c27b34265fe94635d625e2858935c0..4dbf3cf36f57d2c700463d071b02d805bb61a704 100644 --- a/openair2/LAYER2/MAC/eNB_scheduler.c +++ b/openair2/LAYER2/MAC/eNB_scheduler.c @@ -703,6 +703,10 @@ eNB_dlsch_ulsch_scheduler(module_id_t module_idP, frame_t frameP, allocate_CCEs(module_idP, CC_id, frameP, subframeP, 2); } + if (mac_agent_registered[module_idP] && subframeP == 9) { + flexran_agent_slice_update(module_idP); + } + stop_meas(&RC.mac[module_idP]->eNB_scheduler); VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME diff --git a/openair2/LAYER2/MAC/eNB_scheduler_dlsch.c b/openair2/LAYER2/MAC/eNB_scheduler_dlsch.c index b3021fcc999f0b5fb557fe24c39649e10304fc99..08cb89e56d485d265c106421ed1d278dd03efd74 100644 --- a/openair2/LAYER2/MAC/eNB_scheduler_dlsch.c +++ b/openair2/LAYER2/MAC/eNB_scheduler_dlsch.c @@ -29,6 +29,7 @@ */ +#define _GNU_SOURCE #include "LAYER2/MAC/mac.h" #include "LAYER2/MAC/mac_proto.h" @@ -54,10 +55,6 @@ #include "intertask_interface.h" #endif -#include "ENB_APP/flexran_agent_defs.h" -#include "flexran_agent_ran_api.h" -#include "header.pb-c.h" -#include "flexran.pb-c.h" #include <dlfcn.h> #include "T.h" @@ -65,54 +62,21 @@ #define ENABLE_MAC_PAYLOAD_DEBUG //#define DEBUG_eNB_SCHEDULER 1 +#include "common/ran_context.h" extern RAN_CONTEXT_t RC; extern uint8_t nfapi_mode; - -// number of active slices for past and current time -int n_active_slices = 1; -int n_active_slices_current = 1; - -// RB share for each slice for past and current time -float 
avg_slice_percentage=0.25; -float slice_percentage[MAX_NUM_SLICES] = {1.0, 0.0, 0.0, 0.0}; -float slice_percentage_current[MAX_NUM_SLICES] = {1.0, 0.0, 0.0, 0.0}; -float total_slice_percentage = 0; -float total_slice_percentage_current = 0; - -// MAX MCS for each slice for past and current time -int slice_maxmcs[MAX_NUM_SLICES] = { 28, 28, 28, 28 }; -int slice_maxmcs_current[MAX_NUM_SLICES] = { 28, 28, 28, 28 }; - -int update_dl_scheduler[MAX_NUM_SLICES] = { 1, 1, 1, 1 }; -int update_dl_scheduler_current[MAX_NUM_SLICES] = { 1, 1, 1, 1 }; - -// name of available scheduler -char *dl_scheduler_type[MAX_NUM_SLICES] = - { "schedule_ue_spec", - "schedule_ue_spec", - "schedule_ue_spec", - "schedule_ue_spec" - }; - -// The lists of criteria that enforce the sorting policies of the slices -uint32_t sorting_policy[MAX_NUM_SLICES] = {0x01234, 0x01234, 0x01234, 0x01234}; -uint32_t sorting_policy_current[MAX_NUM_SLICES] = {0x01234, 0x01234, 0x01234, 0x01234}; - -// pointer to the slice specific scheduler -slice_scheduler_dl slice_sched_dl[MAX_NUM_SLICES] = {0}; - //------------------------------------------------------------------------------ void add_ue_dlsch_info(module_id_t module_idP, - int CC_id, - int UE_id, sub_frame_t subframeP, UE_DLSCH_STATUS status) + int CC_id, + int UE_id, sub_frame_t subframeP, UE_DLSCH_STATUS status) //------------------------------------------------------------------------------ { //LOG_D(MAC, "%s(module_idP:%d, CC_id:%d, UE_id:%d, subframeP:%d, status:%d) serving_num:%d rnti:%x\n", __FUNCTION__, module_idP, CC_id, UE_id, subframeP, status, eNB_dlsch_info[module_idP][CC_id][UE_id].serving_num, UE_RNTI(module_idP,UE_id)); eNB_dlsch_info[module_idP][CC_id][UE_id].rnti = - UE_RNTI(module_idP, UE_id); + UE_RNTI(module_idP, UE_id); // eNB_dlsch_info[module_idP][CC_id][ue_mod_idP].weight = weight; eNB_dlsch_info[module_idP][CC_id][UE_id].subframe = subframeP; eNB_dlsch_info[module_idP][CC_id][UE_id].status = status; @@ -124,7 +88,7 @@ 
add_ue_dlsch_info(module_id_t module_idP, //------------------------------------------------------------------------------ int schedule_next_dlue(module_id_t module_idP, int CC_id, - sub_frame_t subframeP) + sub_frame_t subframeP) //------------------------------------------------------------------------------ { @@ -134,7 +98,7 @@ schedule_next_dlue(module_id_t module_idP, int CC_id, for (next_ue = UE_list->head; next_ue >= 0; next_ue = UE_list->next[next_ue]) { if (eNB_dlsch_info[module_idP][CC_id][next_ue].status == - S_DL_WAITING) { + S_DL_WAITING) { return next_ue; } } @@ -146,21 +110,21 @@ schedule_next_dlue(module_id_t module_idP, int CC_id, } } - return (-1); //next_ue; + return (-1); //next_ue; } //------------------------------------------------------------------------------ int generate_dlsch_header(unsigned char *mac_header, - unsigned char num_sdus, - unsigned short *sdu_lengths, - unsigned char *sdu_lcids, - unsigned char drx_cmd, - unsigned short timing_advance_cmd, - unsigned char *ue_cont_res_id, - unsigned char short_padding, - unsigned short post_padding) + unsigned char num_sdus, + unsigned short *sdu_lengths, + unsigned char *sdu_lcids, + unsigned char drx_cmd, + unsigned short timing_advance_cmd, + unsigned char *ue_cont_res_id, + unsigned char short_padding, + unsigned short post_padding) //------------------------------------------------------------------------------ { @@ -218,10 +182,10 @@ generate_dlsch_header(unsigned char *mac_header, // msg("last_size %d,mac_header_ptr %p\n",last_size,mac_header_ptr); ((TIMING_ADVANCE_CMD *) ce_ptr)->R = 0; AssertFatal(timing_advance_cmd < 64, - "timing_advance_cmd %d > 63\n", timing_advance_cmd); - ((TIMING_ADVANCE_CMD *) ce_ptr)->TA = timing_advance_cmd; //(timing_advance_cmd+31)&0x3f; + "timing_advance_cmd %d > 63\n", timing_advance_cmd); + ((TIMING_ADVANCE_CMD *) ce_ptr)->TA = timing_advance_cmd; //(timing_advance_cmd+31)&0x3f; LOG_D(MAC, "timing advance =%d (%d)\n", timing_advance_cmd, - 
((TIMING_ADVANCE_CMD *) ce_ptr)->TA); + ((TIMING_ADVANCE_CMD *) ce_ptr)->TA); ce_ptr += sizeof(TIMING_ADVANCE_CMD); //msg("offset %d\n",ce_ptr-mac_header_control_elements); } @@ -246,9 +210,9 @@ generate_dlsch_header(unsigned char *mac_header, last_size = 1; LOG_T(MAC, - "[eNB ][RAPROC] Generate contention resolution msg: %x.%x.%x.%x.%x.%x\n", - ue_cont_res_id[0], ue_cont_res_id[1], ue_cont_res_id[2], - ue_cont_res_id[3], ue_cont_res_id[4], ue_cont_res_id[5]); + "[eNB ][RAPROC] Generate contention resolution msg: %x.%x.%x.%x.%x.%x\n", + ue_cont_res_id[0], ue_cont_res_id[1], ue_cont_res_id[2], + ue_cont_res_id[3], ue_cont_res_id[4], ue_cont_res_id[5]); memcpy(ce_ptr, ue_cont_res_id, 6); ce_ptr += 6; @@ -258,7 +222,7 @@ generate_dlsch_header(unsigned char *mac_header, for (i = 0; i < num_sdus; i++) { LOG_T(MAC, "[eNB] Generate DLSCH header num sdu %d len sdu %d\n", - num_sdus, sdu_lengths[i]); + num_sdus, sdu_lengths[i]); if (first_element > 0) { mac_header_ptr->E = 1; @@ -291,10 +255,10 @@ generate_dlsch_header(unsigned char *mac_header, last_size = 3; #ifdef DEBUG_HEADER_PARSING LOG_D(MAC, - "[eNB] generate long sdu, size %x (MSB %x, LSB %x)\n", - sdu_lengths[i], - ((SCH_SUBHEADER_LONG *) mac_header_ptr)->L_MSB, - ((SCH_SUBHEADER_LONG *) mac_header_ptr)->L_LSB); + "[eNB] generate long sdu, size %x (MSB %x, LSB %x)\n", + sdu_lengths[i], + ((SCH_SUBHEADER_LONG *) mac_header_ptr)->L_MSB, + ((SCH_SUBHEADER_LONG *) mac_header_ptr)->L_LSB); #endif } } @@ -316,7 +280,7 @@ generate_dlsch_header(unsigned char *mac_header, printf("F = 1, sdu len (L field) %d\n",(((SCH_SUBHEADER_LONG*)mac_header_ptr)->L)); } */ - if (post_padding > 0) { // we have lots of padding at the end of the packet + if (post_padding > 0) { // we have lots of padding at the end of the packet mac_header_ptr->E = 1; mac_header_ptr += last_size; // add a padding element @@ -324,7 +288,7 @@ generate_dlsch_header(unsigned char *mac_header, mac_header_ptr->E = 0; mac_header_ptr->LCID = SHORT_PADDING; 
mac_header_ptr++; - } else { // no end of packet padding + } else { // no end of packet padding // last SDU subhead is of fixed type (sdu length implicitly to be computed at UE) mac_header_ptr++; } @@ -334,9 +298,9 @@ generate_dlsch_header(unsigned char *mac_header, if ((ce_ptr - mac_header_control_elements) > 0) { // printf("Copying %d bytes for control elements\n",ce_ptr-mac_header_control_elements); memcpy((void *) mac_header_ptr, mac_header_control_elements, - ce_ptr - mac_header_control_elements); + ce_ptr - mac_header_control_elements); mac_header_ptr += - (unsigned char) (ce_ptr - mac_header_control_elements); + (unsigned char) (ce_ptr - mac_header_control_elements); } //msg("After CEs %d\n",(uint8_t*)mac_header_ptr - mac_header); @@ -346,7 +310,7 @@ generate_dlsch_header(unsigned char *mac_header, //------------------------------------------------------------------------------ void set_ul_DAI(int module_idP, int UE_idP, int CC_idP, int frameP, - int subframeP) + int subframeP) //------------------------------------------------------------------------------ { @@ -354,202 +318,105 @@ set_ul_DAI(int module_idP, int UE_idP, int CC_idP, int frameP, UE_list_t *UE_list = &eNB->UE_list; unsigned char DAI; COMMON_channels_t *cc = &eNB->common_channels[CC_idP]; - if (cc->tdd_Config != NULL) { //TDD + if (cc->tdd_Config != NULL) { //TDD DAI = (UE_list->UE_template[CC_idP][UE_idP].DAI - 1) & 3; LOG_D(MAC, - "[eNB %d] CC_id %d Frame %d, subframe %d: DAI %d for UE %d\n", - module_idP, CC_idP, frameP, subframeP, DAI, UE_idP); + "[eNB %d] CC_id %d Frame %d, subframe %d: DAI %d for UE %d\n", + module_idP, CC_idP, frameP, subframeP, DAI, UE_idP); // Save DAI for Format 0 DCI switch (cc->tdd_Config->subframeAssignment) { - case 0: - // if ((subframeP==0)||(subframeP==1)||(subframeP==5)||(subframeP==6)) - break; - - case 1: - switch (subframeP) { case 0: - case 1: - UE_list->UE_template[CC_idP][UE_idP].DAI_ul[7] = DAI; - break; - - case 4: - 
UE_list->UE_template[CC_idP][UE_idP].DAI_ul[8] = DAI; + // if ((subframeP==0)||(subframeP==1)||(subframeP==5)||(subframeP==6)) break; - case 5: - case 6: - UE_list->UE_template[CC_idP][UE_idP].DAI_ul[2] = DAI; - break; - - case 9: - UE_list->UE_template[CC_idP][UE_idP].DAI_ul[3] = DAI; + case 1: + switch (subframeP) { + case 0: + case 1: + UE_list->UE_template[CC_idP][UE_idP].DAI_ul[7] = DAI; break; - } - break; - - case 2: - // if ((subframeP==3)||(subframeP==8)) - // UE_list->UE_template[CC_idP][UE_idP].DAI_ul = DAI; - break; - case 3: + case 4: + UE_list->UE_template[CC_idP][UE_idP].DAI_ul[8] = DAI; + break; - //if ((subframeP==6)||(subframeP==8)||(subframeP==0)) { - // LOG_D(MAC,"schedule_ue_spec: setting UL DAI to %d for subframeP %d => %d\n",DAI,subframeP, ((subframeP+8)%10)>>1); - // UE_list->UE_template[CC_idP][UE_idP].DAI_ul[((subframeP+8)%10)>>1] = DAI; - //} - switch (subframeP) { - case 5: - case 6: - case 1: - UE_list->UE_template[CC_idP][UE_idP].DAI_ul[2] = DAI; - break; + case 5: + case 6: + UE_list->UE_template[CC_idP][UE_idP].DAI_ul[2] = DAI; + break; - case 7: - case 8: - UE_list->UE_template[CC_idP][UE_idP].DAI_ul[3] = DAI; - break; + case 9: + UE_list->UE_template[CC_idP][UE_idP].DAI_ul[3] = DAI; + break; + } - case 9: - case 0: - UE_list->UE_template[CC_idP][UE_idP].DAI_ul[4] = DAI; - break; + case 2: + // if ((subframeP==3)||(subframeP==8)) + // UE_list->UE_template[CC_idP][UE_idP].DAI_ul = DAI; + break; - default: - break; - } + case 3: + + //if ((subframeP==6)||(subframeP==8)||(subframeP==0)) { + // LOG_D(MAC,"schedule_ue_spec: setting UL DAI to %d for subframeP %d => %d\n",DAI,subframeP, ((subframeP+8)%10)>>1); + // UE_list->UE_template[CC_idP][UE_idP].DAI_ul[((subframeP+8)%10)>>1] = DAI; + //} + switch (subframeP) { + case 5: + case 6: + case 1: + UE_list->UE_template[CC_idP][UE_idP].DAI_ul[2] = DAI; + break; + + case 7: + case 8: + UE_list->UE_template[CC_idP][UE_idP].DAI_ul[3] = DAI; + break; + + case 9: + case 0: + 
UE_list->UE_template[CC_idP][UE_idP].DAI_ul[4] = DAI; + break; + + default: + break; + } - break; + break; - case 4: - // if ((subframeP==8)||(subframeP==9)) - // UE_list->UE_template[CC_idP][UE_idP].DAI_ul = DAI; - break; + case 4: + // if ((subframeP==8)||(subframeP==9)) + // UE_list->UE_template[CC_idP][UE_idP].DAI_ul = DAI; + break; - case 5: - // if (subframeP==8) - // UE_list->UE_template[CC_idP][UE_idP].DAI_ul = DAI; - break; + case 5: + // if (subframeP==8) + // UE_list->UE_template[CC_idP][UE_idP].DAI_ul = DAI; + break; - case 6: - // if ((subframeP==1)||(subframeP==4)||(subframeP==6)||(subframeP==9)) - // UE_list->UE_template[CC_idP][UE_idP].DAI_ul = DAI; - break; + case 6: + // if ((subframeP==1)||(subframeP==4)||(subframeP==6)||(subframeP==9)) + // UE_list->UE_template[CC_idP][UE_idP].DAI_ul = DAI; + break; - default: - break; + default: + break; } } } //------------------------------------------------------------------------------ void -schedule_dlsch(module_id_t module_idP, - frame_t frameP, sub_frame_t subframeP, int *mbsfn_flag) -//------------------------------------------------------------------------------{ -{ +schedule_dlsch(module_id_t module_idP, frame_t frameP, sub_frame_t subframeP, int *mbsfn_flag) { int i = 0; + slice_info_t *sli = &RC.mac[module_idP]->slice_info; + memset(sli->rballoc_sub, 0, sizeof(sli->rballoc_sub)); - total_slice_percentage=0; - avg_slice_percentage=1.0/n_active_slices; - - // reset the slice percentage for inactive slices - for (i = n_active_slices; i< MAX_NUM_SLICES; i++) { - slice_percentage[i]=0; - } - for (i = 0; i < n_active_slices; i++) { - if (slice_percentage[i] < 0 ){ - LOG_W(MAC, "[eNB %d] frame %d subframe %d:invalid slice %d percentage %f. 
resetting to zero", - module_idP, frameP, subframeP, i, slice_percentage[i]); - slice_percentage[i]=0; - } - total_slice_percentage+=slice_percentage[i]; - } - - for (i = 0; i < n_active_slices; i++) { - - // Load any updated functions - if (update_dl_scheduler[i] > 0 ) { - slice_sched_dl[i] = dlsym(NULL, dl_scheduler_type[i]); - update_dl_scheduler[i] = 0 ; - update_dl_scheduler_current[i] = 0; - LOG_I(MAC,"update dl scheduler slice %d\n", i); - } - - if (total_slice_percentage <= 1.0){ // the new total RB share is within the range - - // check if the number of slices has changed, and log - if (n_active_slices_current != n_active_slices ){ - if ((n_active_slices > 0) && (n_active_slices <= MAX_NUM_SLICES)) { - LOG_I(MAC,"[eNB %d]frame %d subframe %d: number of active DL slices has changed: %d-->%d\n", - module_idP, frameP, subframeP, n_active_slices_current, n_active_slices); - - n_active_slices_current = n_active_slices; - - } else { - LOG_W(MAC,"invalid number of DL slices %d, revert to the previous value %d\n",n_active_slices, n_active_slices_current); - n_active_slices = n_active_slices_current; - } - } - - // check if the slice rb share has changed, and log the console - if (slice_percentage_current[i] != slice_percentage[i]){ // new slice percentage - LOG_I(MAC,"[eNB %d][SLICE %d][DL] frame %d subframe %d: total percentage %f-->%f, slice RB percentage has changed: %f-->%f\n", - module_idP, i, frameP, subframeP, total_slice_percentage_current, total_slice_percentage, slice_percentage_current[i], slice_percentage[i]); - total_slice_percentage_current= total_slice_percentage; - slice_percentage_current[i] = slice_percentage[i]; - - } - - // check if the slice max MCS, and log the console - if (slice_maxmcs_current[i] != slice_maxmcs[i]){ - if ((slice_maxmcs[i] >= 0) && (slice_maxmcs[i] < 29)){ - LOG_I(MAC,"[eNB %d][SLICE %d][DL] frame %d subframe %d: slice MAX MCS has changed: %d-->%d\n", - module_idP, i, frameP, subframeP, slice_maxmcs_current[i], 
slice_maxmcs[i]); - slice_maxmcs_current[i] = slice_maxmcs[i]; - } else { - LOG_W(MAC,"[eNB %d][SLICE %d][DL] invalid slice max mcs %d, revert the previous value %d\n",module_idP, i, slice_maxmcs[i],slice_maxmcs_current[i]); - slice_maxmcs[i]= slice_maxmcs_current[i]; - } - } - - // check if a new scheduler, and log the console - if (update_dl_scheduler_current[i] != update_dl_scheduler[i]){ - LOG_I(MAC,"[eNB %d][SLICE %d][DL] frame %d subframe %d: DL scheduler for this slice is updated: %s \n", - module_idP, i, frameP, subframeP, dl_scheduler_type[i]); - update_dl_scheduler_current[i] = update_dl_scheduler[i]; - } - - } else { - // here we can correct the values, e.g. reduce proportionally - - if (n_active_slices == n_active_slices_current){ - LOG_W(MAC,"[eNB %d][SLICE %d][DL] invalid total RB share (%f->%f), reduce proportionally the RB share by 0.1\n", - module_idP, i, total_slice_percentage_current, total_slice_percentage); - if (slice_percentage[i] >= avg_slice_percentage){ - slice_percentage[i]-=0.1; - total_slice_percentage-=0.1; - } - } else { - LOG_W(MAC,"[eNB %d][SLICE %d][DL] invalid total RB share (%f->%f), revert the number of slice to its previous value (%d->%d)\n", - module_idP, i, total_slice_percentage_current, total_slice_percentage, - n_active_slices, n_active_slices_current ); - n_active_slices = n_active_slices_current; - slice_percentage[i] = slice_percentage_current[i]; - } - } - - // Check for new sorting policy - if (sorting_policy_current[i] != sorting_policy[i]) { - LOG_I(MAC,"[eNB %d][SLICE %d][DL] frame %d subframe %d: UE sorting policy has changed (%x-->%x)\n", - module_idP, i, frameP, subframeP, sorting_policy_current[i], sorting_policy[i]); - sorting_policy_current[i] = sorting_policy[i]; - } - + for (i = 0; i < sli->n_dl; i++) { // Run each enabled slice-specific schedulers one by one - slice_sched_dl[i](module_idP, i, frameP, subframeP, mbsfn_flag/*, dl_info*/); + sli->dl[i].sched_cb(module_idP, i, frameP, subframeP, mbsfn_flag/*, 
dl_info*/); } } @@ -558,8 +425,8 @@ schedule_dlsch(module_id_t module_idP, //------------------------------------------------------------------------------ void -schedule_ue_spec(module_id_t module_idP,slice_id_t slice_idP, - frame_t frameP, sub_frame_t subframeP, int *mbsfn_flag) +schedule_ue_spec(module_id_t module_idP, int slice_idxP, + frame_t frameP, sub_frame_t subframeP, int *mbsfn_flag) //------------------------------------------------------------------------------ { int CC_id; @@ -587,10 +454,10 @@ schedule_ue_spec(module_id_t module_idP,slice_id_t slice_idP, UE_sched_ctrl *ue_sched_ctl; int mcs; int i; - int min_rb_unit[MAX_NUM_CCs]; - int N_RB_DL[MAX_NUM_CCs]; - int total_nb_available_rb[MAX_NUM_CCs]; - int N_RBG[MAX_NUM_CCs]; + int min_rb_unit[NFAPI_CC_MAX]; + int N_RB_DL[NFAPI_CC_MAX]; + int total_nb_available_rb[NFAPI_CC_MAX]; + int N_RBG[NFAPI_CC_MAX]; nfapi_dl_config_request_body_t *dl_req; nfapi_dl_config_request_pdu_t *dl_config_pdu; int tdd_sfa; @@ -599,61 +466,58 @@ schedule_ue_spec(module_id_t module_idP,slice_id_t slice_idP, int header_length_total; start_meas(&eNB->schedule_dlsch); - VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME - (VCD_SIGNAL_DUMPER_FUNCTIONS_SCHEDULE_DLSCH, VCD_FUNCTION_IN); - + VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_SCHEDULE_DLSCH, VCD_FUNCTION_IN); // for TDD: check that we have to act here, otherwise return if (cc[0].tdd_Config) { tdd_sfa = cc[0].tdd_Config->subframeAssignment; switch (subframeP) { - case 0: - // always continue - break; - case 1: - return; - break; - case 2: - return; - break; - case 3: - if ((tdd_sfa != 2) && (tdd_sfa != 5)) - return; - break; - case 4: - if ((tdd_sfa != 1) && (tdd_sfa != 2) && (tdd_sfa != 4) - && (tdd_sfa != 5)) - return; - break; - case 5: - break; - case 6: - case 7: - if ((tdd_sfa != 3) && (tdd_sfa != 4) && (tdd_sfa != 5)) - return; - break; - case 8: - if ((tdd_sfa != 2) && (tdd_sfa != 3) && (tdd_sfa != 4) - && (tdd_sfa != 5)) - return; - break; - case 9: - 
if (tdd_sfa == 0) - return; - break; - + case 0: + // always continue + break; + case 1: + return; + break; + case 2: + return; + break; + case 3: + if ((tdd_sfa != 2) && (tdd_sfa != 5)) + return; + break; + case 4: + if ((tdd_sfa != 1) && (tdd_sfa != 2) && (tdd_sfa != 4) + && (tdd_sfa != 5)) + return; + break; + case 5: + break; + case 6: + case 7: + if ((tdd_sfa != 3) && (tdd_sfa != 4) && (tdd_sfa != 5)) + return; + break; + case 8: + if ((tdd_sfa != 2) && (tdd_sfa != 3) && (tdd_sfa != 4) + && (tdd_sfa != 5)) + return; + break; + case 9: + if (tdd_sfa == 0) + return; + break; } } //weight = get_ue_weight(module_idP,UE_id); aggregation = 2; - for (CC_id = 0; CC_id < MAX_NUM_CCs; CC_id++) { + for (CC_id = 0; CC_id < RC.nb_mac_CC[module_idP]; CC_id++) { N_RB_DL[CC_id] = to_prb(cc[CC_id].mib->message.dl_Bandwidth); min_rb_unit[CC_id] = get_min_rb_unit(module_idP, CC_id); // get number of PRBs less those used by common channels total_nb_available_rb[CC_id] = N_RB_DL[CC_id]; for (i = 0; i < N_RB_DL[CC_id]; i++) if (cc[CC_id].vrb_map[i] != 0) - total_nb_available_rb[CC_id]--; + total_nb_available_rb[CC_id]--; N_RBG[CC_id] = to_rbg(cc[CC_id].mib->message.dl_Bandwidth); @@ -665,22 +529,37 @@ schedule_ue_spec(module_id_t module_idP,slice_id_t slice_idP, eNB->eNB_stats[CC_id].dlsch_pdus_tx = 0; } - /// CALLING Pre_Processor for downlink scheduling (Returns estimation of RBs required by each UE and the allocation on sub-band) + // CALLING Pre_Processor for downlink scheduling + // (Returns estimation of RBs required by each UE and the allocation on sub-band) + VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_DLSCH_PREPROCESSOR, VCD_FUNCTION_IN); - VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME - (VCD_SIGNAL_DUMPER_FUNCTIONS_DLSCH_PREPROCESSOR, VCD_FUNCTION_IN); start_meas(&eNB->schedule_dlsch_preprocessor); dlsch_scheduler_pre_processor(module_idP, - slice_idP, + slice_idxP, frameP, subframeP, - N_RBG, - mbsfn_flag); + mbsfn_flag, + eNB->slice_info.rballoc_sub); 
stop_meas(&eNB->schedule_dlsch_preprocessor); - VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME - (VCD_SIGNAL_DUMPER_FUNCTIONS_DLSCH_PREPROCESSOR, VCD_FUNCTION_OUT); - for (CC_id = 0; CC_id < MAX_NUM_CCs; CC_id++) { + VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_DLSCH_PREPROCESSOR, VCD_FUNCTION_OUT); + + //RC.mac[module_idP]->slice_info.slice_counter--; + // Do the multiplexing and actual allocation only when all slices have been pre-processed. + //if (RC.mac[module_idP]->slice_info.slice_counter > 0) { + //stop_meas(&eNB->schedule_dlsch); + //VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_SCHEDULE_DLSCH, VCD_FUNCTION_OUT); + //return; + //} + + if (RC.mac[module_idP]->slice_info.interslice_share_active) { + dlsch_scheduler_interslice_multiplexing(module_idP, frameP, subframeP, eNB->slice_info.rballoc_sub); + /* the interslice multiplexing re-sorts the UE_list for the slices it tries + * to multiplex, so we need to sort it for the current slice again */ + sort_UEs(module_idP, slice_idxP, frameP, subframeP); + } + + for (CC_id = 0; CC_id < RC.nb_mac_CC[module_idP]; CC_id++) { LOG_D(MAC, "doing schedule_ue_spec for CC_id %d\n", CC_id); dl_req = &eNB->DL_req[CC_id].dl_config_request_body; @@ -688,49 +567,51 @@ schedule_ue_spec(module_id_t module_idP,slice_id_t slice_idP, if (mbsfn_flag[CC_id] > 0) continue; - for (UE_id = UE_list->head; UE_id >= 0; - UE_id = UE_list->next[UE_id]) { - continue_flag = 0; // reset the flag to allow allocation for the remaining UEs + for (UE_id = UE_list->head; UE_id >= 0; UE_id = UE_list->next[UE_id]) { + LOG_D(MAC, "doing schedule_ue_spec for CC_id %d UE %d\n", CC_id, UE_id); + continue_flag = 0; // reset the flag to allow allocation for the remaining UEs rnti = UE_RNTI(module_idP, UE_id); eNB_UE_stats = &UE_list->eNB_UE_stats[CC_id][UE_id]; ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id]; - if (rnti == NOT_A_RNTI) { - LOG_D(MAC, "Cannot find rnti for UE_id %d (num_UEs %d)\n", - UE_id, 
UE_list->num_UEs); - continue_flag = 1; + LOG_D(MAC, "Cannot find rnti for UE_id %d (num_UEs %d)\n", UE_id, UE_list->num_UEs); + continue_flag = 1; } if (eNB_UE_stats == NULL) { - LOG_D(MAC, "[eNB] Cannot find eNB_UE_stats\n"); - continue_flag = 1; + LOG_D(MAC, "[eNB] Cannot find eNB_UE_stats\n"); + continue_flag = 1; + } + + if (!ue_dl_slice_membership(module_idP, UE_id, slice_idxP)) { + LOG_D(MAC, "UE%d is not part of slice %d ID %d\n", + UE_id, slice_idxP, RC.mac[module_idP]->slice_info.dl[slice_idxP].id); + /* prevent execution of add_ue_dlsch_info(), it is done by the other + * slice */ + continue; } - if (!ue_slice_membership(UE_id, slice_idP)) - continue; if (continue_flag != 1) { - switch (get_tmode(module_idP, CC_id, UE_id)) { - case 1: - case 2: - case 7: - aggregation = - get_aggregation(get_bw_index(module_idP, CC_id), - ue_sched_ctl->dl_cqi[CC_id], - format1); - break; - case 3: - aggregation = - get_aggregation(get_bw_index(module_idP, CC_id), - ue_sched_ctl->dl_cqi[CC_id], - format2A); - break; - default: - LOG_W(MAC, "Unsupported transmission mode %d\n", - get_tmode(module_idP, CC_id, UE_id)); - aggregation = 2; - } + switch (get_tmode(module_idP, CC_id, UE_id)) { + case 1: + case 2: + case 7: + aggregation = get_aggregation(get_bw_index(module_idP, CC_id), + ue_sched_ctl->dl_cqi[CC_id], + format1); + break; + case 3: + aggregation = get_aggregation(get_bw_index(module_idP, CC_id), + ue_sched_ctl->dl_cqi[CC_id], + format2A); + break; + default: + LOG_W(MAC, "Unsupported transmission mode %d\n", get_tmode(module_idP, CC_id, UE_id)); + aggregation = 2; + } } + /* if (continue_flag != 1 */ if ((ue_sched_ctl->pre_nb_available_rbs[CC_id] == 0) || // no RBs allocated CCE_allocation_infeasible(module_idP, CC_id, 1, subframeP, @@ -741,78 +622,74 @@ schedule_ue_spec(module_id_t module_idP,slice_id_t slice_idP, continue_flag = 1; //to next user (there might be rbs availiable for other UEs in TM5 } - if (cc[CC_id].tdd_Config != NULL) { //TDD - 
set_ue_dai(subframeP, - UE_id, - CC_id, - cc[CC_id].tdd_Config->subframeAssignment, - UE_list); - // update UL DAI after DLSCH scheduling - set_ul_DAI(module_idP, UE_id, CC_id, frameP, subframeP); + // If TDD + if (cc[CC_id].tdd_Config != NULL) { //TDD + set_ue_dai(subframeP, + UE_id, + CC_id, + cc[CC_id].tdd_Config->subframeAssignment, + UE_list); + // update UL DAI after DLSCH scheduling + set_ul_DAI(module_idP, UE_id, CC_id, frameP, subframeP); } if (continue_flag == 1) { - add_ue_dlsch_info(module_idP, - CC_id, UE_id, subframeP, S_DL_NONE); - continue; + add_ue_dlsch_info(module_idP, CC_id, UE_id, subframeP, S_DL_NONE); + continue; } nb_available_rb = ue_sched_ctl->pre_nb_available_rbs[CC_id]; - harq_pid = frame_subframe2_dl_harq_pid(cc->tdd_Config,frameP ,subframeP); round = ue_sched_ctl->round[CC_id][harq_pid]; - UE_list->eNB_UE_stats[CC_id][UE_id].crnti = rnti; + UE_list->eNB_UE_stats[CC_id][UE_id].crnti = rnti; UE_list->eNB_UE_stats[CC_id][UE_id].rrc_status = mac_eNB_get_rrc_status(module_idP, rnti); - UE_list->eNB_UE_stats[CC_id][UE_id].harq_pid = harq_pid; + UE_list->eNB_UE_stats[CC_id][UE_id].harq_pid = harq_pid; UE_list->eNB_UE_stats[CC_id][UE_id].harq_round = round; - - if (UE_list->eNB_UE_stats[CC_id][UE_id].rrc_status < RRC_CONNECTED) continue; + if (UE_list->eNB_UE_stats[CC_id][UE_id].rrc_status < RRC_CONNECTED) { + LOG_D(MAC, "UE %d is not in RRC_CONNECTED\n", UE_id); + continue; + } header_length_total = 0; sdu_length_total = 0; num_sdus = 0; /* - DevCheck(((eNB_UE_stats->dl_cqi < MIN_CQI_VALUE) || (eNB_UE_stats->dl_cqi > MAX_CQI_VALUE)), - eNB_UE_stats->dl_cqi, MIN_CQI_VALUE, MAX_CQI_VALUE); + DevCheck(((eNB_UE_stats->dl_cqi < MIN_CQI_VALUE) || + (eNB_UE_stats->dl_cqi > MAX_CQI_VALUE)), + eNB_UE_stats->dl_cqi, MIN_CQI_VALUE, MAX_CQI_VALUE); */ if (nfapi_mode) { - eNB_UE_stats->dlsch_mcs1 = 10;//cqi_to_mcs[ue_sched_ctl->dl_cqi[CC_id]]; + eNB_UE_stats->dlsch_mcs1 = 10; // cqi_to_mcs[ue_sched_ctl->dl_cqi[CC_id]]; + } else { // this operation is also 
done in the preprocessor + eNB_UE_stats->dlsch_mcs1 = cmin(eNB_UE_stats->dlsch_mcs1, + eNB->slice_info.dl[slice_idxP].maxmcs); // cmin(eNB_UE_stats->dlsch_mcs1, openair_daq_vars.target_ue_dl_mcs); } - else { // this operation is also done in the preprocessor - eNB_UE_stats->dlsch_mcs1 = cmin(eNB_UE_stats->dlsch_mcs1, slice_maxmcs[slice_idP]); //cmin(eNB_UE_stats->dlsch_mcs1, openair_daq_vars.target_ue_dl_mcs); - } - - - // store stats - //UE_list->eNB_UE_stats[CC_id][UE_id].dl_cqi= eNB_UE_stats->dl_cqi; + // Store stats + // UE_list->eNB_UE_stats[CC_id][UE_id].dl_cqi= eNB_UE_stats->dl_cqi; - // initializing the rb allocation indicator for each UE + // Initializing the rb allocation indicator for each UE for (j = 0; j < N_RBG[CC_id]; j++) { - UE_list->UE_template[CC_id][UE_id].rballoc_subband[harq_pid][j] = 0; + UE_list->UE_template[CC_id][UE_id].rballoc_subband[harq_pid][j] = 0; } LOG_D(MAC, - "[eNB %d] Frame %d: Scheduling UE %d on CC_id %d (rnti %x, harq_pid %d, round %d, rb %d, cqi %d, mcs %d, rrc %d)\n", - module_idP, frameP, UE_id, CC_id, rnti, harq_pid, round, - nb_available_rb, ue_sched_ctl->dl_cqi[CC_id], - eNB_UE_stats->dlsch_mcs1, - UE_list->eNB_UE_stats[CC_id][UE_id].rrc_status); - - + "[eNB %d] Frame %d: Scheduling UE %d on CC_id %d (rnti %x, harq_pid %d, round %d, rb %d, cqi %d, mcs %d, rrc %d)\n", + module_idP, frameP, UE_id, CC_id, rnti, harq_pid, round, + nb_available_rb, ue_sched_ctl->dl_cqi[CC_id], + eNB_UE_stats->dlsch_mcs1, + UE_list->eNB_UE_stats[CC_id][UE_id].rrc_status); /* process retransmission */ - if (round != 8) { - // get freq_allocation - nb_rb = UE_list->UE_template[CC_id][UE_id].nb_rb[harq_pid]; - TBS = get_TBS_DL(UE_list->UE_template[CC_id][UE_id].oldmcs1[harq_pid], - nb_rb); + // get freq_allocation + nb_rb = UE_list->UE_template[CC_id][UE_id].nb_rb[harq_pid]; + TBS = get_TBS_DL(UE_list->UE_template[CC_id][UE_id].oldmcs1[harq_pid], nb_rb); if (nb_rb <= nb_available_rb) { if (cc[CC_id].tdd_Config != NULL) { @@ -825,82 +702,82 @@ 
schedule_ue_spec(module_id_t module_idP,slice_id_t slice_idP, UE_list->UE_template[CC_id][UE_id].DAI); } - if (nb_rb == ue_sched_ctl->pre_nb_available_rbs[CC_id]) { - for (j = 0; j < N_RBG[CC_id]; j++) { // for indicating the rballoc for each sub-band - UE_list->UE_template[CC_id][UE_id].rballoc_subband[harq_pid][j] = ue_sched_ctl->rballoc_sub_UE[CC_id][j]; - } - } else { - nb_rb_temp = nb_rb; - j = 0; - - while ((nb_rb_temp > 0) && (j < N_RBG[CC_id])) { - if (ue_sched_ctl->rballoc_sub_UE[CC_id][j] == 1) { - if (UE_list->UE_template[CC_id][UE_id].rballoc_subband[harq_pid][j]) - printf("WARN: rballoc_subband not free for retrans?\n"); - UE_list->UE_template[CC_id][UE_id].rballoc_subband[harq_pid][j] = ue_sched_ctl->rballoc_sub_UE[CC_id][j]; - - if ((j == N_RBG[CC_id] - 1) && - ((N_RB_DL[CC_id] == 25) || - (N_RB_DL[CC_id] == 50))) { - nb_rb_temp = nb_rb_temp - min_rb_unit[CC_id] + 1; - } else { - nb_rb_temp = nb_rb_temp - min_rb_unit[CC_id]; - } - } - - j = j + 1; - } - } - - nb_available_rb -= nb_rb; - /* - eNB->mu_mimo_mode[UE_id].pre_nb_available_rbs = nb_rb; - eNB->mu_mimo_mode[UE_id].dl_pow_off = ue_sched_ctl->dl_pow_off[CC_id]; + if (nb_rb == ue_sched_ctl->pre_nb_available_rbs[CC_id]) { + for (j = 0; j < N_RBG[CC_id]; ++j) { // for indicating the rballoc for each sub-band + UE_list->UE_template[CC_id][UE_id].rballoc_subband[harq_pid][j] = ue_sched_ctl->rballoc_sub_UE[CC_id][j]; + } + } else { + nb_rb_temp = nb_rb; + j = 0; + + while ((nb_rb_temp > 0) && (j < N_RBG[CC_id])) { + if (ue_sched_ctl->rballoc_sub_UE[CC_id][j] == 1) { + if (UE_list->UE_template[CC_id][UE_id].rballoc_subband[harq_pid][j]) + printf("WARN: rballoc_subband not free for retrans?\n"); + UE_list->UE_template[CC_id][UE_id].rballoc_subband[harq_pid][j] = ue_sched_ctl->rballoc_sub_UE[CC_id][j]; + + if ((j == N_RBG[CC_id] - 1) && ((N_RB_DL[CC_id] == 25) || (N_RB_DL[CC_id] == 50))) { + nb_rb_temp = nb_rb_temp - min_rb_unit[CC_id] + 1; + } else { + nb_rb_temp = nb_rb_temp - min_rb_unit[CC_id]; + } 
+ } - for(j=0; j<N_RBG[CC_id]; j++) { - eNB->mu_mimo_mode[UE_id].rballoc_sub[j] = UE_list->UE_template[CC_id][UE_id].rballoc_subband[harq_pid][j]; - } - */ - - switch (get_tmode(module_idP, CC_id, UE_id)) { - case 1: - case 2: - case 7: - default: - LOG_D(MAC,"retransmission DL_REQ: rnti:%x\n",rnti); - - dl_config_pdu = &dl_req->dl_config_pdu_list[dl_req->number_pdu]; - memset((void *) dl_config_pdu, 0, - sizeof(nfapi_dl_config_request_pdu_t)); - dl_config_pdu->pdu_type = NFAPI_DL_CONFIG_DCI_DL_PDU_TYPE; - dl_config_pdu->pdu_size = (uint8_t) (2 + sizeof(nfapi_dl_config_dci_dl_pdu)); - dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.tl.tag = NFAPI_DL_CONFIG_REQUEST_DCI_DL_PDU_REL8_TAG; - dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.dci_format = NFAPI_DL_DCI_FORMAT_1; - dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.aggregation_level = get_aggregation(get_bw_index(module_idP, CC_id), - ue_sched_ctl->dl_cqi[CC_id], - format1); - dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.rnti = rnti; - dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.rnti_type = 1; // CRNTI : see Table 4-10 from SCF082 - nFAPI specifications - dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.transmission_power = 6000; // equal to RS power - - dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.harq_process = harq_pid; - dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.tpc = 1; // dont adjust power when retransmitting - dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.new_data_indicator_1 = UE_list->UE_template[CC_id][UE_id].oldNDI[harq_pid]; - dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.mcs_1 = UE_list->UE_template[CC_id][UE_id].oldmcs1[harq_pid]; - dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.redundancy_version_1 = round & 3; - - if (cc[CC_id].tdd_Config != NULL) { //TDD - dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.downlink_assignment_index = (UE_list->UE_template[CC_id][UE_id].DAI - 1) & 3; - LOG_D(MAC, - "[eNB %d] Retransmission CC_id %d : harq_pid %d, round %d, dai %d, mcs %d\n", - module_idP, CC_id, harq_pid, round, - 
(UE_list->UE_template[CC_id][UE_id].DAI - 1), - UE_list->UE_template[CC_id][UE_id].oldmcs1[harq_pid]); - } else { - LOG_D(MAC, - "[eNB %d] Retransmission CC_id %d : harq_pid %d, round %d, mcs %d\n", - module_idP, CC_id, harq_pid, round, - UE_list->UE_template[CC_id][UE_id].oldmcs1[harq_pid]); + j = j + 1; + } + } + + nb_available_rb -= nb_rb; + /* + eNB->mu_mimo_mode[UE_id].pre_nb_available_rbs = nb_rb; + eNB->mu_mimo_mode[UE_id].dl_pow_off = ue_sched_ctl->dl_pow_off[CC_id]; + + for(j = 0; j < N_RBG[CC_id]; ++j) { + eNB->mu_mimo_mode[UE_id].rballoc_sub[j] = UE_list->UE_template[CC_id][UE_id].rballoc_subband[harq_pid][j]; + } + */ + + switch (get_tmode(module_idP, CC_id, UE_id)) { + case 1: + case 2: + case 7: + default: + LOG_D(MAC, "retransmission DL_REQ: rnti:%x\n", rnti); + + dl_config_pdu = &dl_req->dl_config_pdu_list[dl_req->number_pdu]; + memset((void *) dl_config_pdu, 0, sizeof(nfapi_dl_config_request_pdu_t)); + dl_config_pdu->pdu_type = NFAPI_DL_CONFIG_DCI_DL_PDU_TYPE; + dl_config_pdu->pdu_size = (uint8_t) (2 + sizeof(nfapi_dl_config_dci_dl_pdu)); + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.tl.tag = NFAPI_DL_CONFIG_REQUEST_DCI_DL_PDU_REL8_TAG; + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.dci_format = NFAPI_DL_DCI_FORMAT_1; + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.aggregation_level = + get_aggregation(get_bw_index(module_idP, CC_id), + ue_sched_ctl->dl_cqi[CC_id], + format1); + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.rnti = rnti; + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.rnti_type = 1; // CRNTI: see Table 4-10 from SCF082 - nFAPI specifications + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.transmission_power = 6000; // equal to RS power + + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.harq_process = harq_pid; + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.tpc = 1; // Don't adjust power when retransmitting + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.new_data_indicator_1 = UE_list->UE_template[CC_id][UE_id].oldNDI[harq_pid]; + 
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.mcs_1 = UE_list->UE_template[CC_id][UE_id].oldmcs1[harq_pid]; + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.redundancy_version_1 = round & 3; + + // TDD + if (cc[CC_id].tdd_Config != NULL) { + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.downlink_assignment_index = + (UE_list->UE_template[CC_id][UE_id].DAI - 1) & 3; + LOG_D(MAC, + "[eNB %d] Retransmission CC_id %d : harq_pid %d, round %d, dai %d, mcs %d\n", + module_idP, CC_id, harq_pid, round, + (UE_list->UE_template[CC_id][UE_id].DAI - 1), + UE_list->UE_template[CC_id][UE_id].oldmcs1[harq_pid]); + } else { + LOG_D(MAC, + "[eNB %d] Retransmission CC_id %d : harq_pid %d, round %d, mcs %d\n", + module_idP, CC_id, harq_pid, round, + UE_list->UE_template[CC_id][UE_id].oldmcs1[harq_pid]); } if (!CCE_allocation_infeasible(module_idP, CC_id, 1, subframeP, @@ -912,48 +789,44 @@ schedule_ue_spec(module_id_t module_idP,slice_id_t slice_idP, eNB->DL_req[CC_id].sfn_sf = frameP<<4 | subframeP; eNB->DL_req[CC_id].header.message_id = NFAPI_DL_CONFIG_REQUEST; - fill_nfapi_dlsch_config(eNB, dl_req, TBS, -1 - /* retransmission, no pdu_index */ - , rnti, 0, // type 0 allocation from 7.1.6 in 36.213 - 0, // virtual_resource_block_assignment_flag, unused here - 0, // resource_block_coding, to be filled in later - getQm(UE_list->UE_template[CC_id][UE_id].oldmcs1[harq_pid]), round & 3, // redundancy version - 1, // transport blocks - 0, // transport block to codeword swap flag - cc[CC_id].p_eNB == 1 ? 0 : 1, // transmission_scheme - 1, // number of layers - 1, // number of subbands - // uint8_t codebook_index, - 4, // UE category capacity - UE_list->UE_template[CC_id][UE_id].physicalConfigDedicated->pdsch_ConfigDedicated->p_a, 0, // delta_power_offset for TM5 - 0, // ngap - 0, // nprb - cc[CC_id].p_eNB == 1 ? 
1 : 2, // transmission mode - 0, //number of PRBs treated as one subband, not used here - 0 // number of beamforming vectors, not used here - ); - - LOG_D(MAC, - "Filled NFAPI configuration for DCI/DLSCH %d, retransmission round %d\n", - eNB->pdu_index[CC_id], round); - - program_dlsch_acknak(module_idP, CC_id, UE_id, - frameP, subframeP, - dl_config_pdu-> - dci_dl_pdu.dci_dl_pdu_rel8. - cce_idx); - // No TX request for retransmission (check if null request for FAPI) - } else { - LOG_W(MAC, - "Frame %d, Subframe %d: Dropping DLSCH allocation for UE %d\%x, infeasible CCE allocation\n", - frameP, subframeP, UE_id, rnti); - } - } - + fill_nfapi_dlsch_config(eNB, dl_req, TBS, -1, + /* retransmission, no pdu_index */ + rnti, 0, // type 0 allocation from 7.1.6 in 36.213 + 0, // virtual_resource_block_assignment_flag, unused here + 0, // resource_block_coding, to be filled in later + getQm(UE_list->UE_template[CC_id][UE_id].oldmcs1[harq_pid]), + round & 3, // redundancy version + 1, // transport blocks + 0, // transport block to codeword swap flag + cc[CC_id].p_eNB == 1 ? 0 : 1, // transmission_scheme + 1, // number of layers + 1, // number of subbands + // uint8_t codebook_index, + 4, // UE category capacity + UE_list->UE_template[CC_id][UE_id].physicalConfigDedicated->pdsch_ConfigDedicated->p_a, + 0, // delta_power_offset for TM5 + 0, // ngap + 0, // nprb + cc[CC_id].p_eNB == 1 ? 
1 : 2, // transmission mode + 0, //number of PRBs treated as one subband, not used here + 0 // number of beamforming vectors, not used here + ); + + LOG_D(MAC, + "Filled NFAPI configuration for DCI/DLSCH %d, retransmission round %d\n", + eNB->pdu_index[CC_id], round); + + program_dlsch_acknak(module_idP, CC_id, UE_id, frameP, subframeP, + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.cce_idx); + // No TX request for retransmission (check if null request for FAPI) + } else { + LOG_W(MAC, + "Frame %d, Subframe %d: Dropping DLSCH allocation for UE %d\%x, infeasible CCE allocation\n", + frameP, subframeP, UE_id, rnti); + } + } - add_ue_dlsch_info(module_idP, - CC_id, UE_id, subframeP, - S_DL_SCHEDULED); + add_ue_dlsch_info(module_idP, CC_id, UE_id, subframeP, S_DL_SCHEDULED); //eNB_UE_stats->dlsch_trials[round]++; UE_list->eNB_UE_stats[CC_id][UE_id].num_retransmission += 1; @@ -975,17 +848,18 @@ schedule_ue_spec(module_id_t module_idP,slice_id_t slice_idP, // add the length for all the control elements (timing adv, drx, etc) : header + payload - if (ue_sched_ctl->ta_timer == 0) { - ta_update = ue_sched_ctl->ta_update; - /* if we send TA then set timer to not send it for a while */ - if (ta_update != 31) ue_sched_ctl->ta_timer = 20; - /* reset ta_update */ - ue_sched_ctl->ta_update = 31; - } else { - ta_update = 31; - } + if (ue_sched_ctl->ta_timer == 0) { + ta_update = ue_sched_ctl->ta_update; + /* if we send TA then set timer to not send it for a while */ + if (ta_update != 31) + ue_sched_ctl->ta_timer = 20; + /* reset ta_update */ + ue_sched_ctl->ta_update = 31; + } else { + ta_update = 31; + } - ta_len = (ta_update != 31) ? 2 : 0; + ta_len = (ta_update != 31) ? 
2 : 0; // RLC data on DCCH if (TBS - ta_len - header_length_total - sdu_length_total - 3 > 0) { @@ -996,7 +870,7 @@ schedule_ue_spec(module_id_t module_idP,slice_id_t slice_idP, #endif ); - sdu_lengths[0] = 0; + sdu_lengths[0] = 0; if (rlc_status.bytes_in_buffer > 0) { LOG_D(MAC, "[eNB %d] SFN/SF %d.%d, DL-DCCH->DLSCH CC_id %d, Requesting %d bytes from RLC (RRC message)\n", @@ -1062,10 +936,10 @@ schedule_ue_spec(module_id_t module_idP,slice_id_t slice_idP, } } - T(T_ENB_MAC_UE_DL_SDU, T_INT(module_idP), - T_INT(CC_id), T_INT(rnti), T_INT(frameP), - T_INT(subframeP), T_INT(harq_pid), T_INT(DCCH), - T_INT(sdu_lengths[0])); + T(T_ENB_MAC_UE_DL_SDU, T_INT(module_idP), + T_INT(CC_id), T_INT(rnti), T_INT(frameP), + T_INT(subframeP), T_INT(harq_pid), T_INT(DCCH), + T_INT(sdu_lengths[0])); LOG_D(MAC, "[eNB %d][DCCH] CC_id %d Got %d bytes from RLC\n", module_idP, CC_id, sdu_lengths[0]); @@ -1083,15 +957,15 @@ schedule_ue_spec(module_id_t module_idP,slice_id_t slice_idP, num_sdus = 1; #ifdef DEBUG_eNB_SCHEDULER - LOG_T(MAC, - "[eNB %d][DCCH] CC_id %d Got %d bytes :", - module_idP, CC_id, sdu_lengths[0]); + LOG_T(MAC, + "[eNB %d][DCCH] CC_id %d Got %d bytes :", + module_idP, CC_id, sdu_lengths[0]); - for (j = 0; j < sdu_lengths[0]; j++) { - LOG_T(MAC, "%x ", dlsch_buffer[j]); - } + for (j = 0; j < sdu_lengths[0]; ++j) { + LOG_T(MAC, "%x ", dlsch_buffer[j]); + } - LOG_T(MAC, "\n"); + LOG_T(MAC, "\n"); #endif } } @@ -1121,10 +995,10 @@ schedule_ue_spec(module_id_t module_idP,slice_id_t slice_idP, #endif ); - T(T_ENB_MAC_UE_DL_SDU, T_INT(module_idP), - T_INT(CC_id), T_INT(rnti), T_INT(frameP), - T_INT(subframeP), T_INT(harq_pid), - T_INT(DCCH + 1), T_INT(sdu_lengths[num_sdus])); + T(T_ENB_MAC_UE_DL_SDU, T_INT(module_idP), + T_INT(CC_id), T_INT(rnti), T_INT(frameP), + T_INT(subframeP), T_INT(harq_pid), + T_INT(DCCH + 1), T_INT(sdu_lengths[num_sdus])); sdu_lcids[num_sdus] = DCCH1; sdu_length_total += sdu_lengths[num_sdus]; @@ -1139,15 +1013,15 @@ schedule_ue_spec(module_id_t 
module_idP,slice_id_t slice_idP, num_sdus++; #ifdef DEBUG_eNB_SCHEDULER - LOG_T(MAC, - "[eNB %d][DCCH1] CC_id %d Got %d bytes :", - module_idP, CC_id, sdu_lengths[num_sdus]); + LOG_T(MAC, + "[eNB %d][DCCH1] CC_id %d Got %d bytes :", + module_idP, CC_id, sdu_lengths[num_sdus]); - for (j = 0; j < sdu_lengths[num_sdus]; j++) { - LOG_T(MAC, "%x ", dlsch_buffer[j]); - } + for (j = 0; j < sdu_lengths[num_sdus]; ++j) { + LOG_T(MAC, "%x ", dlsch_buffer[j]); + } - LOG_T(MAC, "\n"); + LOG_T(MAC, "\n"); #endif } @@ -1236,15 +1110,14 @@ schedule_ue_spec(module_id_t module_idP,slice_id_t slice_idP, // Now compute number of required RBs for total sdu length // Assume RAH format 2 - mcs = eNB_UE_stats->dlsch_mcs1; + mcs = eNB_UE_stats->dlsch_mcs1; - if (mcs == 0) { - nb_rb = 4; // don't let the TBS get too small - } else { - nb_rb = min_rb_unit[CC_id]; - } - - TBS = get_TBS_DL(mcs, nb_rb); + if (mcs == 0) { + nb_rb = 4; // don't let the TBS get too small + } else { + nb_rb = min_rb_unit[CC_id]; + } + TBS = get_TBS_DL(mcs, nb_rb); while (TBS < sdu_length_total + header_length_total + ta_len) { nb_rb += min_rb_unit[CC_id]; // @@ -1256,33 +1129,31 @@ schedule_ue_spec(module_id_t module_idP,slice_id_t slice_idP, break; } - TBS = get_TBS_DL(eNB_UE_stats->dlsch_mcs1, nb_rb); - } + TBS = get_TBS_DL(eNB_UE_stats->dlsch_mcs1, nb_rb); + } - if (nb_rb == ue_sched_ctl->pre_nb_available_rbs[CC_id]) { - for (j = 0; j < N_RBG[CC_id]; j++) { // for indicating the rballoc for each sub-band - UE_list->UE_template[CC_id][UE_id].rballoc_subband[harq_pid][j] = ue_sched_ctl->rballoc_sub_UE[CC_id][j]; - } - } else { - nb_rb_temp = nb_rb; - j = 0; - - while ((nb_rb_temp > 0) && (j < N_RBG[CC_id])) { - if (ue_sched_ctl->rballoc_sub_UE[CC_id][j] == 1) { - UE_list->UE_template[CC_id][UE_id].rballoc_subband[harq_pid][j] = ue_sched_ctl->rballoc_sub_UE[CC_id][j]; - - if ((j == N_RBG[CC_id] - 1) && - ((N_RB_DL[CC_id] == 25) || - (N_RB_DL[CC_id] == 50))) { - nb_rb_temp = nb_rb_temp - min_rb_unit[CC_id] + 1; - } 
else { - nb_rb_temp = nb_rb_temp - min_rb_unit[CC_id]; - } - } + if (nb_rb == ue_sched_ctl->pre_nb_available_rbs[CC_id]) { + for (j = 0; j < N_RBG[CC_id]; ++j) { // for indicating the rballoc for each sub-band + UE_list->UE_template[CC_id][UE_id].rballoc_subband[harq_pid][j] = ue_sched_ctl->rballoc_sub_UE[CC_id][j]; + } + } else { + nb_rb_temp = nb_rb; + j = 0; + + while ((nb_rb_temp > 0) && (j < N_RBG[CC_id])) { + if (ue_sched_ctl->rballoc_sub_UE[CC_id][j] == 1) { + UE_list->UE_template[CC_id][UE_id].rballoc_subband[harq_pid][j] = ue_sched_ctl->rballoc_sub_UE[CC_id][j]; + + if ((j == N_RBG[CC_id] - 1) && ((N_RB_DL[CC_id] == 25) || (N_RB_DL[CC_id] == 50))) { + nb_rb_temp = nb_rb_temp - min_rb_unit[CC_id] + 1; + } else { + nb_rb_temp = nb_rb_temp - min_rb_unit[CC_id]; + } + } - j = j + 1; - } - } + j = j + 1; + } + } // decrease mcs until TBS falls below required length while ((TBS > sdu_length_total + header_length_total + ta_len) && (mcs > 0)) { @@ -1300,16 +1171,16 @@ schedule_ue_spec(module_id_t module_idP,slice_id_t slice_idP, TBS = get_TBS_DL(mcs, nb_rb); } - LOG_D(MAC, - "dlsch_mcs before and after the rate matching = (%d, %d)\n", - eNB_UE_stats->dlsch_mcs1, mcs); + LOG_D(MAC, + "dlsch_mcs before and after the rate matching = (%d, %d)\n", + eNB_UE_stats->dlsch_mcs1, mcs); #ifdef DEBUG_eNB_SCHEDULER - LOG_D(MAC, - "[eNB %d] CC_id %d Generated DLSCH header (mcs %d, TBS %d, nb_rb %d)\n", - module_idP, CC_id, mcs, TBS, nb_rb); - // msg("[MAC][eNB ] Reminder of DLSCH with random data %d %d %d %d \n", - // TBS, sdu_length_total, offset, TBS-sdu_length_total-offset); + LOG_D(MAC, + "[eNB %d] CC_id %d Generated DLSCH header (mcs %d, TBS %d, nb_rb %d)\n", + module_idP, CC_id, mcs, TBS, nb_rb); + // msg("[MAC][eNB ] Reminder of DLSCH with random data %d %d %d %d \n", + // TBS, sdu_length_total, offset, TBS-sdu_length_total-offset); #endif if (TBS - header_length_total - sdu_length_total - ta_len <= 2) { @@ -1320,12 +1191,13 @@ schedule_ue_spec(module_id_t 
module_idP,slice_id_t slice_idP, post_padding = 1; } - offset = generate_dlsch_header((unsigned char *) UE_list->DLSCH_pdu[CC_id][0][UE_id].payload[0], num_sdus, //num_sdus - sdu_lengths, // - sdu_lcids, 255, // no drx - ta_update, // timing advance - NULL, // contention res id - padding, post_padding); + offset = generate_dlsch_header((unsigned char *) UE_list->DLSCH_pdu[CC_id][0][UE_id].payload[0], + num_sdus, //num_sdus + sdu_lengths, // + sdu_lcids, 255, // no drx + ta_update, // timing advance + NULL, // contention res id + padding, post_padding); //#ifdef DEBUG_eNB_SCHEDULER if (ta_update != 31) { @@ -1339,19 +1211,18 @@ schedule_ue_spec(module_id_t module_idP,slice_id_t slice_idP, } //#endif #ifdef DEBUG_eNB_SCHEDULER - LOG_T(MAC, "[eNB %d] First 16 bytes of DLSCH : \n"); + LOG_T(MAC, "[eNB %d] First 16 bytes of DLSCH : \n"); - for (i = 0; i < 16; i++) { - LOG_T(MAC, "%x.", dlsch_buffer[i]); - } + for (i = 0; i < 16; ++i) { + LOG_T(MAC, "%x.", dlsch_buffer[i]); + } - LOG_T(MAC, "\n"); + LOG_T(MAC, "\n"); #endif - // cycle through SDUs and place in dlsch_buffer - memcpy(&UE_list->DLSCH_pdu[CC_id][0][UE_id].payload[0][offset], - dlsch_buffer, sdu_length_total); - // memcpy(RC.mac[0].DLSCH_pdu[0][0].payload[0][offset],dcch_buffer,sdu_lengths[0]); + // cycle through SDUs and place in dlsch_buffer + memcpy(&UE_list->DLSCH_pdu[CC_id][0][UE_id].payload[0][offset], dlsch_buffer, sdu_length_total); + // memcpy(RC.mac[0].DLSCH_pdu[0][0].payload[0][offset],dcch_buffer,sdu_lengths[0]); // fill remainder of DLSCH with 0 for (j = 0; j < (TBS - sdu_length_total - offset); j++) { @@ -1375,14 +1246,12 @@ schedule_ue_spec(module_id_t module_idP,slice_id_t slice_idP, T_INT(subframeP), T_INT(harq_pid), T_BUFFER(UE_list->DLSCH_pdu[CC_id][0][UE_id].payload[0], TBS)); - UE_list->UE_template[CC_id][UE_id].nb_rb[harq_pid] = nb_rb; + UE_list->UE_template[CC_id][UE_id].nb_rb[harq_pid] = nb_rb; - add_ue_dlsch_info(module_idP, - CC_id, UE_id, subframeP, - S_DL_SCHEDULED); - // store 
stats - eNB->eNB_stats[CC_id].dlsch_bytes_tx += sdu_length_total; - eNB->eNB_stats[CC_id].dlsch_pdus_tx += 1; + add_ue_dlsch_info(module_idP, CC_id, UE_id, subframeP, S_DL_SCHEDULED); + // store stats + eNB->eNB_stats[CC_id].dlsch_bytes_tx += sdu_length_total; + eNB->eNB_stats[CC_id].dlsch_pdus_tx += 1; UE_list->eNB_UE_stats[CC_id][UE_id].rbs_used = nb_rb; UE_list->eNB_UE_stats[CC_id][UE_id].num_mac_sdu_tx = num_sdus; @@ -1391,10 +1260,10 @@ schedule_ue_spec(module_id_t module_idP,slice_id_t slice_idP, UE_list->eNB_UE_stats[CC_id][UE_id].dlsch_mcs2 = mcs; UE_list->eNB_UE_stats[CC_id][UE_id].TBS = TBS; - UE_list->eNB_UE_stats[CC_id][UE_id].overhead_bytes = TBS - sdu_length_total; - UE_list->eNB_UE_stats[CC_id][UE_id].total_sdu_bytes += sdu_length_total; - UE_list->eNB_UE_stats[CC_id][UE_id].total_pdu_bytes += TBS; - UE_list->eNB_UE_stats[CC_id][UE_id].total_num_pdus += 1; + UE_list->eNB_UE_stats[CC_id][UE_id].overhead_bytes = TBS - sdu_length_total; + UE_list->eNB_UE_stats[CC_id][UE_id].total_sdu_bytes += sdu_length_total; + UE_list->eNB_UE_stats[CC_id][UE_id].total_pdu_bytes += TBS; + UE_list->eNB_UE_stats[CC_id][UE_id].total_num_pdus += 1; if (cc[CC_id].tdd_Config != NULL) { // TDD UE_list->UE_template[CC_id][UE_id].DAI++; @@ -1419,8 +1288,8 @@ schedule_ue_spec(module_id_t module_idP,slice_id_t slice_idP, if (ue_sched_ctl->pucch1_cqi_update[CC_id] == 1) { ue_sched_ctl->pucch1_cqi_update[CC_id] = 0; - UE_list->UE_template[CC_id][UE_id].pucch_tpc_tx_frame = frameP; - UE_list->UE_template[CC_id][UE_id].pucch_tpc_tx_subframe = subframeP; + UE_list->UE_template[CC_id][UE_id].pucch_tpc_tx_frame = frameP; + UE_list->UE_template[CC_id][UE_id].pucch_tpc_tx_subframe = subframeP; if (normalized_rx_power > (target_rx_power + 4)) { tpc = 0; //-1 @@ -1443,39 +1312,40 @@ schedule_ue_spec(module_id_t module_idP,slice_id_t slice_idP, tpc = 1; //0 } - dl_config_pdu = &dl_req->dl_config_pdu_list[dl_req->number_pdu]; - memset((void *) dl_config_pdu, 0, - 
sizeof(nfapi_dl_config_request_pdu_t)); - dl_config_pdu->pdu_type = NFAPI_DL_CONFIG_DCI_DL_PDU_TYPE; - dl_config_pdu->pdu_size = (uint8_t) (2 + sizeof(nfapi_dl_config_dci_dl_pdu)); - dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.dci_format = NFAPI_DL_DCI_FORMAT_1; - dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.aggregation_level = get_aggregation(get_bw_index(module_idP, CC_id), - ue_sched_ctl->dl_cqi[CC_id], - format1); - dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.tl.tag = NFAPI_DL_CONFIG_REQUEST_DCI_DL_PDU_REL8_TAG; - dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.rnti = rnti; - dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.rnti_type = 1; // CRNTI : see Table 4-10 from SCF082 - nFAPI specifications - dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.transmission_power = 6000; // equal to RS power - - dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.harq_process = harq_pid; - dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.tpc = tpc; // dont adjust power when retransmitting - dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.new_data_indicator_1 = 1 - UE_list->UE_template[CC_id][UE_id].oldNDI[harq_pid]; - dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.mcs_1 = mcs; - dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.redundancy_version_1 = 0; - //deactivate second codeword - dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.mcs_2 = 0; - dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.redundancy_version_2 = 1; - if (cc[CC_id].tdd_Config != NULL) { //TDD - dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.downlink_assignment_index = (UE_list->UE_template[CC_id][UE_id].DAI - 1) & 3; - LOG_D(MAC, - "[eNB %d] Initial transmission CC_id %d : harq_pid %d, dai %d, mcs %d\n", - module_idP, CC_id, harq_pid, - (UE_list->UE_template[CC_id][UE_id].DAI - 1), - mcs); - } else { - LOG_D(MAC, - "[eNB %d] Initial transmission CC_id %d : harq_pid %d, mcs %d\n", - module_idP, CC_id, harq_pid, mcs); + dl_config_pdu = &dl_req->dl_config_pdu_list[dl_req->number_pdu]; + memset((void *) dl_config_pdu, 0, sizeof(nfapi_dl_config_request_pdu_t)); + dl_config_pdu->pdu_type = 
NFAPI_DL_CONFIG_DCI_DL_PDU_TYPE; + dl_config_pdu->pdu_size = (uint8_t) (2 + sizeof(nfapi_dl_config_dci_dl_pdu)); + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.dci_format = NFAPI_DL_DCI_FORMAT_1; + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.aggregation_level = + get_aggregation(get_bw_index(module_idP, CC_id), ue_sched_ctl->dl_cqi[CC_id], format1); + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.tl.tag = NFAPI_DL_CONFIG_REQUEST_DCI_DL_PDU_REL8_TAG; + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.rnti = rnti; + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.rnti_type = 1; // CRNTI : see Table 4-10 from SCF082 - nFAPI specifications + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.transmission_power = 6000; // equal to RS power + + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.harq_process = harq_pid; + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.tpc = tpc; // dont adjust power when retransmitting + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.new_data_indicator_1 = + 1 - UE_list->UE_template[CC_id][UE_id].oldNDI[harq_pid]; + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.mcs_1 = mcs; + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.redundancy_version_1 = 0; + //deactivate second codeword + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.mcs_2 = 0; + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.redundancy_version_2 = 1; + + if (cc[CC_id].tdd_Config != NULL) { //TDD + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.downlink_assignment_index = + (UE_list->UE_template[CC_id][UE_id].DAI - 1) & 3; + LOG_D(MAC, + "[eNB %d] Initial transmission CC_id %d : harq_pid %d, dai %d, mcs %d\n", + module_idP, CC_id, harq_pid, + (UE_list->UE_template[CC_id][UE_id].DAI - 1), + mcs); + } else { + LOG_D(MAC, + "[eNB %d] Initial transmission CC_id %d : harq_pid %d, mcs %d\n", + module_idP, CC_id, harq_pid, mcs); } @@ -1490,16 +1360,14 @@ schedule_ue_spec(module_id_t module_idP,slice_id_t slice_idP, dl_req->number_pdu++; dl_req->tl.tag = NFAPI_DL_CONFIG_REQUEST_BODY_TAG; - eNB->DL_req[CC_id].sfn_sf = frameP<<4 | subframeP; - 
eNB->DL_req[CC_id].header.message_id = NFAPI_DL_CONFIG_REQUEST; + eNB->DL_req[CC_id].sfn_sf = frameP << 4 | subframeP; + eNB->DL_req[CC_id].header.message_id = NFAPI_DL_CONFIG_REQUEST; - // Toggle NDI for next time - LOG_D(MAC, - "CC_id %d Frame %d, subframeP %d: Toggling Format1 NDI for UE %d (rnti %x/%d) oldNDI %d\n", - CC_id, frameP, subframeP, UE_id, rnti, - harq_pid, - UE_list-> - UE_template[CC_id][UE_id].oldNDI[harq_pid]); + // Toggle NDI for next time + LOG_D(MAC, + "CC_id %d Frame %d, subframeP %d: Toggling Format1 NDI for UE %d (rnti %x/%d) oldNDI %d\n", + CC_id, frameP, subframeP, UE_id, rnti, harq_pid, + UE_list->UE_template[CC_id][UE_id].oldNDI[harq_pid]); UE_list->UE_template[CC_id][UE_id].oldNDI[harq_pid] = 1 - UE_list->UE_template[CC_id][UE_id].oldNDI[harq_pid]; UE_list->UE_template[CC_id][UE_id].oldmcs1[harq_pid] = mcs; @@ -1550,24 +1418,199 @@ schedule_ue_spec(module_id_t module_idP,slice_id_t slice_idP, } } - if (cc[CC_id].tdd_Config != NULL) { // TDD - set_ul_DAI(module_idP, UE_id, CC_id, frameP, subframeP); + if (cc[CC_id].tdd_Config != NULL) { // TDD + set_ul_DAI(module_idP, UE_id, CC_id, frameP, subframeP); } - } // UE_id loop } // CC_id loop - fill_DLSCH_dci(module_idP, frameP, subframeP, mbsfn_flag); stop_meas(&eNB->schedule_dlsch); VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_SCHEDULE_DLSCH, VCD_FUNCTION_OUT); } +//------------------------------------------------------------------------------ +void dlsch_scheduler_interslice_multiplexing(module_id_t Mod_id, + int frameP, + sub_frame_t subframeP, + uint8_t rballoc_sub[NFAPI_CC_MAX][N_RBG_MAX]) +//------------------------------------------------------------------------------ +{ + // FIXME: I'm prototyping the algorithm, so there may be arrays and variables that carry redundant information here and in pre_processor_results struct. 
+ + int UE_id, CC_id, rbg, i; + int N_RB_DL, min_rb_unit, tm; + int owned, used; + + UE_list_t *UE_list = &RC.mac[Mod_id]->UE_list; + slice_info_t *sli = &RC.mac[Mod_id]->slice_info; + UE_sched_ctrl *ue_sched_ctl; + COMMON_channels_t *cc; + int N_RBG[NFAPI_CC_MAX]; + + int slice_sorted_list[MAX_NUM_SLICES]; + int slice_idx; + int8_t free_rbgs_map[NFAPI_CC_MAX][N_RBG_MAX]; + int has_traffic[NFAPI_CC_MAX][MAX_NUM_SLICES]; + uint8_t allocation_mask[NFAPI_CC_MAX][N_RBG_MAX]; + + uint16_t (*nb_rbs_remaining)[MAX_MOBILES_PER_ENB]; + uint16_t (*nb_rbs_required)[MAX_MOBILES_PER_ENB]; + uint8_t (*MIMO_mode_indicator)[N_RBG_MAX]; + + // Initialize the free RBGs map + // free_rbgs_map[CC_id][rbg] = -1 if RBG is allocated, + // otherwise it contains the id of the slice it belongs to. + // (Information about slicing must be retained to deal with isolation). + // FIXME: This method does not consider RBGs that are free and belong to no slices + for (CC_id = 0; CC_id < RC.nb_mac_CC[Mod_id]; ++CC_id) { + cc = &RC.mac[Mod_id]->common_channels[CC_id]; + N_RBG[CC_id] = to_rbg(cc->mib->message.dl_Bandwidth); + for (rbg = 0; rbg < N_RBG[CC_id]; ++rbg) { + for (i = 0; i < sli->n_dl; ++i) { + owned = sli->pre_processor_results[i].slice_allocation_mask[CC_id][rbg]; + if (owned) { + used = rballoc_sub[CC_id][rbg]; + free_rbgs_map[CC_id][rbg] = used ? -1 : i; + break; + } + } + } + } + + // Find out which slices need other resources. 
+ // FIXME: I don't think is really needed since we check nb_rbs_remaining later + for (CC_id = 0; CC_id < RC.nb_mac_CC[Mod_id]; ++CC_id) { + for (i = 0; i < sli->n_dl; ++i) { + has_traffic[CC_id][i] = 0; + for (UE_id = 0; UE_id < MAX_MOBILES_PER_ENB; ++UE_id) { + if (sli->pre_processor_results[i].nb_rbs_remaining[CC_id][UE_id] > 0) { + has_traffic[CC_id][i] = 1; + break; + } + } + } + } + + slice_priority_sort(Mod_id, slice_sorted_list); + + // MULTIPLEXING + // This part is an adaptation of dlsch_scheduler_pre_processor_allocate() code + for (CC_id = 0; CC_id < RC.nb_mac_CC[Mod_id]; ++CC_id) { + + N_RB_DL = to_prb(RC.mac[Mod_id]->common_channels[CC_id].mib->message.dl_Bandwidth); + min_rb_unit = get_min_rb_unit(Mod_id, CC_id); + + for (i = 0; i < sli->n_dl; ++i) { + slice_idx = slice_sorted_list[i]; + + if (has_traffic[CC_id][slice_idx] == 0) continue; + + // Build an ad-hoc allocation mask fo the slice + for (rbg = 0; rbg < N_RBG[CC_id]; ++rbg) { + if (free_rbgs_map[CC_id][rbg] == -1) { + // RBG is already allocated + allocation_mask[CC_id][rbg] = 0; + continue; + } + if (sli->dl[free_rbgs_map[CC_id][rbg]].isol == 1) { + // RBG belongs to an isolated slice + allocation_mask[CC_id][rbg] = 0; + continue; + } + // RBG is free + allocation_mask[CC_id][rbg] = 1; + } + + // Sort UE again + // (UE list gets sorted every time pre_processor is called so it is probably dirty at this point) + // FIXME: There is only one UE_list for all slices, so it must be sorted again each time we use it + sort_UEs(Mod_id, slice_idx, frameP, subframeP); + + nb_rbs_remaining = sli->pre_processor_results[slice_idx].nb_rbs_remaining; + nb_rbs_required = sli->pre_processor_results[slice_idx].nb_rbs_required; + MIMO_mode_indicator = sli->pre_processor_results[slice_idx].MIMO_mode_indicator; + + // Allocation + for (UE_id = UE_list->head; UE_id >= 0; UE_id = UE_list->next[UE_id]) { + ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id]; + tm = get_tmode(Mod_id, CC_id, UE_id); + + for (rbg = 0; rbg < 
N_RBG[CC_id]; ++rbg) { + + // FIXME: I think that some of these checks are redundant + if (allocation_mask[CC_id][rbg] == 0) continue; + if (rballoc_sub[CC_id][rbg] != 0) continue; + if (ue_sched_ctl->rballoc_sub_UE[CC_id][rbg] != 0) continue; + if (nb_rbs_remaining[CC_id][UE_id] <= 0) continue; + if (ue_sched_ctl->pre_nb_available_rbs[CC_id] >= nb_rbs_required[CC_id][UE_id]) continue; + if (ue_sched_ctl->dl_pow_off[CC_id] == 0) continue; + + if ((rbg == N_RBG[CC_id] - 1) && ((N_RB_DL == 25) || (N_RB_DL == 50))) { + // Allocating last, smaller RBG + if (nb_rbs_remaining[CC_id][UE_id] >= min_rb_unit - 1) { + rballoc_sub[CC_id][rbg] = 1; + free_rbgs_map[CC_id][rbg] = -1; + ue_sched_ctl->rballoc_sub_UE[CC_id][rbg] = 1; + MIMO_mode_indicator[CC_id][rbg] = 1; + if (tm == 5) { + ue_sched_ctl->dl_pow_off[CC_id] = 1; + } + nb_rbs_remaining[CC_id][UE_id] = nb_rbs_remaining[CC_id][UE_id] - min_rb_unit + 1; + ue_sched_ctl->pre_nb_available_rbs[CC_id] = ue_sched_ctl->pre_nb_available_rbs[CC_id] + min_rb_unit - 1; + } + } else { + // Allocating a standard-sized RBG + if (nb_rbs_remaining[CC_id][UE_id] >= min_rb_unit) { + rballoc_sub[CC_id][rbg] = 1; + free_rbgs_map[CC_id][rbg] = -1; + ue_sched_ctl->rballoc_sub_UE[CC_id][rbg] = 1; + MIMO_mode_indicator[CC_id][rbg] = 1; + if (tm == 5) { + ue_sched_ctl->dl_pow_off[CC_id] = 1; + } + nb_rbs_remaining[CC_id][UE_id] = nb_rbs_remaining[CC_id][UE_id] - min_rb_unit; + ue_sched_ctl->pre_nb_available_rbs[CC_id] = ue_sched_ctl->pre_nb_available_rbs[CC_id] + min_rb_unit; + } + } + } + } + } + } +} + +//------------------------------------------------------------------------------ +void dlsch_scheduler_qos_multiplexing(module_id_t Mod_id, int frameP, sub_frame_t subframeP) +//------------------------------------------------------------------------------ +{ + int UE_id, CC_id, i; + UE_list_t *UE_list = &RC.mac[Mod_id]->UE_list; + slice_info_t *sli = &RC.mac[Mod_id]->slice_info; + //UE_sched_ctrl *ue_sched_ctl; + + for (CC_id = 0; CC_id < 
RC.nb_mac_CC[Mod_id]; ++CC_id) { + for (i = 0; i < sli->n_dl; ++i) { + + // Sort UE again + // FIXME: There is only one UE_list for all slices, so it must be sorted again each time we use it + sort_UEs(Mod_id, (uint8_t)i, frameP, subframeP); + + for (UE_id = UE_list->head; UE_id >= 0; UE_id = UE_list->next[UE_id]) { + //ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id]; + + // TODO: Do something here + // ue_sched_ctl->pre_nb_available_rbs[CC_id]; + } + } + } +} + + //------------------------------------------------------------------------------ void fill_DLSCH_dci(module_id_t module_idP, - frame_t frameP, sub_frame_t subframeP, int *mbsfn_flagP) + frame_t frameP, sub_frame_t subframeP, int *mbsfn_flagP) //------------------------------------------------------------------------------ { @@ -1591,9 +1634,9 @@ fill_DLSCH_dci(module_id_t module_idP, start_meas(&eNB->fill_DLSCH_dci); VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME - (VCD_SIGNAL_DUMPER_FUNCTIONS_FILL_DLSCH_DCI, VCD_FUNCTION_IN); + (VCD_SIGNAL_DUMPER_FUNCTIONS_FILL_DLSCH_DCI, VCD_FUNCTION_IN); - for (CC_id = 0; CC_id < MAX_NUM_CCs; CC_id++) { + for (CC_id = 0; CC_id < RC.nb_mac_CC[module_idP]; CC_id++) { LOG_D(MAC, "Doing fill DCI for CC_id %d\n", CC_id); if (mbsfn_flagP[CC_id] > 0) @@ -1605,42 +1648,43 @@ fill_DLSCH_dci(module_id_t module_idP, // UE specific DCIs for (UE_id = UE_list->head; UE_id >= 0; - UE_id = UE_list->next[UE_id]) { + UE_id = UE_list->next[UE_id]) { LOG_T(MAC, "CC_id %d, UE_id: %d => status %d\n", CC_id, UE_id, - eNB_dlsch_info[module_idP][CC_id][UE_id].status); + eNB_dlsch_info[module_idP][CC_id][UE_id].status); if (eNB_dlsch_info[module_idP][CC_id][UE_id].status == S_DL_SCHEDULED) { - // clear scheduling flag - eNB_dlsch_info[module_idP][CC_id][UE_id].status = S_DL_WAITING; - rnti = UE_RNTI(module_idP, UE_id); + // clear scheduling flag + eNB_dlsch_info[module_idP][CC_id][UE_id].status = S_DL_WAITING; + rnti = UE_RNTI(module_idP, UE_id); harq_pid = 
frame_subframe2_dl_harq_pid(cc->tdd_Config,frameP ,subframeP); - nb_rb = UE_list->UE_template[CC_id][UE_id].nb_rb[harq_pid]; + nb_rb = UE_list->UE_template[CC_id][UE_id].nb_rb[harq_pid]; - /// Synchronizing rballoc with rballoc_sub - for (i = 0; i < N_RBG; i++) { - rballoc_sub[i] = UE_list->UE_template[CC_id][UE_id].rballoc_subband[harq_pid][i]; - } + /// Synchronizing rballoc with rballoc_sub + for (i = 0; i < N_RBG; i++) { + rballoc_sub[i] = UE_list->UE_template[CC_id][UE_id].rballoc_subband[harq_pid][i]; + } - nfapi_dl_config_request_t *DL_req = &RC.mac[module_idP]->DL_req[0]; - nfapi_dl_config_request_pdu_t *dl_config_pdu; - - for (i = 0; - i < DL_req[CC_id].dl_config_request_body.number_pdu; - i++) { - dl_config_pdu = &DL_req[CC_id].dl_config_request_body.dl_config_pdu_list[i]; - if ((dl_config_pdu->pdu_type == NFAPI_DL_CONFIG_DCI_DL_PDU_TYPE) - && (dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.rnti == rnti) - && (dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.dci_format != 1)) { - dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.resource_block_coding = allocate_prbs_sub(nb_rb, N_RB_DL, N_RBG,rballoc_sub); - dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.resource_allocation_type = 0; - } else - if ((dl_config_pdu->pdu_type == NFAPI_DL_CONFIG_DLSCH_PDU_TYPE) - && (dl_config_pdu->dlsch_pdu.dlsch_pdu_rel8.rnti == rnti) - && (dl_config_pdu->dlsch_pdu.dlsch_pdu_rel8.resource_allocation_type == 0)) { - dl_config_pdu->dlsch_pdu.dlsch_pdu_rel8.resource_block_coding =allocate_prbs_sub(nb_rb, N_RB_DL, N_RBG,rballoc_sub); - } - } + nfapi_dl_config_request_t *DL_req = &RC.mac[module_idP]->DL_req[0]; + nfapi_dl_config_request_pdu_t *dl_config_pdu; + + for (i = 0; + i < DL_req[CC_id].dl_config_request_body.number_pdu; + i++) { + dl_config_pdu = &DL_req[CC_id].dl_config_request_body.dl_config_pdu_list[i]; + if ((dl_config_pdu->pdu_type == NFAPI_DL_CONFIG_DCI_DL_PDU_TYPE) + && (dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.rnti == rnti) + && (dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.dci_format != 
1)) { + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.resource_block_coding = allocate_prbs_sub(nb_rb, N_RB_DL, N_RBG, + rballoc_sub); + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.resource_allocation_type = 0; + } else if ((dl_config_pdu->pdu_type == NFAPI_DL_CONFIG_DLSCH_PDU_TYPE) + && (dl_config_pdu->dlsch_pdu.dlsch_pdu_rel8.rnti == rnti) + && (dl_config_pdu->dlsch_pdu.dlsch_pdu_rel8.resource_allocation_type == 0)) { + dl_config_pdu->dlsch_pdu.dlsch_pdu_rel8.resource_block_coding = allocate_prbs_sub(nb_rb, N_RB_DL, N_RBG, + rballoc_sub); + } + } } } @@ -1652,8 +1696,8 @@ fill_DLSCH_dci(module_id_t module_idP, //------------------------------------------------------------------------------ unsigned char *get_dlsch_sdu(module_id_t module_idP, - int CC_id, frame_t frameP, rnti_t rntiP, - uint8_t TBindex) + int CC_id, frame_t frameP, rnti_t rntiP, + uint8_t TBindex) //------------------------------------------------------------------------------ { @@ -1662,28 +1706,28 @@ unsigned char *get_dlsch_sdu(module_id_t module_idP, if (rntiP == SI_RNTI) { LOG_D(MAC, "[eNB %d] CC_id %d Frame %d Get DLSCH sdu for BCCH \n", - module_idP, CC_id, frameP); + module_idP, CC_id, frameP); return ((unsigned char *) &eNB->common_channels[CC_id].BCCH_pdu.payload[0]); } - if (rntiP==P_RNTI) { - LOG_D(MAC,"[eNB %d] CC_id %d Frame %d Get PCH sdu for PCCH \n", module_idP, CC_id, frameP); + if (rntiP == P_RNTI) { + LOG_D(MAC, "[eNB %d] CC_id %d Frame %d Get PCH sdu for PCCH \n", module_idP, CC_id, frameP); - return((unsigned char *)&eNB->common_channels[CC_id].PCCH_pdu.payload[0]); + return ((unsigned char *) &eNB->common_channels[CC_id].PCCH_pdu.payload[0]); } - UE_id = find_UE_id(module_idP,rntiP); + UE_id = find_UE_id(module_idP, rntiP); if (UE_id != -1) { LOG_D(MAC, - "[eNB %d] Frame %d: CC_id %d Get DLSCH sdu for rnti %x => UE_id %d\n", - module_idP, frameP, CC_id, rntiP, UE_id); + "[eNB %d] Frame %d: CC_id %d Get DLSCH sdu for rnti %x => UE_id %d\n", + module_idP, frameP, CC_id, rntiP, 
UE_id); return ((unsigned char *) &eNB->UE_list.DLSCH_pdu[CC_id][TBindex][UE_id].payload[0]); } else { LOG_E(MAC, - "[eNB %d] Frame %d: CC_id %d UE with RNTI %x does not exist\n", - module_idP, frameP, CC_id, rntiP); + "[eNB %d] Frame %d: CC_id %d UE with RNTI %x does not exist\n", + module_idP, frameP, CC_id, rntiP); return NULL; } @@ -1700,7 +1744,7 @@ update_ul_dci(module_id_t module_idP, nfapi_hi_dci0_request_t *HI_DCI0_req = &RC.mac[module_idP]->HI_DCI0_req[CC_idP][subframe]; nfapi_hi_dci0_request_pdu_t *hi_dci0_pdu = - &HI_DCI0_req->hi_dci0_request_body.hi_dci0_pdu_list[0]; + &HI_DCI0_req->hi_dci0_request_body.hi_dci0_pdu_list[0]; COMMON_channels_t *cc = &RC.mac[module_idP]->common_channels[CC_idP]; int i; @@ -1711,8 +1755,8 @@ update_ul_dci(module_id_t module_idP, i++) { if ((hi_dci0_pdu[i].pdu_type == NFAPI_HI_DCI0_DCI_PDU_TYPE) && - (hi_dci0_pdu[i].dci_pdu.dci_pdu_rel8.rnti == rntiP)) - hi_dci0_pdu[i].dci_pdu.dci_pdu_rel8.dl_assignment_index = (daiP - 1) & 3; + (hi_dci0_pdu[i].dci_pdu.dci_pdu_rel8.rnti == rntiP)) + hi_dci0_pdu[i].dci_pdu.dci_pdu_rel8.dl_assignment_index = (daiP - 1) & 3; } } @@ -1722,84 +1766,83 @@ update_ul_dci(module_id_t module_idP, //------------------------------------------------------------------------------ void set_ue_dai(sub_frame_t subframeP, - int UE_id, uint8_t CC_id, uint8_t tdd_config, - UE_list_t * UE_list) + int UE_id, uint8_t CC_id, uint8_t tdd_config, + UE_list_t *UE_list) //------------------------------------------------------------------------------ { switch (tdd_config) { - case 0: - if ((subframeP == 0) || (subframeP == 1) || (subframeP == 3) - || (subframeP == 5) || (subframeP == 6) || (subframeP == 8)) { - UE_list->UE_template[CC_id][UE_id].DAI = 0; - } + case 0: + if ((subframeP == 0) || (subframeP == 1) || (subframeP == 3) + || (subframeP == 5) || (subframeP == 6) || (subframeP == 8)) { + UE_list->UE_template[CC_id][UE_id].DAI = 0; + } - break; + break; - case 1: - if ((subframeP == 0) || (subframeP == 4) || 
(subframeP == 5) - || (subframeP == 9)) { - UE_list->UE_template[CC_id][UE_id].DAI = 0; - } + case 1: + if ((subframeP == 0) || (subframeP == 4) || (subframeP == 5) + || (subframeP == 9)) { + UE_list->UE_template[CC_id][UE_id].DAI = 0; + } - break; + break; - case 2: - if ((subframeP == 4) || (subframeP == 5)) { - UE_list->UE_template[CC_id][UE_id].DAI = 0; - } + case 2: + if ((subframeP == 4) || (subframeP == 5)) { + UE_list->UE_template[CC_id][UE_id].DAI = 0; + } - break; + break; - case 3: - if ((subframeP == 5) || (subframeP == 7) || (subframeP == 9)) { - UE_list->UE_template[CC_id][UE_id].DAI = 0; - } + case 3: + if ((subframeP == 5) || (subframeP == 7) || (subframeP == 9)) { + UE_list->UE_template[CC_id][UE_id].DAI = 0; + } - break; + break; - case 4: - if ((subframeP == 0) || (subframeP == 6)) { - UE_list->UE_template[CC_id][UE_id].DAI = 0; - } + case 4: + if ((subframeP == 0) || (subframeP == 6)) { + UE_list->UE_template[CC_id][UE_id].DAI = 0; + } - break; + break; - case 5: - if (subframeP == 9) { - UE_list->UE_template[CC_id][UE_id].DAI = 0; - } + case 5: + if (subframeP == 9) { + UE_list->UE_template[CC_id][UE_id].DAI = 0; + } - break; + break; - case 6: - if ((subframeP == 0) || (subframeP == 1) || (subframeP == 5) - || (subframeP == 6) || (subframeP == 9)) { - UE_list->UE_template[CC_id][UE_id].DAI = 0; - } + case 6: + if ((subframeP == 0) || (subframeP == 1) || (subframeP == 5) + || (subframeP == 6) || (subframeP == 9)) { + UE_list->UE_template[CC_id][UE_id].DAI = 0; + } - break; + break; - default: - UE_list->UE_template[CC_id][UE_id].DAI = 0; - LOG_I(MAC, "unknow TDD config %d\n", tdd_config); - break; + default: + UE_list->UE_template[CC_id][UE_id].DAI = 0; + LOG_I(MAC, "unknown TDD config %d\n", tdd_config); + break; } } -void schedule_PCH(module_id_t module_idP,frame_t frameP,sub_frame_t subframeP) -{ +void schedule_PCH(module_id_t module_idP, frame_t frameP, sub_frame_t subframeP) { /* DCI:format 1A/1C P-RNTI:0xFFFE */ /* 
PDU:eNB_rrc_inst[Mod_idP].common_channels[CC_id].PCCH_pdu.payload */ - uint16_t pcch_sdu_length; - int mcs = -1; - int CC_id; - eNB_MAC_INST *eNB = RC.mac[module_idP]; - COMMON_channels_t *cc; - uint8_t *vrb_map; - int n_rb_dl; - int first_rb = -1; - nfapi_dl_config_request_pdu_t *dl_config_pdu; - nfapi_tx_request_pdu_t *TX_req; + uint16_t pcch_sdu_length; + int mcs = -1; + int CC_id; + eNB_MAC_INST *eNB = RC.mac[module_idP]; + COMMON_channels_t *cc; + uint8_t *vrb_map; + int n_rb_dl; + int first_rb = -1; + nfapi_dl_config_request_pdu_t *dl_config_pdu; + nfapi_tx_request_pdu_t *TX_req; nfapi_dl_config_request_body_t *dl_req; #ifdef FORMAT1C int gap_index = 0; /* indicate which gap(1st or 2nd) is used (0:1st) */ @@ -1823,9 +1866,9 @@ void schedule_PCH(module_id_t module_idP,frame_t frameP,sub_frame_t subframeP) start_meas(&eNB->schedule_pch); - for (CC_id=0; CC_id<MAX_NUM_CCs; CC_id++) { + for (CC_id = 0; CC_id < RC.nb_mac_CC[module_idP]; CC_id++) { cc = &eNB->common_channels[CC_id]; - vrb_map = (void*)&cc->vrb_map; + vrb_map = (void *) &cc->vrb_map; n_rb_dl = to_prb(cc->mib->message.dl_Bandwidth); dl_req = &eNB->DL_req[CC_id].dl_config_request_body; for (uint16_t i = 0; i < MAX_MOBILES_PER_ENB; i++) { @@ -1833,17 +1876,18 @@ void schedule_PCH(module_id_t module_idP,frame_t frameP,sub_frame_t subframeP) continue; } if (frameP % UE_PF_PO[CC_id][i].T == UE_PF_PO[CC_id][i].PF_min && subframeP == UE_PF_PO[CC_id][i].PO) { - pcch_sdu_length = mac_rrc_data_req(module_idP, + pcch_sdu_length = mac_rrc_data_req(module_idP, CC_id, frameP, - PCCH,1, + PCCH, 1, &cc->PCCH_pdu.payload[0], i); // used for ue index - if (pcch_sdu_length == 0) { - LOG_D(MAC,"[eNB %d] Frame %d subframe %d: PCCH not active(size = 0 byte)\n", module_idP,frameP, subframeP); - continue; - } - LOG_D(MAC,"[eNB %d] Frame %d subframe %d: PCCH->PCH CC_id %d UE_id %d, Received %d bytes \n", module_idP, frameP, subframeP, CC_id,i, pcch_sdu_length); + if (pcch_sdu_length == 0) { + LOG_D(MAC, "[eNB %d] Frame %d 
subframe %d: PCCH not active(size = 0 byte)\n", module_idP, frameP, subframeP); + continue; + } + LOG_D(MAC, "[eNB %d] Frame %d subframe %d: PCCH->PCH CC_id %d UE_id %d, Received %d bytes \n", module_idP, + frameP, subframeP, CC_id, i, pcch_sdu_length); #ifdef FORMAT1C //NO SIB if ((subframeP == 0 || subframeP == 1 || subframeP == 2 || subframeP == 4 || subframeP == 6 || subframeP == 9) || @@ -1968,56 +2012,55 @@ void schedule_PCH(module_id_t module_idP,frame_t frameP,sub_frame_t subframeP) } } - vrb_map[first_rb] = 1; - vrb_map[first_rb+1] = 1; - vrb_map[first_rb+2] = 1; - vrb_map[first_rb+3] = 1; - /* Get MCS for length of PCH */ - if (pcch_sdu_length <= get_TBS_DL(0,3)) { - mcs=0; - } else if (pcch_sdu_length <= get_TBS_DL(1,3)) { - mcs=1; - } else if (pcch_sdu_length <= get_TBS_DL(2,3)) { - mcs=2; - } else if (pcch_sdu_length <= get_TBS_DL(3,3)) { - mcs=3; - } else if (pcch_sdu_length <= get_TBS_DL(4,3)) { - mcs=4; - } else if (pcch_sdu_length <= get_TBS_DL(5,3)) { - mcs=5; - } else if (pcch_sdu_length <= get_TBS_DL(6,3)) { - mcs=6; - } else if (pcch_sdu_length <= get_TBS_DL(7,3)) { - mcs=7; - } else if (pcch_sdu_length <= get_TBS_DL(8,3)) { - mcs=8; - } else if (pcch_sdu_length <= get_TBS_DL(9,3)) { - mcs=9; - } + vrb_map[first_rb] = 1; + vrb_map[first_rb + 1] = 1; + vrb_map[first_rb + 2] = 1; + vrb_map[first_rb + 3] = 1; + /* Get MCS for length of PCH */ + if (pcch_sdu_length <= get_TBS_DL(0, 3)) { + mcs = 0; + } else if (pcch_sdu_length <= get_TBS_DL(1, 3)) { + mcs = 1; + } else if (pcch_sdu_length <= get_TBS_DL(2, 3)) { + mcs = 2; + } else if (pcch_sdu_length <= get_TBS_DL(3, 3)) { + mcs = 3; + } else if (pcch_sdu_length <= get_TBS_DL(4, 3)) { + mcs = 4; + } else if (pcch_sdu_length <= get_TBS_DL(5, 3)) { + mcs = 5; + } else if (pcch_sdu_length <= get_TBS_DL(6, 3)) { + mcs = 6; + } else if (pcch_sdu_length <= get_TBS_DL(7, 3)) { + mcs = 7; + } else if (pcch_sdu_length <= get_TBS_DL(8, 3)) { + mcs = 8; + } else if (pcch_sdu_length <= get_TBS_DL(9, 3)) { + 
mcs = 9; + } #endif - dl_config_pdu = &dl_req->dl_config_pdu_list[dl_req->number_pdu]; - memset((void*)dl_config_pdu,0,sizeof(nfapi_dl_config_request_pdu_t)); - dl_config_pdu->pdu_type = NFAPI_DL_CONFIG_DCI_DL_PDU_TYPE; - dl_config_pdu->pdu_size = (uint8_t)(2+sizeof(nfapi_dl_config_dci_dl_pdu)); + dl_config_pdu = &dl_req->dl_config_pdu_list[dl_req->number_pdu]; + memset((void *) dl_config_pdu, 0, sizeof(nfapi_dl_config_request_pdu_t)); + dl_config_pdu->pdu_type = NFAPI_DL_CONFIG_DCI_DL_PDU_TYPE; + dl_config_pdu->pdu_size = (uint8_t) (2 + sizeof(nfapi_dl_config_dci_dl_pdu)); #ifdef FORMAT1C - dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.dci_format = NFAPI_DL_DCI_FORMAT_1C; - dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.resource_block_coding = getRIV(n_vrb_dl/n_rb_step, first_rb/n_rb_step, Lcrbs/n_rb_step); - dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.ngap = n_gap; + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.dci_format = NFAPI_DL_DCI_FORMAT_1C; + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.resource_block_coding = getRIV(n_vrb_dl/n_rb_step, first_rb/n_rb_step, Lcrbs/n_rb_step); + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.ngap = n_gap; #else - dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.dci_format = NFAPI_DL_DCI_FORMAT_1A; - dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.harq_process = 0; - dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.tpc = 1; // no TPC - dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.new_data_indicator_1 = 1; - dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.redundancy_version_1 = 1; - dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.resource_block_coding = getRIV(n_rb_dl,first_rb,4); - dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.virtual_resource_block_assignment_flag = 0; + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.dci_format = NFAPI_DL_DCI_FORMAT_1A; + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.harq_process = 0; + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.tpc = 1; // no TPC + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.new_data_indicator_1 = 1; + 
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.redundancy_version_1 = 1; + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.resource_block_coding = getRIV(n_rb_dl, first_rb, 4); + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.virtual_resource_block_assignment_flag = 0; #endif - dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.aggregation_level = 4; - dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.rnti = 0xFFFE; // P-RNTI - dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.rnti_type = 2; // P-RNTI : see Table 4-10 from SCF082 - nFAPI specifications - dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.transmission_power = 6000; // equal to RS power - dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.mcs_1 = mcs; - + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.aggregation_level = 4; + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.rnti = 0xFFFE; // P-RNTI + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.rnti_type = 2; // P-RNTI : see Table 4-10 from SCF082 - nFAPI specifications + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.transmission_power = 6000; // equal to RS power + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.mcs_1 = mcs; if (!CCE_allocation_infeasible(module_idP, CC_id, 0, subframeP, dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.aggregation_level, P_RNTI)) { LOG_D(MAC,"Frame %d: Subframe %d : Adding common DCI for P_RNTI\n", frameP,subframeP); @@ -2034,12 +2077,12 @@ void schedule_PCH(module_id_t module_idP,frame_t frameP,sub_frame_t subframeP) dl_config_pdu->dlsch_pdu.dlsch_pdu_rel8.pdu_index = eNB->pdu_index[CC_id]; dl_config_pdu->dlsch_pdu.dlsch_pdu_rel8.rnti = 0xFFFE; #ifdef FORMAT1C - dl_config_pdu->dlsch_pdu.dlsch_pdu_rel8.resource_allocation_type = 3; // format 1C - dl_config_pdu->dlsch_pdu.dlsch_pdu_rel8.resource_block_coding = getRIV(n_vrb_dl/n_rb_step, first_rb/n_rb_step, Lcrbs/n_rb_step); + dl_config_pdu->dlsch_pdu.dlsch_pdu_rel8.resource_allocation_type = 3; // format 1C + dl_config_pdu->dlsch_pdu.dlsch_pdu_rel8.resource_block_coding = getRIV(n_vrb_dl/n_rb_step, first_rb/n_rb_step, Lcrbs/n_rb_step); #else - 
dl_config_pdu->dlsch_pdu.dlsch_pdu_rel8.resource_allocation_type = 2; // format 1A/1B/1D - dl_config_pdu->dlsch_pdu.dlsch_pdu_rel8.resource_block_coding = getRIV(n_rb_dl,first_rb,4); - dl_config_pdu->dlsch_pdu.dlsch_pdu_rel8.virtual_resource_block_assignment_flag = 0; // localized + dl_config_pdu->dlsch_pdu.dlsch_pdu_rel8.resource_allocation_type = 2; // format 1A/1B/1D + dl_config_pdu->dlsch_pdu.dlsch_pdu_rel8.resource_block_coding = getRIV(n_rb_dl, first_rb, 4); + dl_config_pdu->dlsch_pdu.dlsch_pdu_rel8.virtual_resource_block_assignment_flag = 0; // localized #endif dl_config_pdu->dlsch_pdu.dlsch_pdu_rel8.modulation = 2; //QPSK dl_config_pdu->dlsch_pdu.dlsch_pdu_rel8.redundancy_version = 1; @@ -2118,3 +2161,27 @@ void schedule_PCH(module_id_t module_idP,frame_t frameP,sub_frame_t subframeP) stop_meas(&eNB->schedule_pch); return; } + +static int slice_priority_compare(const void *_a, const void *_b, void *_c) +{ + const int slice_id1 = *(const int *) _a; + const int slice_id2 = *(const int *) _b; + const module_id_t Mod_id = *(int *) _c; + const slice_info_t *sli = &RC.mac[Mod_id]->slice_info; + + if (sli->dl[slice_id1].prio > sli->dl[slice_id2].prio) { + return -1; + } + return 1; +} + +void slice_priority_sort(module_id_t Mod_id, int slice_list[MAX_NUM_SLICES]) +{ + int i; + for (i = 0; i < RC.mac[Mod_id]->slice_info.n_dl; ++i) { + slice_list[i] = i; + } + + qsort_r(slice_list, RC.mac[Mod_id]->slice_info.n_dl, sizeof(int), + slice_priority_compare, &Mod_id); +} diff --git a/openair2/LAYER2/MAC/eNB_scheduler_fairRR.c b/openair2/LAYER2/MAC/eNB_scheduler_fairRR.c index 06761ed59f1771d2e91bb164750af2aabab7451f..9ab4306000ffe05ba7686a2fcf01f14e75ccb4cc 100644 --- a/openair2/LAYER2/MAC/eNB_scheduler_fairRR.c +++ b/openair2/LAYER2/MAC/eNB_scheduler_fairRR.c @@ -542,8 +542,9 @@ void dlsch_scheduler_pre_processor_fairRR (module_id_t Mod_id, uint16_t temp_total_rbs_count; unsigned char temp_total_ue_count; unsigned char MIMO_mode_indicator[MAX_NUM_CCs][N_RBG_MAX]; + 
uint8_t slice_allocation[MAX_NUM_CCs][N_RBG_MAX]; int UE_id, i; - uint16_t j; + uint16_t j,c; uint16_t nb_rbs_required[MAX_NUM_CCs][NUMBER_OF_UE_MAX]; uint16_t nb_rbs_required_remaining[MAX_NUM_CCs][NUMBER_OF_UE_MAX]; // uint16_t nb_rbs_required_remaining_1[MAX_NUM_CCs][NUMBER_OF_UE_MAX]; @@ -554,8 +555,6 @@ void dlsch_scheduler_pre_processor_fairRR (module_id_t Mod_id, uint8_t CC_id; UE_list_t *UE_list = &RC.mac[Mod_id]->UE_list; - int N_RB_DL; - int transmission_mode = 0; UE_sched_ctrl *ue_sched_ctl; // int rrc_status = RRC_IDLE; COMMON_channels_t *cc; @@ -590,14 +589,14 @@ void dlsch_scheduler_pre_processor_fairRR (module_id_t Mod_id, dlsch_scheduler_pre_processor_reset(Mod_id, - UE_id, - CC_id, + 0, frameP, subframeP, - N_RBG[CC_id], + min_rb_unit, (uint16_t (*)[NUMBER_OF_UE_MAX])nb_rbs_required, rballoc_sub, - MIMO_mode_indicator); + MIMO_mode_indicator, + mbsfn_flag); } } @@ -621,7 +620,6 @@ void dlsch_scheduler_pre_processor_fairRR (module_id_t Mod_id, average_rbs_per_user[CC_id] = 0; cc = &RC.mac[Mod_id]->common_channels[CC_id]; // Get total available RBS count and total UE count - N_RB_DL = to_prb(cc->mib->message.dl_Bandwidth); temp_total_rbs_count = RC.mac[Mod_id]->eNB_stats[CC_id].available_prbs; temp_total_ue_count = dlsch_ue_select[CC_id].ue_num; @@ -658,19 +656,22 @@ void dlsch_scheduler_pre_processor_fairRR (module_id_t Mod_id, nb_rbs_required_remaining[CC_id][UE_id] = cmin(average_rbs_per_user[CC_id], dlsch_ue_select[CC_id].list[i].nb_rb); } - transmission_mode = get_tmode(Mod_id,CC_id,UE_id); + /* slicing support has been introduced into the scheduler. Provide dummy + * data so that the preprocessor "simply works" */ + for (c = 0; c < MAX_NUM_CCs; ++c) + for (j = 0; j < N_RBG_MAX; ++j) + slice_allocation[c][j] = 1; LOG_T(MAC,"calling dlsch_scheduler_pre_processor_allocate .. 
\n "); dlsch_scheduler_pre_processor_allocate (Mod_id, UE_id, CC_id, N_RBG[CC_id], - transmission_mode, min_rb_unit[CC_id], - N_RB_DL, (uint16_t (*)[NUMBER_OF_UE_MAX])nb_rbs_required, (uint16_t (*)[NUMBER_OF_UE_MAX])nb_rbs_required_remaining, rballoc_sub, + slice_allocation, MIMO_mode_indicator); temp_total_rbs_count -= ue_sched_ctl->pre_nb_available_rbs[CC_id]; temp_total_ue_count--; diff --git a/openair2/LAYER2/MAC/eNB_scheduler_primitives.c b/openair2/LAYER2/MAC/eNB_scheduler_primitives.c index a41e0db6c089acc0a19f0e687e2b429db97f91ec..c9d3d9dbacda4506cac3d8ea568f04899431d40e 100644 --- a/openair2/LAYER2/MAC/eNB_scheduler_primitives.c +++ b/openair2/LAYER2/MAC/eNB_scheduler_primitives.c @@ -62,8 +62,6 @@ extern uint16_t frame_cnt; extern RAN_CONTEXT_t RC; -extern int n_active_slices; - int choose(int n, int k) { int res = 1; @@ -1853,7 +1851,7 @@ rnti_t UE_RNTI(module_id_t mod_idP, int ue_idP) return (rnti); } - LOG_D(MAC, "[eNB %d] Couldn't find RNTI for UE %d\n", mod_idP, ue_idP); + //LOG_D(MAC, "[eNB %d] Couldn't find RNTI for UE %d\n", mod_idP, ue_idP); //display_backtrace(); return (NOT_A_RNTI); } @@ -1963,6 +1961,9 @@ int add_new_ue(module_id_t mod_idP, int cc_idP, rnti_t rntiP, int harq_pidP sizeof(eNB_UE_STATS)); UE_list->UE_sched_ctrl[UE_id].ue_reestablishment_reject_timer = 0; + /* default slice in case there was something different */ + UE_list->assoc_dl_slice_idx[UE_id] = 0; + UE_list->assoc_ul_slice_idx[UE_id] = 0; UE_list->UE_sched_ctrl[UE_id].ta_update = 31; @@ -4536,19 +4537,31 @@ harq_indication(module_id_t mod_idP, int CC_idP, frame_t frameP, // Flexran Slicing functions -uint16_t flexran_nb_rbs_allowed_slice(float rb_percentage, int total_rbs) +uint16_t nb_rbs_allowed_slice(float rb_percentage, int total_rbs) { return (uint16_t) floor(rb_percentage * total_rbs); } -int ue_slice_membership(int UE_id, int slice_id) +int ue_dl_slice_membership(module_id_t mod_id, int UE_id, int slice_idx) { - if ((slice_id < 0) || (slice_id > n_active_slices)) - 
LOG_W(MAC, "out of range slice id %d\n", slice_id); - + if ((slice_idx < 0) + || (slice_idx >= RC.mac[mod_id]->slice_info.n_dl)) { + LOG_W(MAC, "out of range slice index %d (slice ID %d)\n", + slice_idx, RC.mac[mod_id]->slice_info.dl[slice_idx].id); + return 0; + } + return RC.mac[mod_id]->UE_list.active[UE_id] == TRUE + && RC.mac[mod_id]->UE_list.assoc_dl_slice_idx[UE_id] == slice_idx; +} - if ((UE_id % n_active_slices) == slice_id) { - return 1; // this ue is a member of this slice +int ue_ul_slice_membership(module_id_t mod_id, int UE_id, int slice_idx) +{ + if ((slice_idx < 0) + || (slice_idx >= RC.mac[mod_id]->slice_info.n_ul)) { + LOG_W(MAC, "out of range slice index %d (slice ID %d)\n", + slice_idx, RC.mac[mod_id]->slice_info.dl[slice_idx].id); + return 0; } - return 0; + return RC.mac[mod_id]->UE_list.active[UE_id] == TRUE + && RC.mac[mod_id]->UE_list.assoc_ul_slice_idx[UE_id] == slice_idx; } diff --git a/openair2/LAYER2/MAC/eNB_scheduler_ulsch.c b/openair2/LAYER2/MAC/eNB_scheduler_ulsch.c index 16673b5c3300a3087ae76234a05f8d17230e5536..31c11e2c0a942fb42d882132644bcc0376ba148d 100644 --- a/openair2/LAYER2/MAC/eNB_scheduler_ulsch.c +++ b/openair2/LAYER2/MAC/eNB_scheduler_ulsch.c @@ -63,6 +63,9 @@ #include "T.h" +#include "common/ran_context.h" +extern RAN_CONTEXT_t RC; + #define ENABLE_MAC_PAYLOAD_DEBUG #define DEBUG_eNB_SCHEDULER 1 @@ -72,46 +75,14 @@ extern uint16_t sfnsf_add_subframe(uint16_t frameP, uint16_t subframeP, int offs extern int oai_nfapi_ul_config_req(nfapi_ul_config_request_t *ul_config_req); extern uint8_t nfapi_mode; -extern uint8_t nfapi_mode; - // This table holds the allowable PRB sizes for ULSCH transmissions uint8_t rb_table[34] = { 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, 16, 18, 20, 24, 25, 27, 30, 32, 36, 40, 45, 48, 50, 54, 60, 64, 72, 75, 80, 81, 90, 96, 100 }; -/* number of active slices for past and current time*/ -int n_active_slices_uplink = 1; -int n_active_slices_current_uplink = 1; - -/* RB share for each slice for past and 
current time*/ -float avg_slice_percentage_uplink=0.25; -float slice_percentage_uplink[MAX_NUM_SLICES] = {1.0, 0.0, 0.0, 0.0}; -float slice_percentage_current_uplink[MAX_NUM_SLICES] = {1.0, 0.0, 0.0, 0.0}; -float total_slice_percentage_uplink = 0; -float total_slice_percentage_current_uplink = 0; - -// MAX MCS for each slice for past and current time -int slice_maxmcs_uplink[MAX_NUM_SLICES] = {20, 20, 20, 20}; -int slice_maxmcs_current_uplink[MAX_NUM_SLICES] = {20,20,20,20}; - -/*resource blocks allowed*/ -uint16_t nb_rbs_allowed_slice_uplink[MAX_NUM_CCs][MAX_NUM_SLICES]; -/*Slice Update */ -int update_ul_scheduler[MAX_NUM_SLICES] = {1, 1, 1, 1}; -int update_ul_scheduler_current[MAX_NUM_SLICES] = {1, 1, 1, 1}; - -/* name of available scheduler*/ -char *ul_scheduler_type[MAX_NUM_SLICES] = {"schedule_ulsch_rnti", - "schedule_ulsch_rnti", - "schedule_ulsch_rnti", - "schedule_ulsch_rnti" -}; extern mui_t rrc_eNB_mui; -/* Slice Function Pointer */ -slice_scheduler_ul slice_sched_ul[MAX_NUM_SLICES] = {0}; - void rx_sdu(const module_id_t enb_mod_idP, const int CC_idP, @@ -432,10 +403,10 @@ rx_sdu(const module_id_t enb_mod_idP, UE_list->UE_template[CC_idP][UE_id].ul_buffer_info[lcgid] = BSR_TABLE[bsr]; UE_list->UE_template[CC_idP][UE_id].estimated_ul_buffer = - UE_list->UE_template[CC_idP][UE_id].ul_buffer_info[0] + - UE_list->UE_template[CC_idP][UE_id].ul_buffer_info[1] + - UE_list->UE_template[CC_idP][UE_id].ul_buffer_info[2] + - UE_list->UE_template[CC_idP][UE_id].ul_buffer_info[3]; + UE_list->UE_template[CC_idP][UE_id].ul_buffer_info[LCGID0] + + UE_list->UE_template[CC_idP][UE_id].ul_buffer_info[LCGID1] + + UE_list->UE_template[CC_idP][UE_id].ul_buffer_info[LCGID2] + + UE_list->UE_template[CC_idP][UE_id].ul_buffer_info[LCGID3]; //UE_list->UE_template[CC_idP][UE_id].estimated_ul_buffer += UE_list->UE_template[CC_idP][UE_id].estimated_ul_buffer / 4; RC.eNB[enb_mod_idP][CC_idP]->pusch_stats_bsr[UE_id][(frameP * 10) + subframeP] = (payload_ptr[0] & 0x3f); @@ -466,10 
+437,10 @@ rx_sdu(const module_id_t enb_mod_idP, int bsr2 = ((payload_ptr[1] & 0x0F) << 2) | ((payload_ptr[2] & 0xC0) >> 6); int bsr3 = payload_ptr[2] & 0x3F; - lcgid_updated[0] = 1; - lcgid_updated[1] = 1; - lcgid_updated[2] = 1; - lcgid_updated[3] = 1; + lcgid_updated[LCGID0] = 1; + lcgid_updated[LCGID1] = 1; + lcgid_updated[LCGID2] = 1; + lcgid_updated[LCGID3] = 1; // update buffer info UE_list->UE_template[CC_idP][UE_id].ul_buffer_info[LCGID0] = BSR_TABLE[bsr0]; @@ -478,10 +449,10 @@ rx_sdu(const module_id_t enb_mod_idP, UE_list->UE_template[CC_idP][UE_id].ul_buffer_info[LCGID3] = BSR_TABLE[bsr3]; UE_list->UE_template[CC_idP][UE_id].estimated_ul_buffer = - UE_list->UE_template[CC_idP][UE_id].ul_buffer_info[0] + - UE_list->UE_template[CC_idP][UE_id].ul_buffer_info[1] + - UE_list->UE_template[CC_idP][UE_id].ul_buffer_info[2] + - UE_list->UE_template[CC_idP][UE_id].ul_buffer_info[3]; + UE_list->UE_template[CC_idP][UE_id].ul_buffer_info[LCGID0] + + UE_list->UE_template[CC_idP][UE_id].ul_buffer_info[LCGID1] + + UE_list->UE_template[CC_idP][UE_id].ul_buffer_info[LCGID2] + + UE_list->UE_template[CC_idP][UE_id].ul_buffer_info[LCGID3]; //UE_list->UE_template[CC_idP][UE_id].estimated_ul_buffer += UE_list->UE_template[CC_idP][UE_id].estimated_ul_buffer / 4; LOG_D(MAC, @@ -951,9 +922,10 @@ void schedule_ulsch(module_id_t module_idP, frame_t frameP, sub_frame_t subframeP) { - uint16_t first_rb[MAX_NUM_CCs], i; + uint16_t first_rb[NFAPI_CC_MAX], i; int CC_id; eNB_MAC_INST *mac = RC.mac[module_idP]; + slice_info_t *sli = &RC.mac[module_idP]->slice_info; COMMON_channels_t *cc; start_meas(&mac->schedule_ulsch); @@ -1034,12 +1006,10 @@ schedule_ulsch(module_id_t module_idP, frame_t frameP, break; } } - if (sched_subframe < subframeP) sched_frame++; - - for (CC_id = 0; CC_id < MAX_NUM_CCs; CC_id++) { - + if (sched_subframe < subframeP) sched_frame++; + for (CC_id = 0; CC_id < RC.nb_mac_CC[module_idP]; CC_id++) { //leave out first RB for PUCCH first_rb[CC_id] = 1; @@ -1066,102 
+1036,9 @@ schedule_ulsch(module_id_t module_idP, frame_t frameP, } } - // perform slice-specifc operations - - total_slice_percentage_uplink=0; - avg_slice_percentage_uplink=1.0/n_active_slices_uplink; - - // reset the slice percentage for inactive slices - for (i = n_active_slices_uplink; i< MAX_NUM_SLICES; i++) { - slice_percentage_uplink[i]=0; - } - for (i = 0; i < n_active_slices_uplink; i++) { - if (slice_percentage_uplink[i] < 0 ){ - LOG_W(MAC, "[eNB %d] frame %d subframe %d:invalid slice %d percentage %f. resetting to zero", - module_idP, frameP, subframeP, i, slice_percentage_uplink[i]); - slice_percentage_uplink[i]=0; - } - total_slice_percentage_uplink+=slice_percentage_uplink[i]; - } - - for (i = 0; i < n_active_slices_uplink; i++) { - - // Load any updated functions - if (update_ul_scheduler[i] > 0 ) { - slice_sched_ul[i] = dlsym(NULL, ul_scheduler_type[i]); - update_ul_scheduler[i] = 0; - update_ul_scheduler_current[i] = 0; - //slice_percentage_current_uplink[i]= slice_percentage_uplink[i]; - //total_slice_percentage_current_uplink+=slice_percentage_uplink[i]; - //if (total_slice_percentage_current_uplink> 1) - //total_slice_percentage_current_uplink=1; - LOG_I(MAC,"update ul scheduler slice %d\n", i); - } - // the new total RB share is within the range - if (total_slice_percentage_uplink <= 1.0){ - - // check if the number of slices has changed, and log - if (n_active_slices_current_uplink != n_active_slices_uplink ){ - if ((n_active_slices_uplink > 0) && (n_active_slices_uplink <= MAX_NUM_SLICES)) { - LOG_I(MAC,"[eNB %d]frame %d subframe %d: number of active UL slices has changed: %d-->%d\n", - module_idP, frameP, subframeP, n_active_slices_current_uplink, n_active_slices_uplink); - n_active_slices_current_uplink = n_active_slices_uplink; - } else { - LOG_W(MAC,"invalid number of UL slices %d, revert to the previous value %d\n", - n_active_slices_uplink, n_active_slices_current_uplink); - n_active_slices_uplink = n_active_slices_current_uplink; - } 
- } - - // check if the slice rb share has changed, and log the console - if (slice_percentage_current_uplink[i] != slice_percentage_uplink[i]){ - LOG_I(MAC,"[eNB %d][SLICE %d][UL] frame %d subframe %d: total percentage %f-->%f, slice RB percentage has changed: %f-->%f\n", - module_idP, i, frameP, subframeP, total_slice_percentage_current_uplink, - total_slice_percentage_uplink, slice_percentage_current_uplink[i], slice_percentage_uplink[i]); - total_slice_percentage_current_uplink = total_slice_percentage_uplink; - slice_percentage_current_uplink[i] = slice_percentage_uplink[i]; - } - - // check if the slice max MCS, and log the console - if (slice_maxmcs_current_uplink[i] != slice_maxmcs_uplink[i]){ - if ((slice_maxmcs_uplink[i] >= 0) && (slice_maxmcs_uplink[i] <= 16)){ - LOG_I(MAC,"[eNB %d][SLICE %d][UL] frame %d subframe %d: slice MAX MCS has changed: %d-->%d\n", - module_idP, i, frameP, subframeP, slice_maxmcs_current_uplink[i], slice_maxmcs_uplink[i]); - slice_maxmcs_current_uplink[i] = slice_maxmcs_uplink[i]; - } else { - LOG_W(MAC,"[eNB %d][SLICE %d][UL] invalid slice max mcs %d, revert the previous value %d\n", - module_idP, i, slice_maxmcs_uplink[i],slice_maxmcs_current_uplink[i]); - slice_maxmcs_uplink[i] = slice_maxmcs_current_uplink[i]; - } - } - - // check if a new scheduler, and log the console - if (update_ul_scheduler_current[i] != update_ul_scheduler[i]){ - LOG_I(MAC,"[eNB %d][SLICE %d][UL] frame %d subframe %d: UL scheduler for this slice is updated: %s \n", - module_idP, i, frameP, subframeP, ul_scheduler_type[i]); - update_ul_scheduler_current[i] = update_ul_scheduler[i]; - } - } else { - if (n_active_slices_uplink == n_active_slices_current_uplink) { - LOG_W(MAC,"[eNB %d][SLICE %d][UL] invalid total RB share (%f->%f), reduce proportionally the RB share by 0.1\n", - module_idP, i, total_slice_percentage_current_uplink, total_slice_percentage_uplink); - if (slice_percentage_uplink[i] > avg_slice_percentage_uplink) { - slice_percentage_uplink[i] 
-= 0.1; - total_slice_percentage_uplink -= 0.1; - } - } else { - // here we can correct the values, e.g. reduce proportionally - LOG_W(MAC,"[eNB %d][SLICE %d][UL] invalid total RB share (%f->%f), revert the number of slice to its previous value (%d->%d)\n", - module_idP, i, total_slice_percentage_current_uplink, - total_slice_percentage_uplink, n_active_slices_uplink, - n_active_slices_current_uplink); - n_active_slices_uplink = n_active_slices_current_uplink; - slice_percentage_uplink[i] = slice_percentage_current_uplink[i]; - } - } - + for (i = 0; i < sli->n_ul; i++) { // Run each enabled slice-specific schedulers one by one - slice_sched_ul[i](module_idP, i, frameP, subframeP, sched_subframe, first_rb); + sli->ul[i].sched_cb(module_idP, i, frameP, subframeP, sched_subframe, first_rb); } stop_meas(&mac->schedule_ulsch); @@ -1169,7 +1046,7 @@ schedule_ulsch(module_id_t module_idP, frame_t frameP, void schedule_ulsch_rnti(module_id_t module_idP, - slice_id_t slice_id, + int slice_idx, frame_t frameP, sub_frame_t subframeP, unsigned char sched_subframeP, uint16_t * first_rb) @@ -1191,12 +1068,14 @@ schedule_ulsch_rnti(module_id_t module_idP, eNB_MAC_INST *mac = RC.mac[module_idP]; COMMON_channels_t *cc = mac->common_channels; UE_list_t *UE_list = &mac->UE_list; + slice_info_t *sli = &RC.mac[module_idP]->slice_info; UE_TEMPLATE *UE_template; UE_sched_ctrl *UE_sched_ctrl; int sched_frame = frameP; int rvidx_tab[4] = { 0, 2, 3, 1 }; uint16_t ul_req_index; uint8_t dlsch_flag; + int first_rb_slice[NFAPI_CC_MAX]; if (sched_subframeP < subframeP) sched_frame++; @@ -1208,9 +1087,18 @@ schedule_ulsch_rnti(module_id_t module_idP, nfapi_ul_config_request_t *ul_req_tmp = &mac->UL_req_tmp[CC_id][sched_subframeP]; nfapi_ul_config_request_body_t *ul_req_tmp_body = &ul_req_tmp->ul_config_request_body; nfapi_ul_config_ulsch_harq_information *ulsch_harq_information; + + for (CC_id = 0; CC_id < RC.nb_mac_CC[module_idP]; ++CC_id) { + N_RB_UL = to_prb(cc[CC_id].ul_Bandwidth); + 
UE_list->first_rb_offset[CC_id][slice_idx] = cmin(N_RB_UL, sli->ul[slice_idx].first_rb); + } + //LOG_D(MAC, "entering ulsch preprocesor\n"); - ulsch_scheduler_pre_processor(module_idP, slice_id, frameP, subframeP, sched_subframeP, first_rb); + ulsch_scheduler_pre_processor(module_idP, slice_idx, frameP, subframeP, sched_subframeP, first_rb); + for (CC_id = 0; CC_id < RC.nb_mac_CC[module_idP]; ++CC_id) { + first_rb_slice[CC_id] = first_rb[CC_id] + UE_list->first_rb_offset[CC_id][slice_idx]; + } //LOG_D(MAC, "exiting ulsch preprocesor\n"); hi_dci0_req->sfn_sf = (frameP << 4) + subframeP; @@ -1219,7 +1107,7 @@ schedule_ulsch_rnti(module_id_t module_idP, for (UE_id = UE_list->head_ul; UE_id >= 0; UE_id = UE_list->next_ul[UE_id]) { - if (!ue_slice_membership(UE_id, slice_id)) + if (!ue_ul_slice_membership(module_idP, UE_id, slice_idx)) continue; // don't schedule if Msg4 is not received yet @@ -1296,7 +1184,7 @@ schedule_ulsch_rnti(module_id_t module_idP, } /* be sure that there are some free RBs */ - if (first_rb[CC_id] >= N_RB_UL - 1) { + if (first_rb_slice[CC_id] >= N_RB_UL - 1) { LOG_W(MAC, "[eNB %d] frame %d subframe %d, UE %d/%x CC %d: dropping, not enough RBs\n", module_idP, frameP, subframeP, UE_id, rnti, CC_id); @@ -1407,7 +1295,7 @@ schedule_ulsch_rnti(module_id_t module_idP, UE_template->oldNDI_UL[harq_pid] = ndi; UE_list->eNB_UE_stats[CC_id][UE_id].normalized_rx_power = normalized_rx_power; UE_list->eNB_UE_stats[CC_id][UE_id].target_rx_power = target_rx_power; - UE_template->mcs_UL[harq_pid] = cmin(UE_template->pre_assigned_mcs_ul, slice_maxmcs_uplink[slice_id]); + UE_template->mcs_UL[harq_pid] = cmin(UE_template->pre_assigned_mcs_ul, sli->ul[slice_idx].maxmcs); UE_list->eNB_UE_stats[CC_id][UE_id].ulsch_mcs1= UE_template->mcs_UL[harq_pid]; //cmin (UE_template->pre_assigned_mcs_ul, openair_daq_vars.target_ue_ul_mcs); // adjust, based on user-defined MCS if (UE_template->pre_allocated_rb_table_index_ul >= 0) { @@ -1421,7 +1309,7 @@ 
schedule_ulsch_rnti(module_id_t module_idP, // buffer_occupancy = UE_template->ul_total_buffer; - while (((rb_table[rb_table_index] > (N_RB_UL - 1 - first_rb[CC_id])) + while (((rb_table[rb_table_index] > (N_RB_UL - first_rb_slice[CC_id])) || (rb_table[rb_table_index] > 45)) && (rb_table_index > 0)) { rb_table_index--; @@ -1438,7 +1326,7 @@ schedule_ulsch_rnti(module_id_t module_idP, T_INT(CC_id), T_INT(rnti), T_INT(frameP), T_INT(subframeP), T_INT(harq_pid), T_INT(UE_template->mcs_UL[harq_pid]), - T_INT(first_rb[CC_id]), + T_INT(first_rb_slice[CC_id]), T_INT(rb_table[rb_table_index]), T_INT(UE_template->TBS_UL[harq_pid]), T_INT(ndi)); @@ -1448,14 +1336,14 @@ schedule_ulsch_rnti(module_id_t module_idP, module_idP, harq_pid, rnti, CC_id, frameP, subframeP, UE_id, UE_template->mcs_UL[harq_pid], - first_rb[CC_id], rb_table[rb_table_index], + first_rb_slice[CC_id], rb_table[rb_table_index], rb_table_index, UE_template->TBS_UL[harq_pid], harq_pid); // bad indices : 20 (40 PRB), 21 (45 PRB), 22 (48 PRB) //store for possible retransmission UE_template->nb_rb_ul[harq_pid] = rb_table[rb_table_index]; - UE_template->first_rb_ul[harq_pid] = first_rb[CC_id]; + UE_template->first_rb_ul[harq_pid] = first_rb_slice[CC_id]; UE_sched_ctrl->ul_scheduled |= (1 << harq_pid); if (UE_id == UE_list->head) @@ -1487,7 +1375,7 @@ schedule_ulsch_rnti(module_id_t module_idP, hi_dci0_pdu->dci_pdu.dci_pdu_rel8.aggregation_level = aggregation; hi_dci0_pdu->dci_pdu.dci_pdu_rel8.rnti = rnti; hi_dci0_pdu->dci_pdu.dci_pdu_rel8.transmission_power = 6000; - hi_dci0_pdu->dci_pdu.dci_pdu_rel8.resource_block_start = first_rb[CC_id]; + hi_dci0_pdu->dci_pdu.dci_pdu_rel8.resource_block_start = first_rb_slice[CC_id]; hi_dci0_pdu->dci_pdu.dci_pdu_rel8.number_of_resource_block = rb_table[rb_table_index]; hi_dci0_pdu->dci_pdu.dci_pdu_rel8.mcs_1 = UE_template->mcs_UL[harq_pid]; hi_dci0_pdu->dci_pdu.dci_pdu_rel8.cyclic_shift_2_for_drms = cshift; @@ -1523,7 +1411,7 @@ schedule_ulsch_rnti(module_id_t module_idP, } 
// Add UL_config PDUs - fill_nfapi_ulsch_config_request_rel8(&ul_req_tmp_body->ul_config_pdu_list[ul_req_index], cqi_req, cc, UE_template->physicalConfigDedicated, get_tmode(module_idP, CC_id, UE_id), mac->ul_handle, rnti, first_rb[CC_id], // resource_block_start + fill_nfapi_ulsch_config_request_rel8(&ul_req_tmp_body->ul_config_pdu_list[ul_req_index], cqi_req, cc, UE_template->physicalConfigDedicated, get_tmode(module_idP, CC_id, UE_id), mac->ul_handle, rnti, first_rb_slice[CC_id], // resource_block_start rb_table[rb_table_index], // number_of_resource_blocks UE_template->mcs_UL[harq_pid], cshift, // cyclic_shift_2_for_drms 0, // frequency_hopping_enabled_flag @@ -1586,13 +1474,13 @@ schedule_ulsch_rnti(module_id_t module_idP, LOG_D(MAC,"[PUSCH %d] SFN/SF:%04d%d UL_CFG:SFN/SF:%04d%d CQI:%d for UE %d/%x\n", harq_pid,frameP,subframeP,ul_sched_frame,ul_sched_subframeP,cqi_req,UE_id,rnti); // increment first rb for next UE allocation - first_rb[CC_id] += rb_table[rb_table_index]; + first_rb_slice[CC_id] += rb_table[rb_table_index]; } else { // round > 0 => retransmission T(T_ENB_MAC_UE_UL_SCHEDULE_RETRANSMISSION, T_INT(module_idP), T_INT(CC_id), T_INT(rnti), T_INT(frameP), T_INT(subframeP), T_INT(harq_pid), T_INT(UE_template->mcs_UL[harq_pid]), - T_INT(first_rb[CC_id]), + T_INT(first_rb_slice[CC_id]), T_INT(rb_table[rb_table_index]), T_INT(round)); // Add UL_config PDUs diff --git a/openair2/LAYER2/MAC/mac.h b/openair2/LAYER2/MAC/mac.h index f8121310b1e413496d6663701662051d256f95ed..ed6171dcebfaa35f74e608f7a86c0d6d1ce3c187 100644 --- a/openair2/LAYER2/MAC/mac.h +++ b/openair2/LAYER2/MAC/mac.h @@ -159,7 +159,7 @@ #define MIN_MAC_HDR_RLC_SIZE (1 + MIN_RLC_PDU_SIZE) /*!\brief maximum number of slices / groups */ -#define MAX_NUM_SLICES 4 +#define MAX_NUM_SLICES 10 #define U_PLANE_INACTIVITY_VALUE 6000 @@ -849,6 +849,9 @@ typedef struct { /// LCGID mapping long lcgidmap[11]; + ///UE logical channel priority + long lcgidpriority[11]; + /// phr information int8_t phr_info; 
@@ -1099,6 +1102,10 @@ typedef struct { /// Sorting criteria for the UE list in the MAC preprocessor uint16_t sorting_criteria[MAX_NUM_SLICES][CR_NUM]; + uint16_t first_rb_offset[NFAPI_CC_MAX][MAX_NUM_SLICES]; + + int assoc_dl_slice_idx[MAX_MOBILES_PER_ENB]; + int assoc_ul_slice_idx[MAX_MOBILES_PER_ENB]; } UE_list_t; @@ -1118,6 +1125,117 @@ typedef struct { int tail_freelist; ///the tail position of the delete list } UE_free_list_t; +/// Structure for saving the output of each pre_processor instance +typedef struct { + uint16_t nb_rbs_required[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB]; + uint16_t nb_rbs_accounted[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB]; + uint16_t nb_rbs_remaining[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB]; + uint8_t slice_allocation_mask[NFAPI_CC_MAX][N_RBG_MAX]; + uint8_t MIMO_mode_indicator[NFAPI_CC_MAX][N_RBG_MAX]; + + uint32_t bytes_lcid[MAX_MOBILES_PER_ENB][MAX_NUM_LCID]; + uint32_t wb_pmi[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB]; + uint8_t mcs[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB]; + +} pre_processor_results_t; + +/** + * slice specific scheduler for the DL + */ +typedef void (*slice_scheduler_dl)(module_id_t mod_id, + int slice_idx, + frame_t frame, + sub_frame_t subframe, + int *mbsfn_flag); + +typedef struct { + slice_id_t id; + + /// RB share for each slice + float pct; + + /// whether this slice is isolated from the others + int isol; + + int prio; + + /// Frequency ranges for slice positioning + int pos_low; + int pos_high; + + // max mcs for each slice + int maxmcs; + + /// criteria for sorting policies of the slices + uint32_t sorting; + + /// Accounting policy (just greedy(1) or fair(0) setting for now) + int accounting; + + /// name of available scheduler + char *sched_name; + + /// pointer to the slice specific scheduler in DL + slice_scheduler_dl sched_cb; + +} slice_sched_conf_dl_t; + +typedef void (*slice_scheduler_ul)(module_id_t mod_id, + int slice_idx, + frame_t frame, + sub_frame_t subframe, + unsigned char sched_subframe, + uint16_t *first_rb); + 
+typedef struct { + slice_id_t id; + + /// RB share for each slice + float pct; + + // MAX MCS for each slice + int maxmcs; + + /// criteria for sorting policies of the slices + uint32_t sorting; + + /// starting RB (RB offset) of UL scheduling + int first_rb; + + /// name of available scheduler + char *sched_name; + + /// pointer to the slice specific scheduler in UL + slice_scheduler_ul sched_cb; + +} slice_sched_conf_ul_t; + + +typedef struct { + /// counter used to indicate when all slices have pre-allocated UEs + //int slice_counter; + + /// indicates whether remaining RBs after first intra-slice allocation will + /// be allocated to UEs of the same slice + int intraslice_share_active; + /// indicates whether remaining RBs after slice allocation will be + /// allocated to UEs of another slice. Isolated slices will be ignored + int interslice_share_active; + + /// number of active DL slices + int n_dl; + slice_sched_conf_dl_t dl[MAX_NUM_SLICES]; + + /// number of active UL slices + int n_ul; + slice_sched_conf_ul_t ul[MAX_NUM_SLICES]; + + pre_processor_results_t pre_processor_results[MAX_NUM_SLICES]; + + /// common rb allocation list between slices + uint8_t rballoc_sub[NFAPI_CC_MAX][N_RBG_MAX]; +} slice_info_t; + /*! 
\brief eNB common channels */ typedef struct { int physCellId; @@ -1238,7 +1356,10 @@ typedef struct eNB_MAC_INST_s { /// UL handle uint32_t ul_handle; UE_list_t UE_list; - + + /// slice-related configuration + slice_info_t slice_info; + ///subband bitmap configuration SBMAP_CONF sbmap_conf; /// CCE table used to build DCI scheduling information diff --git a/openair2/LAYER2/MAC/mac_proto.h b/openair2/LAYER2/MAC/mac_proto.h index aba8c26d92042d55bfb8dcc0393d703f2e7a4323..e0fdade419fab0907c807d392cbb7f3070522ac7 100644 --- a/openair2/LAYER2/MAC/mac_proto.h +++ b/openair2/LAYER2/MAC/mac_proto.h @@ -36,22 +36,6 @@ * @{ */ -/** - * slice specific scheduler - */ -typedef void (*slice_scheduler_dl)(module_id_t mod_id, - slice_id_t slice_id, - frame_t frame, - sub_frame_t subframe, - int *mbsfn_flag); - -typedef void (*slice_scheduler_ul)(module_id_t mod_id, - slice_id_t slice_id, - frame_t frame, - sub_frame_t subframe, - unsigned char sched_subframe, - uint16_t *first_rb); - /** \fn void schedule_mib(module_id_t module_idP,frame_t frameP,sub_frame_t subframe); \brief MIB scheduling for PBCH. This function requests the MIB from RRC and provides it to L1. 
@param Mod_id Instance ID of eNB @@ -119,12 +103,12 @@ void schedule_ulsch(module_id_t module_idP, frame_t frameP, /** \brief ULSCH Scheduling per RNTI @param Mod_id Instance ID of eNB -@param slice_id Instance slice for this eNB +@param slice_idx Slice instance index for this eNB @param frame Frame index @param subframe Subframe number on which to act @param sched_subframe Subframe number where PUSCH is transmitted (for DAI lookup) */ -void schedule_ulsch_rnti(module_id_t module_idP, slice_id_t slice_idP, frame_t frameP, +void schedule_ulsch_rnti(module_id_t module_idP, int slice_idx, frame_t frameP, sub_frame_t subframe, unsigned char sched_subframe, uint16_t * first_rb); @@ -147,7 +131,7 @@ void fill_DLSCH_dci(module_id_t module_idP,frame_t frameP,sub_frame_t subframe,i void schedule_dlsch(module_id_t module_idP, frame_t frameP, sub_frame_t subframe, int *mbsfn_flag); -void schedule_ue_spec(module_id_t module_idP, slice_id_t slice_idP, +void schedule_ue_spec(module_id_t module_idP, int slice_idxP, frame_t frameP,sub_frame_t subframe, int *mbsfn_flag); void schedule_ue_spec_phy_test(module_id_t module_idP,frame_t frameP,sub_frame_t subframe,int *mbsfn_flag); @@ -181,6 +165,10 @@ void add_msg3(module_id_t module_idP, int CC_id, RA_t * ra, frame_t frameP, //main.c +void init_UE_list(UE_list_t *UE_list); + +void init_slice_info(slice_info_t *sli); + int mac_top_init(int eMBMS_active, char *uecap_xer, uint8_t cba_group_active, uint8_t HO_active); @@ -204,24 +192,11 @@ void mac_UE_out_of_sync_ind(module_id_t module_idP, frame_t frameP, void clear_nfapi_information(eNB_MAC_INST * eNB, int CC_idP, frame_t frameP, sub_frame_t subframeP); -void dlsch_scheduler_pre_processor_reset(int module_idP, int UE_id, - uint8_t CC_id, - int frameP, - int subframeP, - int N_RBG, - uint16_t - nb_rbs_required[NFAPI_CC_MAX] - [MAX_MOBILES_PER_ENB], - unsigned char - rballoc_sub[NFAPI_CC_MAX] - [N_RBG_MAX], - unsigned char - MIMO_mode_indicator[NFAPI_CC_MAX] - [N_RBG_MAX]); // eNB 
functions /* \brief This function assigns pre-available RBS to each UE in specified sub-bands before scheduling is done @param Mod_id Instance ID of eNB +@param slice_idxP Slice instance index for the slice in which scheduling happens @param frame Index of frame @param subframe Index of current subframe @param N_RBS Number of resource block groups @@ -229,24 +204,73 @@ void dlsch_scheduler_pre_processor_reset(int module_idP, int UE_id, void dlsch_scheduler_pre_processor(module_id_t module_idP, - slice_id_t slice_idP, - frame_t frameP, - sub_frame_t subframe, - int N_RBG[NFAPI_CC_MAX], - int *mbsfn_flag); - + int slice_idxP, + frame_t frameP, + sub_frame_t subframe, + int *mbsfn_flag, + uint8_t rballoc_sub[NFAPI_CC_MAX][N_RBG_MAX]); + +void dlsch_scheduler_pre_processor_reset(module_id_t module_idP, + int slice_idx, + frame_t frameP, + sub_frame_t subframeP, + int min_rb_unit[NFAPI_CC_MAX], + uint16_t nb_rbs_required[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB], + uint8_t rballoc_sub[NFAPI_CC_MAX][N_RBG_MAX], + uint8_t MIMO_mode_indicator[NFAPI_CC_MAX][N_RBG_MAX], + int *mbsfn_flag); + +void dlsch_scheduler_pre_processor_partitioning(module_id_t Mod_id, + int slice_idx, + const uint8_t rbs_retx[NFAPI_CC_MAX]); + +void dlsch_scheduler_pre_processor_accounting(module_id_t Mod_id, + int slice_idx, + frame_t frameP, + sub_frame_t subframeP, + int min_rb_unit[NFAPI_CC_MAX], + uint16_t nb_rbs_required[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB], + uint16_t nb_rbs_accounted[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB]); + +void dlsch_scheduler_pre_processor_positioning(module_id_t Mod_id, + int slice_idx, + int min_rb_unit[NFAPI_CC_MAX], + uint16_t nb_rbs_required[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB], + uint16_t nb_rbs_accounted[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB], + uint16_t nb_rbs_remaining[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB], + uint8_t rballoc_sub[NFAPI_CC_MAX][N_RBG_MAX], + uint8_t MIMO_mode_indicator[NFAPI_CC_MAX][N_RBG_MAX]); + +void dlsch_scheduler_pre_processor_intraslice_sharing(module_id_t 
Mod_id, + int slice_idx, + int min_rb_unit[NFAPI_CC_MAX], + uint16_t nb_rbs_required[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB], + uint16_t nb_rbs_accounted[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB], + uint16_t nb_rbs_remaining[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB], + uint8_t rballoc_sub[NFAPI_CC_MAX][N_RBG_MAX], + uint8_t MIMO_mode_indicator[NFAPI_CC_MAX][N_RBG_MAX]); + +void slice_priority_sort(module_id_t Mod_id, int slice_list[MAX_NUM_SLICES]); + +void dlsch_scheduler_interslice_multiplexing(module_id_t Mod_id, + int frameP, + sub_frame_t subframeP, + uint8_t rballoc_sub[NFAPI_CC_MAX][N_RBG_MAX]); + +void dlsch_scheduler_qos_multiplexing(module_id_t Mod_id, + int frameP, + sub_frame_t subframeP); void dlsch_scheduler_pre_processor_allocate(module_id_t Mod_id, - int UE_id, - uint8_t CC_id, - int N_RBG, - int transmission_mode, - int min_rb_unit, - uint8_t N_RB_DL, - uint16_t nb_rbs_required[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB], - uint16_t nb_rbs_required_remaining[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB], - unsigned char rballoc_sub[NFAPI_CC_MAX][N_RBG_MAX], - unsigned char MIMO_mode_indicator[NFAPI_CC_MAX][N_RBG_MAX]); + int UE_id, + uint8_t CC_id, + int N_RBG, + int min_rb_unit, + uint16_t nb_rbs_required[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB], + uint16_t nb_rbs_remaining[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB], + uint8_t rballoc_sub[NFAPI_CC_MAX][N_RBG_MAX], + uint8_t slice_allocation_mask[NFAPI_CC_MAX][N_RBG_MAX], + uint8_t MIMO_mode_indicator[NFAPI_CC_MAX][N_RBG_MAX]); /* \brief Function to trigger the eNB scheduling procedure. It is called by PHY at the beginning of each subframe, \f$n$\f and generates all DLSCH allocations for subframe \f$n\f$ and ULSCH allocations for subframe \f$n+k$\f. 
@@ -676,8 +700,8 @@ int add_new_ue(module_id_t Mod_id, int CC_id, rnti_t rnti, int harq_pid ); int rrc_mac_remove_ue(module_id_t Mod_id, rnti_t rntiP); -void store_dlsch_buffer(module_id_t Mod_id, slice_id_t slice_id, frame_t frameP, sub_frame_t subframeP); -void assign_rbs_required(module_id_t Mod_id, slice_id_t slice_id, frame_t frameP, sub_frame_t subframe, uint16_t nb_rbs_required[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB], int min_rb_unit[NFAPI_CC_MAX]); +void store_dlsch_buffer(module_id_t Mod_id, int slice_idx, frame_t frameP, sub_frame_t subframeP); +void assign_rbs_required(module_id_t Mod_id, int slice_idx, frame_t frameP, sub_frame_t subframe, uint16_t nb_rbs_required[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB], int min_rb_unit[NFAPI_CC_MAX]); int maxround(module_id_t Mod_id, uint16_t rnti, int frame, sub_frame_t subframe, uint8_t ul_flag); @@ -696,20 +720,20 @@ void set_ul_DAI(int module_idP, int frameP, int subframeP); -void ulsch_scheduler_pre_processor(module_id_t module_idP, slice_id_t slice_id, int frameP, +void ulsch_scheduler_pre_processor(module_id_t module_idP, int slice_idx, int frameP, sub_frame_t subframeP, unsigned char sched_subframeP, uint16_t * first_rb); void store_ulsch_buffer(module_id_t module_idP, int frameP, sub_frame_t subframeP); void sort_ue_ul(module_id_t module_idP, int frameP, sub_frame_t subframeP); -void assign_max_mcs_min_rb(module_id_t module_idP, int slice_id, int frameP, +void assign_max_mcs_min_rb(module_id_t module_idP, int slice_idx, int frameP, sub_frame_t subframeP, uint16_t * first_rb); void adjust_bsr_info(int buffer_occupancy, uint16_t TBS, UE_TEMPLATE * UE_template); int phy_stats_exist(module_id_t Mod_id, int rnti); -void sort_UEs(module_id_t Mod_idP, slice_id_t slice_id, int frameP, sub_frame_t subframeP); +void sort_UEs(module_id_t Mod_idP, int slice_idx, int frameP, sub_frame_t subframeP); /*! 
\fn UE_L2_state_t ue_scheduler(const module_id_t module_idP,const frame_t frameP, const sub_frame_t subframe, const lte_subframe_t direction,const uint8_t eNB_index) \brief UE scheduler where all the ue background tasks are done. This function performs the following: 1) Trigger PDCP every 5ms 2) Call RRC for link status return to PHY3) Perform SR/BSR procedures for scheduling feedback 4) Perform PHR procedures. @@ -1244,9 +1268,9 @@ void pre_scd_nb_rbs_required( module_id_t module_idP, #endif /*Slice related functions */ -uint16_t flexran_nb_rbs_allowed_slice(float rb_percentage, int total_rbs); - -int ue_slice_membership(int UE_id, int slice_id); +uint16_t nb_rbs_allowed_slice(float rb_percentage, int total_rbs); +int ue_dl_slice_membership(module_id_t mod_id, int UE_id, int slice_idx); +int ue_ul_slice_membership(module_id_t mod_id, int UE_id, int slice_idx); /* from here: prototypes to get rid of compilation warnings: doc to be written by function author */ uint8_t ul_subframe2_k_phich(COMMON_channels_t * cc, sub_frame_t ul_subframe); diff --git a/openair2/LAYER2/MAC/main.c b/openair2/LAYER2/MAC/main.c index da2d9778757985b6b001d526a590733391a397fe..8fb8d44e0abc80301bb20f566162f7b206e45ac6 100644 --- a/openair2/LAYER2/MAC/main.c +++ b/openair2/LAYER2/MAC/main.c @@ -29,6 +29,7 @@ */ +#include <dlfcn.h> #include "mac.h" #include "mac_proto.h" #include "mac_extern.h" @@ -45,96 +46,113 @@ extern RAN_CONTEXT_t RC; - -void mac_top_init_eNB(void) +void init_UE_list(UE_list_t *UE_list) { + int list_el; + UE_list->num_UEs = 0; + UE_list->head = -1; + UE_list->head_ul = -1; + UE_list->avail = 0; + for (list_el = 0; list_el < MAX_MOBILES_PER_ENB - 1; list_el++) { + UE_list->next[list_el] = list_el + 1; + UE_list->next_ul[list_el] = list_el + 1; + } + UE_list->next[list_el] = -1; + UE_list->next_ul[list_el] = -1; + memset(UE_list->DLSCH_pdu, 0, sizeof(UE_list->DLSCH_pdu)); + memset(UE_list->UE_template, 0, sizeof(UE_list->UE_template)); + memset(UE_list->eNB_UE_stats, 0, 
sizeof(UE_list->eNB_UE_stats)); + memset(UE_list->UE_sched_ctrl, 0, sizeof(UE_list->UE_sched_ctrl)); + memset(UE_list->active, 0, sizeof(UE_list->active)); + memset(UE_list->assoc_dl_slice_idx, 0, sizeof(UE_list->assoc_dl_slice_idx)); + memset(UE_list->assoc_ul_slice_idx, 0, sizeof(UE_list->assoc_ul_slice_idx)); +} - module_id_t i, j; - int list_el; - UE_list_t *UE_list; - eNB_MAC_INST *mac; - - LOG_I(MAC, "[MAIN] Init function start:nb_macrlc_inst=%d\n", - RC.nb_macrlc_inst); +void init_slice_info(slice_info_t *sli) +{ + sli->intraslice_share_active = 1; + sli->interslice_share_active = 1; + + sli->n_dl = 1; + memset(sli->dl, 0, sizeof(slice_sched_conf_dl_t) * MAX_NUM_SLICES); + sli->dl[0].pct = 1.0; + sli->dl[0].prio = 10; + sli->dl[0].pos_high = N_RBG_MAX; + sli->dl[0].maxmcs = 28; + sli->dl[0].sorting = 0x012345; + sli->dl[0].sched_name = "schedule_ue_spec"; + sli->dl[0].sched_cb = dlsym(NULL, sli->dl[0].sched_name); + AssertFatal(sli->dl[0].sched_cb, "DLSCH scheduler callback is NULL\n"); + + sli->n_ul = 1; + memset(sli->ul, 0, sizeof(slice_sched_conf_ul_t) * MAX_NUM_SLICES); + sli->ul[0].pct = 1.0; + sli->ul[0].maxmcs = 20; + sli->ul[0].sorting = 0x0123; + sli->ul[0].sched_name = "schedule_ulsch_rnti"; + sli->ul[0].sched_cb = dlsym(NULL, sli->ul[0].sched_name); + AssertFatal(sli->ul[0].sched_cb, "ULSCH scheduler callback is NULL\n"); +} - if (RC.nb_macrlc_inst > 0) { - if (RC.mac == NULL){ - RC.mac = - (eNB_MAC_INST **) malloc16(RC.nb_macrlc_inst * - sizeof(eNB_MAC_INST *)); - bzero(RC.mac, RC.nb_macrlc_inst * sizeof(eNB_MAC_INST *)); - } - AssertFatal(RC.mac != NULL, - "can't ALLOCATE %zu Bytes for %d eNB_MAC_INST with size %zu \n", - RC.nb_macrlc_inst * sizeof(eNB_MAC_INST *), - RC.nb_macrlc_inst, sizeof(eNB_MAC_INST)); - for (i = 0; i < RC.nb_macrlc_inst; i++) { - if (RC.mac[i] == NULL) { - RC.mac[i] = (eNB_MAC_INST *) malloc16(sizeof(eNB_MAC_INST)); - AssertFatal(RC.mac[i] != NULL, - "can't ALLOCATE %zu Bytes for %d eNB_MAC_INST with size %zu \n", - 
RC.nb_macrlc_inst * sizeof(eNB_MAC_INST *), - RC.nb_macrlc_inst, sizeof(eNB_MAC_INST)); - LOG_D(MAC, - "[MAIN] ALLOCATE %zu Bytes for %d eNB_MAC_INST @ %p\n", - sizeof(eNB_MAC_INST), RC.nb_macrlc_inst, RC.mac); - bzero(RC.mac[i], sizeof(eNB_MAC_INST)); - } - RC.mac[i]->Mod_id = i; - for (j = 0; j < MAX_NUM_CCs; j++) { - RC.mac[i]->DL_req[j].dl_config_request_body. - dl_config_pdu_list = RC.mac[i]->dl_config_pdu_list[j]; - RC.mac[i]->UL_req[j].ul_config_request_body. - ul_config_pdu_list = RC.mac[i]->ul_config_pdu_list[j]; - for (int k = 0; k < 10; k++) - RC.mac[i]->UL_req_tmp[j][k]. - ul_config_request_body.ul_config_pdu_list = - RC.mac[i]->ul_config_pdu_list_tmp[j][k]; - for(int sf=0;sf<10;sf++){ - RC.mac[i]->HI_DCI0_req[j][sf].hi_dci0_request_body.hi_dci0_pdu_list =RC.mac[i]->hi_dci0_pdu_list[j][sf]; - } - - RC.mac[i]->TX_req[j].tx_request_body.tx_pdu_list = - RC.mac[i]->tx_request_pdu[j]; - RC.mac[i]->ul_handle = 0; - } - } - - AssertFatal(rlc_module_init() == 0, - "Could not initialize RLC layer\n"); - - // These should be out of here later - pdcp_layer_init(); - - rrc_init_global_param(); - - } else { - RC.mac = NULL; +void mac_top_init_eNB(void) +{ + module_id_t i, j; + eNB_MAC_INST **mac; + + LOG_I(MAC, "[MAIN] Init function start:nb_macrlc_inst=%d\n", + RC.nb_macrlc_inst); + + if (RC.nb_macrlc_inst <= 0) { + RC.mac = NULL; + return; + } + + mac = malloc16(RC.nb_macrlc_inst * sizeof(eNB_MAC_INST *)); + AssertFatal(mac != NULL, + "can't ALLOCATE %zu Bytes for %d eNB_MAC_INST with size %zu \n", + RC.nb_macrlc_inst * sizeof(eNB_MAC_INST *), + RC.nb_macrlc_inst, sizeof(eNB_MAC_INST)); + for (i = 0; i < RC.nb_macrlc_inst; i++) { + mac[i] = malloc16(sizeof(eNB_MAC_INST)); + AssertFatal(mac[i] != NULL, + "can't ALLOCATE %zu Bytes for %d eNB_MAC_INST with size %zu \n", + RC.nb_macrlc_inst * sizeof(eNB_MAC_INST *), + RC.nb_macrlc_inst, sizeof(eNB_MAC_INST)); + LOG_D(MAC, + "[MAIN] ALLOCATE %zu Bytes for %d eNB_MAC_INST @ %p\n", + sizeof(eNB_MAC_INST), 
RC.nb_macrlc_inst, mac); + bzero(mac[i], sizeof(eNB_MAC_INST)); + mac[i]->Mod_id = i; + for (j = 0; j < MAX_NUM_CCs; j++) { + mac[i]->DL_req[j].dl_config_request_body.dl_config_pdu_list = + mac[i]->dl_config_pdu_list[j]; + mac[i]->UL_req[j].ul_config_request_body.ul_config_pdu_list = + mac[i]->ul_config_pdu_list[j]; + for (int k = 0; k < 10; k++) + mac[i]->UL_req_tmp[j][k].ul_config_request_body.ul_config_pdu_list = + mac[i]->ul_config_pdu_list_tmp[j][k]; + for(int sf=0;sf<10;sf++) + mac[i]->HI_DCI0_req[j][sf].hi_dci0_request_body.hi_dci0_pdu_list = + mac[i]->hi_dci0_pdu_list[j][sf]; + mac[i]->TX_req[j].tx_request_body.tx_pdu_list = mac[i]->tx_request_pdu[j]; + mac[i]->ul_handle = 0; } - // Initialize Linked-List for Active UEs - for (i = 0; i < RC.nb_macrlc_inst; i++) { - mac = RC.mac[i]; + mac[i]->if_inst = IF_Module_init(i); + init_UE_list(&mac[i]->UE_list); + init_slice_info(&mac[i]->slice_info); + } - mac->if_inst = IF_Module_init(i); + RC.mac = mac; - UE_list = &mac->UE_list; + AssertFatal(rlc_module_init() == 0, + "Could not initialize RLC layer\n"); - UE_list->num_UEs = 0; - UE_list->head = -1; - UE_list->head_ul = -1; - UE_list->avail = 0; - - for (list_el = 0; list_el < MAX_MOBILES_PER_ENB - 1; list_el++) { - UE_list->next[list_el] = list_el + 1; - UE_list->next_ul[list_el] = list_el + 1; - } - - UE_list->next[list_el] = -1; - UE_list->next_ul[list_el] = -1; - } + // These should be out of here later + pdcp_layer_init(); + rrc_init_global_param(); } void mac_init_cell_params(int Mod_idP, int CC_idP) diff --git a/openair2/LAYER2/MAC/pre_processor.c b/openair2/LAYER2/MAC/pre_processor.c index 16cbd3fc19cbfb26e811451e831ca606ff8880f3..5846720c7f71a357a338631110853e708d33e4e7 100644 --- a/openair2/LAYER2/MAC/pre_processor.c +++ b/openair2/LAYER2/MAC/pre_processor.c @@ -54,14 +54,6 @@ extern RAN_CONTEXT_t RC; #define DEBUG_HEADER_PARSING 1 //#define DEBUG_PACKET_TRACE 1 -extern float slice_percentage[MAX_NUM_SLICES]; -extern float 
slice_percentage_uplink[MAX_NUM_SLICES]; -extern uint32_t sorting_policy[MAX_NUM_SLICES]; - -extern int slice_maxmcs[MAX_NUM_SLICES]; -extern int slice_maxmcs_uplink[MAX_NUM_SLICES]; - - //#define ICIC 0 /* this function checks that get_eNB_UE_stats returns @@ -95,290 +87,280 @@ int phy_stats_exist(module_id_t Mod_id, int rnti) // This function stores the downlink buffer for all the logical channels void -store_dlsch_buffer(module_id_t Mod_id, slice_id_t slice_id, frame_t frameP, - sub_frame_t subframeP) -{ - - int UE_id, i; - rnti_t rnti; - mac_rlc_status_resp_t rlc_status; - UE_list_t *UE_list = &RC.mac[Mod_id]->UE_list; - UE_TEMPLATE *UE_template; +store_dlsch_buffer(module_id_t Mod_id, + int slice_idx, + frame_t frameP, + sub_frame_t subframeP) { - for (UE_id = 0; UE_id < MAX_MOBILES_PER_ENB; UE_id++) { - if (UE_list->active[UE_id] != TRUE) - continue; + int UE_id, lcid; + rnti_t rnti; + mac_rlc_status_resp_t rlc_status; + UE_list_t *UE_list = &RC.mac[Mod_id]->UE_list; + UE_TEMPLATE *UE_template; - if (!ue_slice_membership(UE_id, slice_id)) + for (UE_id = 0; UE_id < MAX_MOBILES_PER_ENB; UE_id++) { + if (UE_list->active[UE_id] != TRUE) continue; - UE_template = - &UE_list->UE_template[UE_PCCID(Mod_id, UE_id)][UE_id]; + if (!ue_dl_slice_membership(Mod_id, UE_id, slice_idx)) + continue; - // clear logical channel interface variables - UE_template->dl_buffer_total = 0; - UE_template->dl_pdus_total = 0; + UE_template = &UE_list->UE_template[UE_PCCID(Mod_id, UE_id)][UE_id]; - for (i = 0; i < MAX_NUM_LCID; i++) { - UE_template->dl_buffer_info[i] = 0; - UE_template->dl_pdus_in_buffer[i] = 0; - UE_template->dl_buffer_head_sdu_creation_time[i] = 0; - UE_template->dl_buffer_head_sdu_remaining_size_to_send[i] = 0; - } + // clear logical channel interface variables + UE_template->dl_buffer_total = 0; + UE_template->dl_pdus_total = 0; + for (lcid = 0; lcid < MAX_NUM_LCID; ++lcid) { + UE_template->dl_buffer_info[lcid] = 0; + UE_template->dl_pdus_in_buffer[lcid] = 0; + 
UE_template->dl_buffer_head_sdu_creation_time[lcid] = 0; + UE_template->dl_buffer_head_sdu_remaining_size_to_send[lcid] = 0; + } - rnti = UE_RNTI(Mod_id, UE_id); + rnti = UE_RNTI(Mod_id, UE_id); - for (i = 0; i < MAX_NUM_LCID; i++) { // loop over all the logical channels + for (lcid = 0; lcid < MAX_NUM_LCID; ++lcid) { // loop over all the logical channels - rlc_status = - mac_rlc_status_ind(Mod_id, rnti, Mod_id, frameP, subframeP, - ENB_FLAG_YES, MBMS_FLAG_NO, i, 0 + rlc_status = mac_rlc_status_ind(Mod_id, rnti, Mod_id, frameP, subframeP, + ENB_FLAG_YES, MBMS_FLAG_NO, lcid, 0 #if (RRC_VERSION >= MAKE_VERSION(14, 0, 0)) ,0, 0 #endif ); - - UE_template->dl_buffer_info[i] = rlc_status.bytes_in_buffer; //storing the dlsch buffer for each logical channel - UE_template->dl_pdus_in_buffer[i] = rlc_status.pdus_in_buffer; - UE_template->dl_buffer_head_sdu_creation_time[i] = - rlc_status.head_sdu_creation_time; - UE_template->dl_buffer_head_sdu_creation_time_max = - cmax(UE_template->dl_buffer_head_sdu_creation_time_max, - rlc_status.head_sdu_creation_time); - UE_template->dl_buffer_head_sdu_remaining_size_to_send[i] = - rlc_status.head_sdu_remaining_size_to_send; - UE_template->dl_buffer_head_sdu_is_segmented[i] = - rlc_status.head_sdu_is_segmented; - UE_template->dl_buffer_total += UE_template->dl_buffer_info[i]; //storing the total dlsch buffer - UE_template->dl_pdus_total += - UE_template->dl_pdus_in_buffer[i]; + UE_template->dl_buffer_info[lcid] = rlc_status.bytes_in_buffer; //storing the dlsch buffer for each logical channel + UE_template->dl_pdus_in_buffer[lcid] = rlc_status.pdus_in_buffer; + UE_template->dl_buffer_head_sdu_creation_time[lcid] = rlc_status.head_sdu_creation_time; + UE_template->dl_buffer_head_sdu_creation_time_max = + cmax(UE_template->dl_buffer_head_sdu_creation_time_max, rlc_status.head_sdu_creation_time); + UE_template->dl_buffer_head_sdu_remaining_size_to_send[lcid] = rlc_status.head_sdu_remaining_size_to_send; + 
UE_template->dl_buffer_head_sdu_is_segmented[lcid] = rlc_status.head_sdu_is_segmented; + UE_template->dl_buffer_total += UE_template->dl_buffer_info[lcid]; //storing the total dlsch buffer + UE_template->dl_pdus_total += UE_template->dl_pdus_in_buffer[lcid]; #ifdef DEBUG_eNB_SCHEDULER - /* note for dl_buffer_head_sdu_remaining_size_to_send[i] : - * 0 if head SDU has not been segmented (yet), else remaining size not already segmented and sent - */ - if (UE_template->dl_buffer_info[i] > 0) - LOG_D(MAC, - "[eNB %d][SLICE %d] Frame %d Subframe %d : RLC status for UE %d in LCID%d: total of %d pdus and size %d, head sdu queuing time %d, remaining size %d, is segmeneted %d \n", - Mod_id, slice_id, frameP, subframeP, UE_id, - i, UE_template->dl_pdus_in_buffer[i], - UE_template->dl_buffer_info[i], - UE_template->dl_buffer_head_sdu_creation_time[i], - UE_template-> - dl_buffer_head_sdu_remaining_size_to_send[i], - UE_template->dl_buffer_head_sdu_is_segmented[i]); + /* note for dl_buffer_head_sdu_remaining_size_to_send[lcid] : + * 0 if head SDU has not been segmented (yet), else remaining size not already segmented and sent + */ + if (UE_template->dl_buffer_info[lcid] > 0) + LOG_D(MAC, + "[eNB %d][SLICE %d] Frame %d Subframe %d : RLC status for UE %d in LCID%d: total of %d pdus and size %d, head sdu queuing time %d, remaining size %d, is segmeneted %d \n", + Mod_id, RC.mac[Mod_id]->slice_info.dl[slice_idx].id, frameP, + subframeP, UE_id, lcid, UE_template->dl_pdus_in_buffer[lcid], + UE_template->dl_buffer_info[lcid], + UE_template->dl_buffer_head_sdu_creation_time[lcid], + UE_template->dl_buffer_head_sdu_remaining_size_to_send[lcid], + UE_template->dl_buffer_head_sdu_is_segmented[lcid]); #endif - } - - //#ifdef DEBUG_eNB_SCHEDULER - if (UE_template->dl_buffer_total > 0) - LOG_D(MAC, - "[eNB %d] Frame %d Subframe %d : RLC status for UE %d : total DL buffer size %d and total number of pdu %d \n", - Mod_id, frameP, subframeP, UE_id, - UE_template->dl_buffer_total, - 
UE_template->dl_pdus_total); - //#endif } + + if (UE_template->dl_buffer_total > 0) + LOG_D(MAC, + "[eNB %d] Frame %d Subframe %d : RLC status for UE %d : total DL buffer size %d and total number of pdu %d \n", + Mod_id, frameP, subframeP, UE_id, + UE_template->dl_buffer_total, + UE_template->dl_pdus_total); + } } +int cqi2mcs(int cqi) { + return cqi_to_mcs[cqi]; +} // This function returns the estimated number of RBs required by each UE for downlink scheduling void assign_rbs_required(module_id_t Mod_id, - slice_id_t slice_id, - frame_t frameP, - sub_frame_t subframe, - uint16_t - nb_rbs_required[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB], - int min_rb_unit[NFAPI_CC_MAX]) + int slice_idx, + frame_t frameP, + sub_frame_t subframe, + uint16_t nb_rbs_required[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB], + int min_rb_unit[NFAPI_CC_MAX]) { - uint16_t TBS = 0; + uint16_t TBS = 0; - int UE_id, n, i, j, CC_id, pCCid, tmp; - UE_list_t *UE_list = &RC.mac[Mod_id]->UE_list; - eNB_UE_STATS *eNB_UE_stats, *eNB_UE_stats_i, *eNB_UE_stats_j; - int N_RB_DL; + int UE_id, n, i, j, CC_id, pCCid, tmp; + UE_list_t *UE_list = &RC.mac[Mod_id]->UE_list; + slice_info_t *sli = &RC.mac[Mod_id]->slice_info; + eNB_UE_STATS *eNB_UE_stats, *eNB_UE_stats_i, *eNB_UE_stats_j; + int N_RB_DL; - // clear rb allocations across all CC_id - for (UE_id = 0; UE_id < MAX_MOBILES_PER_ENB; UE_id++) { - if (UE_list->active[UE_id] != TRUE) - continue; - if (!ue_slice_membership(UE_id, slice_id)) - continue; - pCCid = UE_PCCID(Mod_id, UE_id); - - //update CQI information across component carriers - for (n = 0; n < UE_list->numactiveCCs[UE_id]; n++) { - CC_id = UE_list->ordered_CCids[n][UE_id]; - eNB_UE_stats = &UE_list->eNB_UE_stats[CC_id][UE_id]; - - eNB_UE_stats->dlsch_mcs1 =cmin(cqi_to_mcs[UE_list->UE_sched_ctrl[UE_id].dl_cqi[CC_id]], - slice_maxmcs[slice_id]); - - } - - // provide the list of CCs sorted according to MCS - for (i = 0; i < UE_list->numactiveCCs[UE_id]; i++) { - eNB_UE_stats_i = - 
&UE_list->eNB_UE_stats[UE_list-> - ordered_CCids[i][UE_id]][UE_id]; - for (j = i + 1; j < UE_list->numactiveCCs[UE_id]; j++) { - DevAssert(j < NFAPI_CC_MAX); - eNB_UE_stats_j = - &UE_list-> - eNB_UE_stats[UE_list->ordered_CCids[j][UE_id]][UE_id]; - if (eNB_UE_stats_j->dlsch_mcs1 > - eNB_UE_stats_i->dlsch_mcs1) { - tmp = UE_list->ordered_CCids[i][UE_id]; - UE_list->ordered_CCids[i][UE_id] = - UE_list->ordered_CCids[j][UE_id]; - UE_list->ordered_CCids[j][UE_id] = tmp; - } - } - } - - if (UE_list->UE_template[pCCid][UE_id].dl_buffer_total > 0) { - LOG_D(MAC, "[preprocessor] assign RB for UE %d\n", UE_id); - - for (i = 0; i < UE_list->numactiveCCs[UE_id]; i++) { - CC_id = UE_list->ordered_CCids[i][UE_id]; - eNB_UE_stats = &UE_list->eNB_UE_stats[CC_id][UE_id]; - - if (eNB_UE_stats->dlsch_mcs1 == 0) { - nb_rbs_required[CC_id][UE_id] = 4; // don't let the TBS get too small - } else { - nb_rbs_required[CC_id][UE_id] = min_rb_unit[CC_id]; - } - - TBS = - get_TBS_DL(eNB_UE_stats->dlsch_mcs1, - nb_rbs_required[CC_id][UE_id]); - - LOG_D(MAC, - "[preprocessor] start RB assignement for UE %d CC_id %d dl buffer %d (RB unit %d, MCS %d, TBS %d) \n", - UE_id, CC_id, - UE_list->UE_template[pCCid][UE_id].dl_buffer_total, - nb_rbs_required[CC_id][UE_id], - eNB_UE_stats->dlsch_mcs1, TBS); - - N_RB_DL = - to_prb(RC.mac[Mod_id]->common_channels[CC_id]. - mib->message.dl_Bandwidth); - - UE_list->UE_sched_ctrl[UE_id].max_rbs_allowed_slice[CC_id][slice_id]= flexran_nb_rbs_allowed_slice(slice_percentage[slice_id],N_RB_DL); - - /* calculating required number of RBs for each UE */ - while (TBS < - UE_list->UE_template[pCCid][UE_id]. 
- dl_buffer_total) { - nb_rbs_required[CC_id][UE_id] += min_rb_unit[CC_id]; - - if (nb_rbs_required[CC_id][UE_id] > UE_list->UE_sched_ctrl[UE_id].max_rbs_allowed_slice[CC_id][slice_id]) { - TBS = get_TBS_DL(eNB_UE_stats->dlsch_mcs1, UE_list->UE_sched_ctrl[UE_id].max_rbs_allowed_slice[CC_id][slice_id]); - nb_rbs_required[CC_id][UE_id] = UE_list->UE_sched_ctrl[UE_id].max_rbs_allowed_slice[CC_id][slice_id]; - break; - } - - TBS = - get_TBS_DL(eNB_UE_stats->dlsch_mcs1, - nb_rbs_required[CC_id][UE_id]); - } // end of while - - LOG_D(MAC, - "[eNB %d] Frame %d: UE %d on CC %d: RB unit %d, nb_required RB %d (TBS %d, mcs %d)\n", - Mod_id, frameP, UE_id, CC_id, min_rb_unit[CC_id], - nb_rbs_required[CC_id][UE_id], TBS, - eNB_UE_stats->dlsch_mcs1); - } - } + // clear rb allocations across all CC_id + for (UE_id = 0; UE_id < MAX_MOBILES_PER_ENB; UE_id++) { + if (UE_list->active[UE_id] != TRUE) continue; + if (!ue_dl_slice_membership(Mod_id, UE_id, slice_idx)) continue; + pCCid = UE_PCCID(Mod_id, UE_id); + + //update CQI information across component carriers + for (n = 0; n < UE_list->numactiveCCs[UE_id]; n++) { + + CC_id = UE_list->ordered_CCids[n][UE_id]; + eNB_UE_stats = &UE_list->eNB_UE_stats[CC_id][UE_id]; +// eNB_UE_stats->dlsch_mcs1 = cmin(cqi_to_mcs[UE_list->UE_sched_ctrl[UE_id].dl_cqi[CC_id]], sli->dl[slice_idx].maxmcs); + eNB_UE_stats->dlsch_mcs1 = cmin(cqi2mcs(UE_list->UE_sched_ctrl[UE_id].dl_cqi[CC_id]), sli->dl[slice_idx].maxmcs); + + } + + // provide the list of CCs sorted according to MCS + for (i = 0; i < UE_list->numactiveCCs[UE_id]; ++i) { + eNB_UE_stats_i = &UE_list->eNB_UE_stats[UE_list->ordered_CCids[i][UE_id]][UE_id]; + for (j = i + 1; j < UE_list->numactiveCCs[UE_id]; j++) { + DevAssert(j < NFAPI_CC_MAX); + eNB_UE_stats_j = &UE_list->eNB_UE_stats[UE_list->ordered_CCids[j][UE_id]][UE_id]; + if (eNB_UE_stats_j->dlsch_mcs1 > eNB_UE_stats_i->dlsch_mcs1) { + tmp = UE_list->ordered_CCids[i][UE_id]; + UE_list->ordered_CCids[i][UE_id] = 
UE_list->ordered_CCids[j][UE_id]; + UE_list->ordered_CCids[j][UE_id] = tmp; + } + } + } + + if (UE_list->UE_template[pCCid][UE_id].dl_buffer_total > 0) { + LOG_D(MAC, "[preprocessor] assign RB for UE %d\n", UE_id); + + for (i = 0; i < UE_list->numactiveCCs[UE_id]; i++) { + CC_id = UE_list->ordered_CCids[i][UE_id]; + eNB_UE_stats = &UE_list->eNB_UE_stats[CC_id][UE_id]; + + if (eNB_UE_stats->dlsch_mcs1 == 0) { + nb_rbs_required[CC_id][UE_id] = 4; // don't let the TBS get too small + } else { + nb_rbs_required[CC_id][UE_id] = min_rb_unit[CC_id]; + } + + TBS = get_TBS_DL(eNB_UE_stats->dlsch_mcs1, nb_rbs_required[CC_id][UE_id]); + + LOG_D(MAC, + "[preprocessor] start RB assignement for UE %d CC_id %d dl buffer %d (RB unit %d, MCS %d, TBS %d) \n", + UE_id, CC_id, + UE_list->UE_template[pCCid][UE_id].dl_buffer_total, + nb_rbs_required[CC_id][UE_id], + eNB_UE_stats->dlsch_mcs1, TBS); + + N_RB_DL = to_prb(RC.mac[Mod_id]->common_channels[CC_id].mib->message.dl_Bandwidth); + + UE_list->UE_sched_ctrl[UE_id].max_rbs_allowed_slice[CC_id][slice_idx] = + nb_rbs_allowed_slice(sli->dl[slice_idx].pct, N_RB_DL); + + /* calculating required number of RBs for each UE */ + while (TBS < UE_list->UE_template[pCCid][UE_id].dl_buffer_total) { + nb_rbs_required[CC_id][UE_id] += min_rb_unit[CC_id]; + + if (nb_rbs_required[CC_id][UE_id] > UE_list->UE_sched_ctrl[UE_id].max_rbs_allowed_slice[CC_id][slice_idx]) { + TBS = get_TBS_DL(eNB_UE_stats->dlsch_mcs1, UE_list->UE_sched_ctrl[UE_id].max_rbs_allowed_slice[CC_id][slice_idx]); + nb_rbs_required[CC_id][UE_id] = UE_list->UE_sched_ctrl[UE_id].max_rbs_allowed_slice[CC_id][slice_idx]; + break; + } + TBS = get_TBS_DL(eNB_UE_stats->dlsch_mcs1, nb_rbs_required[CC_id][UE_id]); + } // end of while + + LOG_D(MAC, + "[eNB %d] Frame %d: UE %d on CC %d: RB unit %d, nb_required RB %d (TBS %d, mcs %d)\n", + Mod_id, frameP, UE_id, CC_id, min_rb_unit[CC_id], + nb_rbs_required[CC_id][UE_id], TBS, + eNB_UE_stats->dlsch_mcs1); + + 
sli->pre_processor_results[slice_idx].mcs[CC_id][UE_id] = eNB_UE_stats->dlsch_mcs1; + } } + } } // This function scans all CC_ids for a particular UE to find the maximum round index of its HARQ processes - int maxround(module_id_t Mod_id, uint16_t rnti, int frame, - sub_frame_t subframe, uint8_t ul_flag) -{ - - uint8_t round, round_max = 0, UE_id; - int CC_id, harq_pid; - UE_list_t *UE_list = &RC.mac[Mod_id]->UE_list; - COMMON_channels_t *cc; + sub_frame_t subframe, uint8_t ul_flag) { - for (CC_id = 0; CC_id < RC.nb_mac_CC[Mod_id]; CC_id++) { - - cc = &RC.mac[Mod_id]->common_channels[CC_id]; + uint8_t round, round_max = 0, UE_id; + int CC_id, harq_pid; + UE_list_t *UE_list = &RC.mac[Mod_id]->UE_list; + COMMON_channels_t *cc; - UE_id = find_UE_id(Mod_id, rnti); + for (CC_id = 0; CC_id < RC.nb_mac_CC[Mod_id]; CC_id++) { + cc = &RC.mac[Mod_id]->common_channels[CC_id]; - harq_pid = frame_subframe2_dl_harq_pid(cc->tdd_Config,frame ,subframe); + UE_id = find_UE_id(Mod_id, rnti); + harq_pid = frame_subframe2_dl_harq_pid(cc->tdd_Config,frame ,subframe); - round = UE_list->UE_sched_ctrl[UE_id].round[CC_id][harq_pid]; - if (round > round_max) { - round_max = round; - } + round = UE_list->UE_sched_ctrl[UE_id].round[CC_id][harq_pid]; + if (round > round_max) { + round_max = round; } + } - return round_max; + return round_max; } // This function scans all CC_ids for a particular UE to find the maximum DL CQI // it returns -1 if the UE is not found in PHY layer (get_eNB_UE_stats gives NULL) -int maxcqi(module_id_t Mod_id, int32_t UE_id) -{ - UE_list_t *UE_list = &RC.mac[Mod_id]->UE_list; - int CC_id, n; - int CQI = 0; +int maxcqi(module_id_t Mod_id, int32_t UE_id) { + UE_list_t *UE_list = &RC.mac[Mod_id]->UE_list; + int CC_id, n; + int CQI = 0; - for (n = 0; n < UE_list->numactiveCCs[UE_id]; n++) { - CC_id = UE_list->ordered_CCids[n][UE_id]; + for (n = 0; n < UE_list->numactiveCCs[UE_id]; n++) { + CC_id = UE_list->ordered_CCids[n][UE_id]; - if 
(UE_list->UE_sched_ctrl[UE_id].dl_cqi[CC_id] > CQI) { - CQI = UE_list->UE_sched_ctrl[UE_id].dl_cqi[CC_id]; - } + if (UE_list->UE_sched_ctrl[UE_id].dl_cqi[CC_id] > CQI) { + CQI = UE_list->UE_sched_ctrl[UE_id].dl_cqi[CC_id]; } + } + + return CQI; +} + +long min_lcgidpriority(module_id_t Mod_id, int32_t UE_id) { + UE_list_t *UE_list = &RC.mac[Mod_id]->UE_list; + int i; + int pCC_id = UE_PCCID(Mod_id, UE_id); + long ret = UE_list->UE_template[pCC_id][UE_id].lcgidpriority[0]; + + for (i = 1; i < 11; ++i) { + if (UE_list->UE_template[pCC_id][UE_id].lcgidpriority[i] < ret) + ret = UE_list->UE_template[pCC_id][UE_id].lcgidpriority[i]; + } - return CQI; + return ret; } struct sort_ue_dl_params { int Mod_idP; int frameP; int subframeP; - int slice_id; + int slice_idx; }; static int ue_dl_compare(const void *_a, const void *_b, void *_params) { - struct sort_ue_dl_params *params = _params; - UE_list_t *UE_list = &RC.mac[params->Mod_idP]->UE_list; + struct sort_ue_dl_params *params = _params; + UE_list_t *UE_list = &RC.mac[params->Mod_idP]->UE_list; - int i; - int slice_id = params->slice_id; - int UE_id1 = *(const int *) _a; - int UE_id2 = *(const int *) _b; + int i; + int slice_idx = params->slice_idx; + int UE_id1 = *(const int *) _a; + int UE_id2 = *(const int *) _b; - int rnti1 = UE_RNTI(params->Mod_idP, UE_id1); - int pCC_id1 = UE_PCCID(params->Mod_idP, UE_id1); - int round1 = maxround(params->Mod_idP, rnti1, params->frameP, params->subframeP, 1); + int rnti1 = UE_RNTI(params->Mod_idP, UE_id1); + int pCC_id1 = UE_PCCID(params->Mod_idP, UE_id1); + int round1 = maxround(params->Mod_idP, rnti1, params->frameP, params->subframeP, 1); - int rnti2 = UE_RNTI(params->Mod_idP, UE_id2); - int pCC_id2 = UE_PCCID(params->Mod_idP, UE_id2); - int round2 = maxround(params->Mod_idP, rnti2, params->frameP, params->subframeP, 1); + int rnti2 = UE_RNTI(params->Mod_idP, UE_id2); + int pCC_id2 = UE_PCCID(params->Mod_idP, UE_id2); + int round2 = maxround(params->Mod_idP, rnti2, 
params->frameP, params->subframeP, 1); - int cqi1 = maxcqi(params->Mod_idP, UE_id1); - int cqi2 = maxcqi(params->Mod_idP, UE_id2); + int cqi1 = maxcqi(params->Mod_idP, UE_id1); + int cqi2 = maxcqi(params->Mod_idP, UE_id2); + + long lcgid1 = min_lcgidpriority(params->Mod_idP, UE_id1); + long lcgid2 = min_lcgidpriority(params->Mod_idP, UE_id2); for (i = 0; i < CR_NUM; ++i) { - switch (UE_list->sorting_criteria[slice_id][i]) { + switch (UE_list->sorting_criteria[slice_idx][i]) { case CR_ROUND : if (round1 > round2) @@ -423,6 +405,13 @@ static int ue_dl_compare(const void *_a, const void *_b, void *_params) return -1; if (cqi1 < cqi2) return 1; + break; + + case CR_LCP : + if (lcgid1 < lcgid2) + return -1; + if (lcgid1 > lcgid2) + return 1; default : break; @@ -432,190 +421,272 @@ static int ue_dl_compare(const void *_a, const void *_b, void *_params) return 0; } -void decode_sorting_policy(module_id_t Mod_idP, slice_id_t slice_id) { - int i; +void decode_sorting_policy(module_id_t Mod_idP, int slice_idx) { + int i; - UE_list_t *UE_list = &RC.mac[Mod_idP]->UE_list; - uint32_t policy = sorting_policy[slice_id]; - uint32_t mask = 0x0000000F; - uint16_t criterion; + UE_list_t *UE_list = &RC.mac[Mod_idP]->UE_list; + uint32_t policy = RC.mac[Mod_idP]->slice_info.dl[slice_idx].sorting; + uint32_t mask = 0x0000000F; + uint16_t criterion; - for(i = 0; i < CR_NUM; ++i) { - criterion = (uint16_t)(policy >> 4*(CR_NUM - 1 - i) & mask); - if (criterion >= CR_NUM) { - LOG_W(MAC, "Invalid criterion in slice %d policy, revert to default policy \n", slice_id); - sorting_policy[slice_id] = 0x1234; - break; - } - UE_list->sorting_criteria[slice_id][i] = criterion; - } + for (i = 0; i < CR_NUM; ++i) { + criterion = (uint16_t) (policy >> 4 * (CR_NUM - 1 - i) & mask); + if (criterion >= CR_NUM) { + LOG_W(MAC, + "Invalid criterion in slice index %d ID %d policy, revert to default policy \n", + slice_idx, RC.mac[Mod_idP]->slice_info.dl[slice_idx].id); + 
RC.mac[Mod_idP]->slice_info.dl[slice_idx].sorting = 0x12345; + break; + } + UE_list->sorting_criteria[slice_idx][i] = criterion; + } +} + +void decode_slice_positioning(module_id_t Mod_idP, + int slice_idx, + uint8_t slice_allocation_mask[NFAPI_CC_MAX][N_RBG_MAX]) +{ + uint8_t CC_id; + int RBG, start_frequency, end_frequency; + + // Init slice_alloc_mask + for (CC_id = 0; CC_id < RC.nb_mac_CC[Mod_idP]; ++CC_id) { + for (RBG = 0; RBG < N_RBG_MAX; ++RBG) { + slice_allocation_mask[CC_id][RBG] = 0; + } + } + + start_frequency = RC.mac[Mod_idP]->slice_info.dl[slice_idx].pos_low; + end_frequency = RC.mac[Mod_idP]->slice_info.dl[slice_idx].pos_high; + for (CC_id = 0; CC_id < RC.nb_mac_CC[Mod_idP]; ++CC_id) { + for (RBG = start_frequency; RBG <= end_frequency; ++RBG) { + slice_allocation_mask[CC_id][RBG] = 1; + } + } } // This fuction sorts the UE in order their dlsch buffer and CQI -void sort_UEs(module_id_t Mod_idP, slice_id_t slice_id, int frameP, sub_frame_t subframeP) +void sort_UEs(module_id_t Mod_idP, int slice_idx, int frameP, sub_frame_t subframeP) { - int i; - int list[MAX_MOBILES_PER_ENB]; - int list_size = 0; - int rnti; - struct sort_ue_dl_params params = { Mod_idP, frameP, subframeP, slice_id }; + int i; + int list[MAX_MOBILES_PER_ENB]; + int list_size = 0; + struct sort_ue_dl_params params = {Mod_idP, frameP, subframeP, slice_idx}; - UE_list_t *UE_list = &RC.mac[Mod_idP]->UE_list; + UE_list_t *UE_list = &RC.mac[Mod_idP]->UE_list; - for (i = 0; i < MAX_MOBILES_PER_ENB; i++) { + for (i = 0; i < MAX_MOBILES_PER_ENB; i++) { + + if (UE_list->active[i] == FALSE) continue; + if (UE_RNTI(Mod_idP, i) == NOT_A_RNTI) continue; + if (UE_list->UE_sched_ctrl[i].ul_out_of_sync == 1) continue; + if (!ue_dl_slice_membership(Mod_idP, i, slice_idx)) continue; + + list[list_size] = i; + list_size++; + } + + decode_sorting_policy(Mod_idP, slice_idx); + + qsort_r(list, list_size, sizeof(int), ue_dl_compare, ¶ms); - if (UE_list->active[i] == FALSE) - continue; - if ((rnti = 
UE_RNTI(Mod_idP, i)) == NOT_A_RNTI) - continue; - if (!ue_slice_membership(i, slice_id)) - continue; + if (list_size) { + for (i = 0; i < list_size - 1; ++i) + UE_list->next[list[i]] = list[i + 1]; + UE_list->next[list[list_size - 1]] = -1; + UE_list->head = list[0]; + } else { + UE_list->head = -1; + } +} + +void dlsch_scheduler_pre_processor_partitioning(module_id_t Mod_id, + int slice_idx, + const uint8_t rbs_retx[NFAPI_CC_MAX]) +{ + int UE_id, CC_id, N_RB_DL, i; + UE_list_t *UE_list = &RC.mac[Mod_id]->UE_list; + UE_sched_ctrl *ue_sched_ctl; + uint16_t available_rbs; - list[list_size] = i; - list_size++; - } + for (UE_id = UE_list->head; UE_id >= 0; UE_id = UE_list->next[UE_id]) { - decode_sorting_policy(Mod_idP, slice_id); + if (UE_RNTI(Mod_id, UE_id) == NOT_A_RNTI) continue; + if (UE_list->UE_sched_ctrl[UE_id].ul_out_of_sync == 1) continue; + if (!ue_dl_slice_membership(Mod_id, UE_id, slice_idx)) continue; - qsort_r(list, list_size, sizeof(int), ue_dl_compare, ¶ms); + ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id]; - if (list_size) { - for (i = 0; i < list_size - 1; i++) - UE_list->next[list[i]] = list[i + 1]; - UE_list->next[list[list_size - 1]] = -1; - UE_list->head = list[0]; - } else { - UE_list->head = -1; - } + for (i = 0; i < UE_num_active_CC(UE_list, UE_id); ++i) { + CC_id = UE_list->ordered_CCids[i][UE_id]; + N_RB_DL = to_prb(RC.mac[Mod_id]->common_channels[CC_id].mib->message.dl_Bandwidth); + available_rbs = nb_rbs_allowed_slice(RC.mac[Mod_id]->slice_info.dl[slice_idx].pct, N_RB_DL); + if (rbs_retx[CC_id] < available_rbs) + ue_sched_ctl->max_rbs_allowed_slice[CC_id][slice_idx] = available_rbs - rbs_retx[CC_id]; + else + ue_sched_ctl->max_rbs_allowed_slice[CC_id][slice_idx] = 0; + } + } } void dlsch_scheduler_pre_processor_accounting(module_id_t Mod_id, - slice_id_t slice_id, + int slice_idx, frame_t frameP, sub_frame_t subframeP, - int N_RBG[NFAPI_CC_MAX], int min_rb_unit[NFAPI_CC_MAX], - uint8_t rballoc_sub[NFAPI_CC_MAX][N_RBG_MAX], - uint8_t 
MIMO_mode_indicator[NFAPI_CC_MAX][N_RBG_MAX], - uint16_t nb_rbs_required[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB]) { - - + uint16_t nb_rbs_required[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB], + uint16_t nb_rbs_accounted[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB]) +{ int UE_id, CC_id; - int ii, r1; + int i; rnti_t rnti; - uint8_t harq_pid, round, transmission_mode; - uint8_t total_rbs_used[NFAPI_CC_MAX]; - uint8_t total_ue_count[NFAPI_CC_MAX]; + uint8_t harq_pid, round; + uint16_t available_rbs[NFAPI_CC_MAX]; + uint8_t rbs_retx[NFAPI_CC_MAX]; uint16_t average_rbs_per_user[NFAPI_CC_MAX]; - uint16_t nb_rbs_required_remaining[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB]; - uint16_t nb_rbs_required_remaining_1[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB]; + int total_ue_count[NFAPI_CC_MAX]; + int ue_count_newtx[NFAPI_CC_MAX]; + int ue_count_retx[NFAPI_CC_MAX]; + //uint8_t ue_retx_flag[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB]; - int N_RB_DL; UE_list_t *UE_list = &RC.mac[Mod_id]->UE_list; UE_sched_ctrl *ue_sched_ctl; COMMON_channels_t *cc; + // Reset for (CC_id = 0; CC_id < RC.nb_mac_CC[Mod_id]; CC_id++) { total_ue_count[CC_id] = 0; - total_rbs_used[CC_id] = 0; + ue_count_newtx[CC_id] = 0; + ue_count_retx[CC_id] = 0; + rbs_retx[CC_id] = 0; average_rbs_per_user[CC_id] = 0; - for (UE_id = 0; UE_id < MAX_MOBILES_PER_ENB; ++UE_id) { - nb_rbs_required_remaining[CC_id][UE_id] = 0; - } + available_rbs[CC_id] = 0; + //for (UE_id = 0; UE_id < NFAPI_CC_MAX; ++UE_id) { + // ue_retx_flag[CC_id][UE_id] = 0; + //} } - // loop over all active UEs + // Find total UE count, and account the RBs required for retransmissions for (UE_id = UE_list->head; UE_id >= 0; UE_id = UE_list->next[UE_id]) { rnti = UE_RNTI(Mod_id, UE_id); + if (rnti == NOT_A_RNTI) continue; + if (UE_list->UE_sched_ctrl[UE_id].ul_out_of_sync == 1) continue; + if (!ue_dl_slice_membership(Mod_id, UE_id, slice_idx)) continue; - if (rnti == NOT_A_RNTI) - continue; - if (!ue_slice_membership(UE_id, slice_id)) - continue; - - for (ii = 0; ii < UE_num_active_CC(UE_list, 
UE_id); ii++) { - CC_id = UE_list->ordered_CCids[ii][UE_id]; + for (i = 0; i < UE_num_active_CC(UE_list, UE_id); ++i) { + CC_id = UE_list->ordered_CCids[i][UE_id]; ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id]; cc = &RC.mac[Mod_id]->common_channels[CC_id]; - // TODO Can we use subframe2harqpid() here? - if (cc->tdd_Config) - harq_pid = ((frameP * 10) + subframeP) % 10; - else - harq_pid = ((frameP * 10) + subframeP) & 7; + harq_pid = frame_subframe2_dl_harq_pid(cc->tdd_Config,frameP ,subframeP); round = ue_sched_ctl->round[CC_id][harq_pid]; - average_rbs_per_user[CC_id] = 0; + if (nb_rbs_required[CC_id][UE_id] > 0) { + total_ue_count[CC_id]++; + } if (round != 8) { nb_rbs_required[CC_id][UE_id] = UE_list->UE_template[CC_id][UE_id].nb_rb[harq_pid]; - total_rbs_used[CC_id] += nb_rbs_required[CC_id][UE_id]; - } - - //nb_rbs_required_remaining[UE_id] = nb_rbs_required[UE_id]; - if (nb_rbs_required[CC_id][UE_id] > 0) { - total_ue_count[CC_id] = total_ue_count[CC_id] + 1; + rbs_retx[CC_id] += nb_rbs_required[CC_id][UE_id]; + ue_count_retx[CC_id]++; + //ue_retx_flag[CC_id][UE_id] = 1; + } else { + ue_count_newtx[CC_id]++; } } } - // loop over all active UEs and calculate avg rb per user based on total active UEs - for (UE_id = UE_list->head; UE_id >= 0; UE_id = UE_list->next[UE_id]) { - rnti = UE_RNTI(Mod_id, UE_id); + // PARTITIONING + // Reduces the available RBs according to slicing configuration + dlsch_scheduler_pre_processor_partitioning(Mod_id, slice_idx, rbs_retx); - if (rnti == NOT_A_RNTI) - continue; - if (UE_list->UE_sched_ctrl[UE_id].ul_out_of_sync == 1) - continue; - if (!ue_slice_membership(UE_id, slice_id)) - continue; + for (CC_id = 0; CC_id < RC.nb_mac_CC[Mod_id]; ++CC_id) { + if (UE_list->head < 0) continue; // no UEs in list + // max_rbs_allowed_slice is saved in every UE, so take it from the first one + ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_list->head]; + available_rbs[CC_id] = ue_sched_ctl->max_rbs_allowed_slice[CC_id][slice_idx]; + } - for (ii = 
0; ii < UE_num_active_CC(UE_list, UE_id); ii++) { - CC_id = UE_list->ordered_CCids[ii][UE_id]; + switch (RC.mac[Mod_id]->slice_info.dl[slice_idx].accounting) { - // hypothetical assignment - /* - * If schedule is enabled and if the priority of the UEs is modified - * The average rbs per logical channel per user will depend on the level of - * priority. Concerning the hypothetical assignement, we should assign more - * rbs to prioritized users. Maybe, we can do a mapping between the - * average rbs per user and the level of priority or multiply the average rbs - * per user by a coefficient which represents the degree of priority. - */ + // If greedy scheduling, try to account all the required RBs + case POL_GREEDY: - N_RB_DL = to_prb(RC.mac[Mod_id]->common_channels[CC_id].mib->message.dl_Bandwidth) - total_rbs_used[CC_id]; + for (UE_id = UE_list->head; UE_id >= 0; UE_id = UE_list->next[UE_id]) { + rnti = UE_RNTI(Mod_id, UE_id); + if (rnti == NOT_A_RNTI) continue; + if (UE_list->UE_sched_ctrl[UE_id].ul_out_of_sync == 1) continue; + if (!ue_dl_slice_membership(Mod_id, UE_id, slice_idx)) continue; - // recalculate based on the what is left after retransmission - ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id]; - ue_sched_ctl->max_rbs_allowed_slice[CC_id][slice_id] = - flexran_nb_rbs_allowed_slice(slice_percentage[slice_id], N_RB_DL); + for (i = 0; i < UE_num_active_CC(UE_list, UE_id); i++) { + CC_id = UE_list->ordered_CCids[i][UE_id]; + if (available_rbs[CC_id] == 0) continue; + nb_rbs_accounted[CC_id][UE_id] = cmin(nb_rbs_required[CC_id][UE_id], available_rbs[CC_id]); + available_rbs[CC_id] -= nb_rbs_accounted[CC_id][UE_id]; + } + } + break; - if (total_ue_count[CC_id] == 0) { - average_rbs_per_user[CC_id] = 0; - } else if ((min_rb_unit[CC_id] * total_ue_count[CC_id]) <= - (ue_sched_ctl->max_rbs_allowed_slice[CC_id][slice_id])) { - average_rbs_per_user[CC_id] = - (uint16_t) floor(ue_sched_ctl->max_rbs_allowed_slice[CC_id][slice_id] / total_ue_count[CC_id]); - } else { - 
// consider the total number of use that can be scheduled UE - average_rbs_per_user[CC_id] = min_rb_unit[CC_id]; + // Use the old, fair algorithm + // Loop over all active UEs and account the avg number of RBs to each UE, based on all non-retx UEs. + // case POL_FAIR: + default: + // FIXME: This is not ideal, why loop on UEs to find average_rbs_per_user[], that is per-CC? + // TODO: Look how to loop on active CCs only without using the UE_num_active_CC() function. + for (UE_id = UE_list->head; UE_id >= 0; UE_id = UE_list->next[UE_id]) { + rnti = UE_RNTI(Mod_id, UE_id); + + if (rnti == NOT_A_RNTI) continue; + if (UE_list->UE_sched_ctrl[UE_id].ul_out_of_sync == 1) continue; + if (!ue_dl_slice_membership(Mod_id, UE_id, slice_idx)) continue; + + for (i = 0; i < UE_num_active_CC(UE_list, UE_id); ++i) { + + CC_id = UE_list->ordered_CCids[i][UE_id]; + ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id]; + available_rbs[CC_id] = ue_sched_ctl->max_rbs_allowed_slice[CC_id][slice_idx]; + + if (ue_count_newtx[CC_id] == 0) { + average_rbs_per_user[CC_id] = 0; + } else if (min_rb_unit[CC_id]*ue_count_newtx[CC_id] <= available_rbs[CC_id]) { + average_rbs_per_user[CC_id] = (uint16_t)floor(available_rbs[CC_id]/ue_count_newtx[CC_id]); + } else { + // consider the total number of use that can be scheduled UE + average_rbs_per_user[CC_id] = (uint16_t)min_rb_unit[CC_id]; + } + } } - } + + // note: nb_rbs_required is assigned according to total_buffer_dl + // extend nb_rbs_required to capture per LCID RB required + for (UE_id = UE_list->head; UE_id >= 0; UE_id = UE_list->next[UE_id]) { + rnti = UE_RNTI(Mod_id, UE_id); + if (rnti == NOT_A_RNTI) continue; + if (UE_list->UE_sched_ctrl[UE_id].ul_out_of_sync == 1) continue; + if (!ue_dl_slice_membership(Mod_id, UE_id, slice_idx)) continue; + + for (i = 0; i < UE_num_active_CC(UE_list, UE_id); i++) { + CC_id = UE_list->ordered_CCids[i][UE_id]; + nb_rbs_accounted[CC_id][UE_id] = cmin(average_rbs_per_user[CC_id], nb_rbs_required[CC_id][UE_id]); + } + 
} + break; } - // note: nb_rbs_required is assigned according to total_buffer_dl - // extend nb_rbs_required to capture per LCID RB required + + + + // Check retransmissions + // TODO: Do this once at the beginning for (UE_id = UE_list->head; UE_id >= 0; UE_id = UE_list->next[UE_id]) { rnti = UE_RNTI(Mod_id, UE_id); + if (rnti == NOT_A_RNTI) continue; + if (UE_list->UE_sched_ctrl[UE_id].ul_out_of_sync == 1) continue; + if (!ue_dl_slice_membership(Mod_id, UE_id, slice_idx)) continue; - if (rnti == NOT_A_RNTI) - continue; - if (!ue_slice_membership(UE_id, slice_id)) - continue; - - for (ii = 0; ii < UE_num_active_CC(UE_list, UE_id); ii++) { - CC_id = UE_list->ordered_CCids[ii][UE_id]; + for (i = 0; i < UE_num_active_CC(UE_list, UE_id); i++) { + CC_id = UE_list->ordered_CCids[i][UE_id]; ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id]; cc = &RC.mac[Mod_id]->common_channels[CC_id]; harq_pid = frame_subframe2_dl_harq_pid(cc->tdd_Config,frameP ,subframeP); @@ -624,276 +695,494 @@ void dlsch_scheduler_pre_processor_accounting(module_id_t Mod_id, // control channel or retransmission /* TODO: do we have to check for retransmission? 
*/ if (mac_eNB_get_rrc_status(Mod_id, rnti) < RRC_RECONFIGURED || round != 8) { - nb_rbs_required_remaining_1[CC_id][UE_id] = - nb_rbs_required[CC_id][UE_id]; - } else { - nb_rbs_required_remaining_1[CC_id][UE_id] = - cmin(average_rbs_per_user[CC_id], nb_rbs_required[CC_id][UE_id]); + nb_rbs_accounted[CC_id][UE_id] = nb_rbs_required[CC_id][UE_id]; } } } +} - //Allocation to UEs is done in 2 rounds, - // 1st stage: average number of RBs allocated to each UE - // 2nd stage: remaining RBs are allocated to high priority UEs - for (r1 = 0; r1 < 2; r1++) { - - for (UE_id = UE_list->head; UE_id >= 0; UE_id = UE_list->next[UE_id]) { - for (ii = 0; ii < UE_num_active_CC(UE_list, UE_id); ii++) { - CC_id = UE_list->ordered_CCids[ii][UE_id]; - - if (r1 == 0) { - nb_rbs_required_remaining[CC_id][UE_id] = - nb_rbs_required_remaining_1[CC_id][UE_id]; - } else { // rb required based only on the buffer - rb allocated in the 1st round + extra reaming rb form the 1st round - nb_rbs_required_remaining[CC_id][UE_id] = - nb_rbs_required[CC_id][UE_id] - - nb_rbs_required_remaining_1[CC_id][UE_id] + - nb_rbs_required_remaining[CC_id][UE_id]; - if (nb_rbs_required_remaining[CC_id][UE_id] < 0) - abort(); - } +void dlsch_scheduler_pre_processor_positioning(module_id_t Mod_id, + int slice_idx, + int min_rb_unit[NFAPI_CC_MAX], + uint16_t nb_rbs_required[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB], + uint16_t nb_rbs_accounted[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB], + uint16_t nb_rbs_remaining[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB], + uint8_t rballoc_sub[NFAPI_CC_MAX][N_RBG_MAX], + uint8_t MIMO_mode_indicator[NFAPI_CC_MAX][N_RBG_MAX]) +{ + int UE_id, CC_id; + int i; +#ifdef TM5 + uint8_t transmission_mode; +#endif + uint8_t slice_allocation_mask[NFAPI_CC_MAX][N_RBG_MAX]; + int N_RBG[NFAPI_CC_MAX]; + UE_list_t *UE_list = &RC.mac[Mod_id]->UE_list; - if (nb_rbs_required[CC_id][UE_id] > 0) - LOG_D(MAC, - "round %d : nb_rbs_required_remaining[%d][%d]= %d (remaining_1 %d, required %d, pre_nb_available_rbs %d, N_RBG 
%d, rb_unit %d)\n", - r1, CC_id, UE_id, - nb_rbs_required_remaining[CC_id][UE_id], - nb_rbs_required_remaining_1[CC_id][UE_id], - nb_rbs_required[CC_id][UE_id], - UE_list->UE_sched_ctrl[UE_id].pre_nb_available_rbs[CC_id], - N_RBG[CC_id], - min_rb_unit[CC_id]); + decode_slice_positioning(Mod_id, slice_idx, slice_allocation_mask); - } - } + for (CC_id = 0; CC_id < RC.nb_mac_CC[Mod_id]; CC_id++) { + COMMON_channels_t *cc = &RC.mac[Mod_id]->common_channels[CC_id]; + N_RBG[CC_id] = to_rbg(cc->mib->message.dl_Bandwidth); + } - for (UE_id = UE_list->head; UE_id >= 0; UE_id = UE_list->next[UE_id]) { + // Try to allocate accounted RBs + for (UE_id = UE_list->head; UE_id >= 0; UE_id = UE_list->next[UE_id]) { - for (ii = 0; ii < UE_num_active_CC(UE_list, UE_id); ii++) { + if (UE_RNTI(Mod_id, UE_id) == NOT_A_RNTI) continue; + if (UE_list->UE_sched_ctrl[UE_id].ul_out_of_sync == 1) continue; + if (!ue_dl_slice_membership(Mod_id, UE_id, slice_idx)) continue; - CC_id = UE_list->ordered_CCids[ii][UE_id]; - // if there are UEs with traffic - if (total_ue_count[CC_id] > 0) { - // ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id]; - // round = ue_sched_ctl->round[CC_id][harq_pid]; + for (i = 0; i < UE_num_active_CC(UE_list, UE_id); i++) { + CC_id = UE_list->ordered_CCids[i][UE_id]; + nb_rbs_remaining[CC_id][UE_id] = nb_rbs_accounted[CC_id][UE_id]; +#ifdef TM5 + transmission_mode = get_tmode(Mod_id, CC_id, UE_id); +#endif - rnti = UE_RNTI(Mod_id, UE_id); + if (nb_rbs_required[CC_id][UE_id] > 0) + LOG_D(MAC, + "Step 1: nb_rbs_remaining[%d][%d]= %d (accounted %d, required %d, pre_nb_available_rbs %d, N_RBG %d, rb_unit %d)\n", + CC_id, + UE_id, + nb_rbs_remaining[CC_id][UE_id], + nb_rbs_accounted[CC_id][UE_id], + nb_rbs_required[CC_id][UE_id], + UE_list->UE_sched_ctrl[UE_id].pre_nb_available_rbs[CC_id], + N_RBG[CC_id], + min_rb_unit[CC_id]); + + LOG_T(MAC, "calling dlsch_scheduler_pre_processor_allocate .. 
\n "); + dlsch_scheduler_pre_processor_allocate(Mod_id, + UE_id, + CC_id, + N_RBG[CC_id], + min_rb_unit[CC_id], + nb_rbs_required, + nb_rbs_remaining, + rballoc_sub, + slice_allocation_mask, + MIMO_mode_indicator); - // LOG_D(MAC,"UE %d rnti 0x\n", UE_id, rnti ); - if (rnti == NOT_A_RNTI) - continue; - if (!ue_slice_membership(UE_id, slice_id)) - continue; +#ifdef TM5 + // data chanel TM5: to be revisited + if ((round == 0) && + (transmission_mode == 5) && + (ue_sched_ctl->dl_pow_off[CC_id] != 1)) { + + for (j = 0; j < N_RBG[CC_id]; j += 2) { + + if ((((j == (N_RBG[CC_id] - 1)) + && (rballoc_sub[CC_id][j] == 0) + && (ue_sched_ctl-> + rballoc_sub_UE[CC_id][j] == 0)) + || ((j < (N_RBG[CC_id] - 1)) + && (rballoc_sub[CC_id][j + 1] == 0) + && + (ue_sched_ctl->rballoc_sub_UE + [CC_id][j + 1] == 0))) + && (nb_rbs_remaining[CC_id][UE_id] + > 0)) { + + for (i = UE_list->next[UE_id + 1]; i >= 0; + i = UE_list->next[i]) { + + UE_id2 = i; + rnti2 = UE_RNTI(Mod_id, UE_id2); + ue_sched_ctl2 = + &UE_list->UE_sched_ctrl[UE_id2]; + round2 = ue_sched_ctl2->round[CC_id]; + if (rnti2 == NOT_A_RNTI) + continue; + if (UE_list-> + UE_sched_ctrl + [UE_id2].ul_out_of_sync == 1) + continue; + + eNB_UE_stats2 = + UE_list-> + eNB_UE_stats[CC_id][UE_id2]; + //mac_xface->get_ue_active_harq_pid(Mod_id,CC_id,rnti2,frameP,subframeP,&harq_pid2,&round2,0); + + if ((mac_eNB_get_rrc_status + (Mod_id, + rnti2) >= RRC_RECONFIGURED) + && (round2 == 0) + && + (get_tmode(Mod_id, CC_id, UE_id2) + == 5) + && (ue_sched_ctl-> + dl_pow_off[CC_id] != 1)) { - transmission_mode = get_tmode(Mod_id, CC_id, UE_id); - // mac_xface->get_ue_active_harq_pid(Mod_id,CC_id,rnti,frameP,subframeP,&harq_pid,&round,0); - // rrc_status = mac_eNB_get_rrc_status(Mod_id,rnti); - /* 1st allocate for the retx */ - - // retransmission in data channels - // control channel in the 1st transmission - // data channel for all TM - LOG_T(MAC, - "calling dlsch_scheduler_pre_processor_allocate .. 
\n "); - dlsch_scheduler_pre_processor_allocate(Mod_id, UE_id, - CC_id, - N_RBG[CC_id], - transmission_mode, - min_rb_unit - [CC_id], - to_prb(RC.mac[Mod_id]->common_channels[CC_id].mib->message.dl_Bandwidth), - nb_rbs_required, - nb_rbs_required_remaining, - rballoc_sub, - MIMO_mode_indicator); + if ((((j == (N_RBG[CC_id] - 1)) + && + (ue_sched_ctl->rballoc_sub_UE + [CC_id][j] == 0)) + || ((j < (N_RBG[CC_id] - 1)) + && + (ue_sched_ctl-> + rballoc_sub_UE[CC_id][j + + 1] + == 0))) + && + (nb_rbs_remaining + [CC_id] + [UE_id2] > 0)) { + + if ((((eNB_UE_stats2-> + DL_pmi_single ^ + eNB_UE_stats1-> + DL_pmi_single) + << (14 - j)) & 0xc000) == 0x4000) { //MU-MIMO only for 25 RBs configuration + + rballoc_sub[CC_id][j] = 1; + ue_sched_ctl-> + rballoc_sub_UE[CC_id] + [j] = 1; + ue_sched_ctl2-> + rballoc_sub_UE[CC_id] + [j] = 1; + MIMO_mode_indicator[CC_id] + [j] = 0; + + if (j < N_RBG[CC_id] - 1) { + rballoc_sub[CC_id][j + + 1] = + 1; + ue_sched_ctl-> + rballoc_sub_UE + [CC_id][j + 1] = 1; + ue_sched_ctl2->rballoc_sub_UE + [CC_id][j + 1] = 1; + MIMO_mode_indicator + [CC_id][j + 1] + = 0; + } + + ue_sched_ctl-> + dl_pow_off[CC_id] + = 0; + ue_sched_ctl2-> + dl_pow_off[CC_id] + = 0; + + + if ((j == N_RBG[CC_id] - 1) + && ((N_RB_DL == 25) + || (N_RB_DL == + 50))) { + + nb_rbs_remaining + [CC_id][UE_id] = + nb_rbs_remaining + [CC_id][UE_id] - + min_rb_unit[CC_id] + + 1; + ue_sched_ctl->pre_nb_available_rbs + [CC_id] = + ue_sched_ctl->pre_nb_available_rbs + [CC_id] + + min_rb_unit[CC_id] + - 1; + nb_rbs_remaining + [CC_id][UE_id2] = + nb_rbs_remaining + [CC_id][UE_id2] - + min_rb_unit[CC_id] + + 1; + ue_sched_ctl2->pre_nb_available_rbs + [CC_id] = + ue_sched_ctl2->pre_nb_available_rbs + [CC_id] + + min_rb_unit[CC_id] + - 1; + } else { + + nb_rbs_remaining + [CC_id][UE_id] = + nb_rbs_remaining + [CC_id][UE_id] - 4; + ue_sched_ctl->pre_nb_available_rbs + [CC_id] = + ue_sched_ctl->pre_nb_available_rbs + [CC_id] + 4; + nb_rbs_remaining + [CC_id][UE_id2] = + nb_rbs_remaining + 
[CC_id][UE_id2] - + 4; + ue_sched_ctl2->pre_nb_available_rbs + [CC_id] = + ue_sched_ctl2->pre_nb_available_rbs + [CC_id] + 4; + } + + break; + } + } + } + } + } + } + } +#endif + } + } +} +void dlsch_scheduler_pre_processor_intraslice_sharing(module_id_t Mod_id, + int slice_idx, + int min_rb_unit[NFAPI_CC_MAX], + uint16_t nb_rbs_required[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB], + uint16_t nb_rbs_accounted[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB], + uint16_t nb_rbs_remaining[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB], + uint8_t rballoc_sub[NFAPI_CC_MAX][N_RBG_MAX], + uint8_t MIMO_mode_indicator[NFAPI_CC_MAX][N_RBG_MAX]) +{ + int UE_id, CC_id; + int i; #ifdef TM5 - // data chanel TM5: to be revisited - if ((round == 0) && - (transmission_mode == 5) && - (ue_sched_ctl->dl_pow_off[CC_id] != 1)) { + uint8_t transmission_mode; +#endif + UE_list_t *UE_list = &RC.mac[Mod_id]->UE_list; + int N_RBG[NFAPI_CC_MAX]; + slice_info_t *sli = &RC.mac[Mod_id]->slice_info; + uint8_t (*slice_allocation_mask)[N_RBG_MAX] = sli->pre_processor_results[slice_idx].slice_allocation_mask; - for (j = 0; j < N_RBG[CC_id]; j += 2) { + decode_slice_positioning(Mod_id, slice_idx, slice_allocation_mask); - if ((((j == (N_RBG[CC_id] - 1)) - && (rballoc_sub[CC_id][j] == 0) - && (ue_sched_ctl-> - rballoc_sub_UE[CC_id][j] == 0)) - || ((j < (N_RBG[CC_id] - 1)) - && (rballoc_sub[CC_id][j + 1] == 0) - && - (ue_sched_ctl->rballoc_sub_UE - [CC_id][j + 1] == 0))) - && (nb_rbs_required_remaining[CC_id][UE_id] - > 0)) { - - for (ii = UE_list->next[UE_id + 1]; ii >= 0; - ii = UE_list->next[ii]) { - - UE_id2 = ii; - rnti2 = UE_RNTI(Mod_id, UE_id2); - ue_sched_ctl2 = - &UE_list->UE_sched_ctrl[UE_id2]; - round2 = ue_sched_ctl2->round[CC_id]; - if (rnti2 == NOT_A_RNTI) - continue; - if (UE_list-> - UE_sched_ctrl - [UE_id2].ul_out_of_sync == 1) - continue; - - eNB_UE_stats2 = - UE_list-> - eNB_UE_stats[CC_id][UE_id2]; - //mac_xface->get_ue_active_harq_pid(Mod_id,CC_id,rnti2,frameP,subframeP,&harq_pid2,&round2,0); - - if 
((mac_eNB_get_rrc_status - (Mod_id, - rnti2) >= RRC_RECONFIGURED) - && (round2 == 0) + for (CC_id = 0; CC_id < RC.nb_mac_CC[Mod_id]; CC_id++) { + COMMON_channels_t *cc = &RC.mac[Mod_id]->common_channels[CC_id]; + N_RBG[CC_id] = to_rbg(cc->mib->message.dl_Bandwidth); + } + + // Remaining RBs are allocated to high priority UEs + for (UE_id = UE_list->head; UE_id >= 0; UE_id = UE_list->next[UE_id]) { + + if (UE_RNTI(Mod_id, UE_id) == NOT_A_RNTI) continue; + if (UE_list->UE_sched_ctrl[UE_id].ul_out_of_sync == 1) continue; + if (!ue_dl_slice_membership(Mod_id, UE_id, slice_idx)) continue; + + for (i = 0; i < UE_num_active_CC(UE_list, UE_id); i++) { + CC_id = UE_list->ordered_CCids[i][UE_id]; + nb_rbs_remaining[CC_id][UE_id] = + nb_rbs_required[CC_id][UE_id] - nb_rbs_accounted[CC_id][UE_id] + nb_rbs_remaining[CC_id][UE_id]; + if (nb_rbs_remaining[CC_id][UE_id] < 0) + abort(); +#ifdef TM5 + transmission_mode = get_tmode(Mod_id, CC_id, UE_id); +#endif + + if (nb_rbs_required[CC_id][UE_id] > 0) + LOG_D(MAC, + "Step 2: nb_rbs_remaining[%d][%d]= %d (accounted %d, required %d, pre_nb_available_rbs %d, N_RBG %d, rb_unit %d)\n", + CC_id, + UE_id, + nb_rbs_remaining[CC_id][UE_id], + nb_rbs_accounted[CC_id][UE_id], + nb_rbs_required[CC_id][UE_id], + UE_list->UE_sched_ctrl[UE_id].pre_nb_available_rbs[CC_id], + N_RBG[CC_id], + min_rb_unit[CC_id]); + + LOG_T(MAC, "calling dlsch_scheduler_pre_processor_allocate .. 
\n "); + dlsch_scheduler_pre_processor_allocate(Mod_id, + UE_id, + CC_id, + N_RBG[CC_id], + min_rb_unit[CC_id], + nb_rbs_required, + nb_rbs_remaining, + rballoc_sub, + slice_allocation_mask, + MIMO_mode_indicator); + +#ifdef TM5 + // data chanel TM5: to be revisited + if ((round == 0) && + (transmission_mode == 5) && + (ue_sched_ctl->dl_pow_off[CC_id] != 1)) { + + for (j = 0; j < N_RBG[CC_id]; j += 2) { + + if ((((j == (N_RBG[CC_id] - 1)) + && (rballoc_sub[CC_id][j] == 0) + && (ue_sched_ctl-> + rballoc_sub_UE[CC_id][j] == 0)) + || ((j < (N_RBG[CC_id] - 1)) + && (rballoc_sub[CC_id][j + 1] == 0) + && + (ue_sched_ctl->rballoc_sub_UE + [CC_id][j + 1] == 0))) + && (nb_rbs_remaining[CC_id][UE_id] + > 0)) { + + for (i = UE_list->next[UE_id + 1]; i >= 0; + i = UE_list->next[i]) { + + UE_id2 = i; + rnti2 = UE_RNTI(Mod_id, UE_id2); + ue_sched_ctl2 = + &UE_list->UE_sched_ctrl[UE_id2]; + round2 = ue_sched_ctl2->round[CC_id]; + if (rnti2 == NOT_A_RNTI) + continue; + if (UE_list-> + UE_sched_ctrl + [UE_id2].ul_out_of_sync == 1) + continue; + + eNB_UE_stats2 = + UE_list-> + eNB_UE_stats[CC_id][UE_id2]; + //mac_xface->get_ue_active_harq_pid(Mod_id,CC_id,rnti2,frameP,subframeP,&harq_pid2,&round2,0); + + if ((mac_eNB_get_rrc_status + (Mod_id, + rnti2) >= RRC_RECONFIGURED) + && (round2 == 0) + && + (get_tmode(Mod_id, CC_id, UE_id2) + == 5) + && (ue_sched_ctl-> + dl_pow_off[CC_id] != 1)) { + + if ((((j == (N_RBG[CC_id] - 1)) && - (get_tmode(Mod_id, CC_id, UE_id2) - == 5) - && (ue_sched_ctl-> - dl_pow_off[CC_id] != 1)) { - - if ((((j == (N_RBG[CC_id] - 1)) - && - (ue_sched_ctl->rballoc_sub_UE - [CC_id][j] == 0)) - || ((j < (N_RBG[CC_id] - 1)) - && - (ue_sched_ctl-> - rballoc_sub_UE[CC_id][j + - 1] - == 0))) - && - (nb_rbs_required_remaining - [CC_id] - [UE_id2] > 0)) { - - if ((((eNB_UE_stats2-> - DL_pmi_single ^ - eNB_UE_stats1-> - DL_pmi_single) - << (14 - j)) & 0xc000) == 0x4000) { //MU-MIMO only for 25 RBs configuration - - rballoc_sub[CC_id][j] = 1; - ue_sched_ctl-> - 
rballoc_sub_UE[CC_id] - [j] = 1; - ue_sched_ctl2-> - rballoc_sub_UE[CC_id] - [j] = 1; - MIMO_mode_indicator[CC_id] - [j] = 0; - - if (j < N_RBG[CC_id] - 1) { - rballoc_sub[CC_id][j + - 1] = - 1; - ue_sched_ctl-> - rballoc_sub_UE - [CC_id][j + 1] = 1; - ue_sched_ctl2->rballoc_sub_UE - [CC_id][j + 1] = 1; - MIMO_mode_indicator - [CC_id][j + 1] - = 0; - } - - ue_sched_ctl-> - dl_pow_off[CC_id] - = 0; - ue_sched_ctl2-> - dl_pow_off[CC_id] - = 0; - - - if ((j == N_RBG[CC_id] - 1) - && ((N_RB_DL == 25) - || (N_RB_DL == - 50))) { - - nb_rbs_required_remaining - [CC_id][UE_id] = - nb_rbs_required_remaining - [CC_id][UE_id] - - min_rb_unit[CC_id] - + 1; - ue_sched_ctl->pre_nb_available_rbs - [CC_id] = - ue_sched_ctl->pre_nb_available_rbs - [CC_id] + - min_rb_unit[CC_id] - - 1; - nb_rbs_required_remaining - [CC_id][UE_id2] = - nb_rbs_required_remaining - [CC_id][UE_id2] - - min_rb_unit[CC_id] - + 1; - ue_sched_ctl2->pre_nb_available_rbs - [CC_id] = - ue_sched_ctl2->pre_nb_available_rbs - [CC_id] + - min_rb_unit[CC_id] - - 1; - } else { - - nb_rbs_required_remaining - [CC_id][UE_id] = - nb_rbs_required_remaining - [CC_id][UE_id] - 4; - ue_sched_ctl->pre_nb_available_rbs - [CC_id] = - ue_sched_ctl->pre_nb_available_rbs - [CC_id] + 4; - nb_rbs_required_remaining - [CC_id][UE_id2] = - nb_rbs_required_remaining - [CC_id][UE_id2] - - 4; - ue_sched_ctl2->pre_nb_available_rbs - [CC_id] = - ue_sched_ctl2->pre_nb_available_rbs - [CC_id] + 4; - } - - break; - } - } - } - } - } - } - } + (ue_sched_ctl->rballoc_sub_UE + [CC_id][j] == 0)) + || ((j < (N_RBG[CC_id] - 1)) + && + (ue_sched_ctl-> + rballoc_sub_UE[CC_id][j + + 1] + == 0))) + && + (nb_rbs_remaining + [CC_id] + [UE_id2] > 0)) { + + if ((((eNB_UE_stats2-> + DL_pmi_single ^ + eNB_UE_stats1-> + DL_pmi_single) + << (14 - j)) & 0xc000) == 0x4000) { //MU-MIMO only for 25 RBs configuration + + rballoc_sub[CC_id][j] = 1; + ue_sched_ctl-> + rballoc_sub_UE[CC_id] + [j] = 1; + ue_sched_ctl2-> + rballoc_sub_UE[CC_id] + [j] = 1; + 
MIMO_mode_indicator[CC_id] + [j] = 0; + + if (j < N_RBG[CC_id] - 1) { + rballoc_sub[CC_id][j + + 1] = + 1; + ue_sched_ctl-> + rballoc_sub_UE + [CC_id][j + 1] = 1; + ue_sched_ctl2->rballoc_sub_UE + [CC_id][j + 1] = 1; + MIMO_mode_indicator + [CC_id][j + 1] + = 0; + } + + ue_sched_ctl-> + dl_pow_off[CC_id] + = 0; + ue_sched_ctl2-> + dl_pow_off[CC_id] + = 0; + + + if ((j == N_RBG[CC_id] - 1) + && ((N_RB_DL == 25) + || (N_RB_DL == + 50))) { + + nb_rbs_remaining + [CC_id][UE_id] = + nb_rbs_remaining + [CC_id][UE_id] - + min_rb_unit[CC_id] + + 1; + ue_sched_ctl->pre_nb_available_rbs + [CC_id] = + ue_sched_ctl->pre_nb_available_rbs + [CC_id] + + min_rb_unit[CC_id] + - 1; + nb_rbs_remaining + [CC_id][UE_id2] = + nb_rbs_remaining + [CC_id][UE_id2] - + min_rb_unit[CC_id] + + 1; + ue_sched_ctl2->pre_nb_available_rbs + [CC_id] = + ue_sched_ctl2->pre_nb_available_rbs + [CC_id] + + min_rb_unit[CC_id] + - 1; + } else { + + nb_rbs_remaining + [CC_id][UE_id] = + nb_rbs_remaining + [CC_id][UE_id] - 4; + ue_sched_ctl->pre_nb_available_rbs + [CC_id] = + ue_sched_ctl->pre_nb_available_rbs + [CC_id] + 4; + nb_rbs_remaining + [CC_id][UE_id2] = + nb_rbs_remaining + [CC_id][UE_id2] - + 4; + ue_sched_ctl2->pre_nb_available_rbs + [CC_id] = + ue_sched_ctl2->pre_nb_available_rbs + [CC_id] + 4; + } + + break; + } + } + } + } + } + } + } #endif - } // total_ue_count - } // CC - } // UE - } // end of for for r1 and r2 + } + } } // This function assigns pre-available RBS to each UE in specified sub-bands before scheduling is done void dlsch_scheduler_pre_processor(module_id_t Mod_id, - slice_id_t slice_id, + int slice_idx, frame_t frameP, sub_frame_t subframeP, - int N_RBG[NFAPI_CC_MAX], - int *mbsfn_flag) { - + int *mbsfn_flag, + uint8_t rballoc_sub[NFAPI_CC_MAX][N_RBG_MAX]) +{ int UE_id; uint8_t CC_id; uint16_t i, j; - uint8_t rballoc_sub[NFAPI_CC_MAX][N_RBG_MAX]; - uint8_t MIMO_mode_indicator[NFAPI_CC_MAX][N_RBG_MAX]; // If TM5 is revisited, we can move this inside accounting - int 
min_rb_unit[NFAPI_CC_MAX]; - uint16_t nb_rbs_required[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB]; + + slice_info_t *sli = &RC.mac[Mod_id]->slice_info; + uint16_t (*nb_rbs_required)[MAX_MOBILES_PER_ENB] = sli->pre_processor_results[slice_idx].nb_rbs_required; + uint16_t (*nb_rbs_accounted)[MAX_MOBILES_PER_ENB] = sli->pre_processor_results[slice_idx].nb_rbs_accounted; + uint16_t (*nb_rbs_remaining)[MAX_MOBILES_PER_ENB] = sli->pre_processor_results[slice_idx].nb_rbs_remaining; + uint8_t (*MIMO_mode_indicator)[N_RBG_MAX] = sli->pre_processor_results[slice_idx].MIMO_mode_indicator; UE_list_t *UE_list = &RC.mac[Mod_id]->UE_list; UE_sched_ctrl *ue_sched_ctl; @@ -910,63 +1199,64 @@ dlsch_scheduler_pre_processor(module_id_t Mod_id, UE_sched_ctrl *ue_sched_ctl1, *ue_sched_ctl2; #endif - for (CC_id = 0; CC_id < RC.nb_mac_CC[Mod_id]; CC_id++) { - - if (mbsfn_flag[CC_id] > 0) // If this CC is allocated for MBSFN skip it here - continue; - - min_rb_unit[CC_id] = get_min_rb_unit(Mod_id, CC_id); - - for (UE_id = 0; UE_id < MAX_MOBILES_PER_ENB; ++UE_id) { - if (UE_list->active[UE_id] != TRUE) - continue; - - if (!ue_slice_membership(UE_id, slice_id)) - continue; - - // Initialize scheduling information for all active UEs - dlsch_scheduler_pre_processor_reset(Mod_id, - UE_id, - CC_id, - frameP, - subframeP, - N_RBG[CC_id], - nb_rbs_required, - rballoc_sub, - MIMO_mode_indicator); - - } - } - + // Initialize scheduling information for all active UEs + memset(&sli->pre_processor_results[slice_idx], 0, sizeof(sli->pre_processor_results[slice_idx])); + // FIXME: After the memset above, some of the resets in reset() are redundant + dlsch_scheduler_pre_processor_reset(Mod_id, slice_idx, frameP, subframeP, + min_rb_unit, + nb_rbs_required, + rballoc_sub, + MIMO_mode_indicator, + mbsfn_flag); // FIXME: Not sure if useful + + // STATUS // Store the DLSCH buffer for each logical channel - store_dlsch_buffer(Mod_id, slice_id, frameP, subframeP); + store_dlsch_buffer(Mod_id, slice_idx, frameP, 
subframeP); // Calculate the number of RBs required by each UE on the basis of logical channel's buffer - assign_rbs_required(Mod_id, slice_id, frameP, subframeP, nb_rbs_required, min_rb_unit); + assign_rbs_required(Mod_id, slice_idx, frameP, subframeP, nb_rbs_required, min_rb_unit); // Sorts the user on the basis of dlsch logical channel buffer and CQI - sort_UEs(Mod_id, slice_id, frameP, subframeP); - - // This function does the main allocation of the number of RBs - dlsch_scheduler_pre_processor_accounting(Mod_id, - slice_id, - frameP, - subframeP, - N_RBG, - min_rb_unit, - rballoc_sub, - MIMO_mode_indicator, - nb_rbs_required); + sort_UEs(Mod_id, slice_idx, frameP, subframeP); + // ACCOUNTING + // This procedure decides the number of RBs to allocate + dlsch_scheduler_pre_processor_accounting(Mod_id, slice_idx, frameP, subframeP, + min_rb_unit, + nb_rbs_required, + nb_rbs_accounted); + // POSITIONING + // This procedure does the main allocation of the RBs + dlsch_scheduler_pre_processor_positioning(Mod_id, slice_idx, + min_rb_unit, + nb_rbs_required, + nb_rbs_accounted, + nb_rbs_remaining, + rballoc_sub, + MIMO_mode_indicator); + + // SHARING + // If there are available RBs left in the slice, allocate them to the highest priority UEs + if (RC.mac[Mod_id]->slice_info.intraslice_share_active) { + dlsch_scheduler_pre_processor_intraslice_sharing(Mod_id, slice_idx, + min_rb_unit, + nb_rbs_required, + nb_rbs_accounted, + nb_rbs_remaining, + rballoc_sub, + MIMO_mode_indicator); + } #ifdef TM5 // This has to be revisited!!!! 
for (CC_id = 0; CC_id < RC.nb_mac_CC[Mod_id]; CC_id++) { - i1 = 0; - i2 = 0; - i3 = 0; + COMMON_channels_t *cc = &RC.mac[Mod_id]->common_channels[CC_id]; + int N_RBG = to_rbg(cc->mib->message.dl_Bandwidth); + i1 = 0; + i2 = 0; + i3 = 0; - for (j = 0; j < N_RBG[CC_id]; j++) { + for (j = 0; j < N_RBG; j++) { if (MIMO_mode_indicator[CC_id][j] == 2) { i1 = i1 + 1; } else if (MIMO_mode_indicator[CC_id][j] == 1) { @@ -974,27 +1264,27 @@ dlsch_scheduler_pre_processor(module_id_t Mod_id, } else if (MIMO_mode_indicator[CC_id][j] == 0) { i3 = i3 + 1; } - } + } - if ((i1 < N_RBG[CC_id]) && (i2 > 0) && (i3 == 0)) { + if ((i1 < N_RBG) && (i2 > 0) && (i3 == 0)) { PHY_vars_eNB_g[Mod_id][CC_id]->check_for_SUMIMO_transmissions = PHY_vars_eNB_g[Mod_id][CC_id]-> check_for_SUMIMO_transmissions + 1; - } + } - if (i3 == N_RBG[CC_id] && i1 == 0 && i2 == 0) { + if (i3 == N_RBG && i1 == 0 && i2 == 0) { PHY_vars_eNB_g[Mod_id][CC_id]->FULL_MUMIMO_transmissions = PHY_vars_eNB_g[Mod_id][CC_id]->FULL_MUMIMO_transmissions + 1; - } + } - if ((i1 < N_RBG[CC_id]) && (i3 > 0)) { + if ((i1 < N_RBG) && (i3 > 0)) { PHY_vars_eNB_g[Mod_id][CC_id]->check_for_MUMIMO_transmissions = PHY_vars_eNB_g[Mod_id][CC_id]-> check_for_MUMIMO_transmissions + 1; - } + } - PHY_vars_eNB_g[Mod_id][CC_id]->check_for_total_transmissions = + PHY_vars_eNB_g[Mod_id][CC_id]->check_for_total_transmissions = PHY_vars_eNB_g[Mod_id][CC_id]->check_for_total_transmissions + 1; @@ -1007,20 +1297,23 @@ dlsch_scheduler_pre_processor(module_id_t Mod_id, for (i = 0; i < UE_num_active_CC(UE_list, UE_id); i++) { CC_id = UE_list->ordered_CCids[i][UE_id]; //PHY_vars_eNB_g[Mod_id]->mu_mimo_mode[UE_id].dl_pow_off = dl_pow_off[UE_id]; + COMMON_channels_t *cc = &RC.mac[Mod_id]->common_channels[CC_id]; + int N_RBG = to_rbg(cc->mib->message.dl_Bandwidth); if (ue_sched_ctl->pre_nb_available_rbs[CC_id] > 0) { LOG_D(MAC, "******************DL Scheduling Information for UE%d ************************\n", UE_id); LOG_D(MAC, "dl power offset UE%d = %d \n", 
UE_id, ue_sched_ctl->dl_pow_off[CC_id]); LOG_D(MAC, "***********RB Alloc for every subband for UE%d ***********\n", UE_id); - for (j = 0; j < N_RBG[CC_id]; j++) { + for (j = 0; j < N_RBG; j++) { //PHY_vars_eNB_g[Mod_id]->mu_mimo_mode[UE_id].rballoc_sub[UE_id] = rballoc_sub_UE[CC_id][UE_id][UE_id]; LOG_D(MAC, "RB Alloc for UE%d and Subband%d = %d\n", UE_id, j, ue_sched_ctl->rballoc_sub_UE[CC_id][j]); } //PHY_vars_eNB_g[Mod_id]->mu_mimo_mode[UE_id].pre_nb_available_rbs = pre_nb_available_rbs[CC_id][UE_id]; LOG_D(MAC, "[eNB %d][SLICE %d]Total RBs allocated for UE%d = %d\n", - Mod_id, slice_id, UE_id, ue_sched_ctl->pre_nb_available_rbs[CC_id]); + Mod_id, RC.mac[Mod_id]->slice_info.dl[slice_idx].id, UE_id, + ue_sched_ctl->pre_nb_available_rbs[CC_id]); } } } @@ -1029,269 +1322,289 @@ dlsch_scheduler_pre_processor(module_id_t Mod_id, #define SF0_LIMIT 1 void -dlsch_scheduler_pre_processor_reset(int module_idP, - int UE_id, - uint8_t CC_id, - int frameP, - int subframeP, - int N_RBG, - uint16_t nb_rbs_required[NFAPI_CC_MAX] - [MAX_MOBILES_PER_ENB], - unsigned char - rballoc_sub[NFAPI_CC_MAX] - [N_RBG_MAX], - unsigned char - MIMO_mode_indicator[NFAPI_CC_MAX] - [N_RBG_MAX]) +dlsch_scheduler_pre_processor_reset(module_id_t module_idP, + int slice_idx, + frame_t frameP, + sub_frame_t subframeP, + int min_rb_unit[NFAPI_CC_MAX], + uint16_t nb_rbs_required[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB], + uint8_t rballoc_sub[NFAPI_CC_MAX][N_RBG_MAX], + uint8_t MIMO_mode_indicator[NFAPI_CC_MAX][N_RBG_MAX], + int *mbsfn_flag) { + + int UE_id; + uint8_t CC_id; int i, j; - UE_list_t *UE_list = &RC.mac[module_idP]->UE_list; - UE_sched_ctrl *ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id]; + UE_list_t *UE_list; + UE_sched_ctrl *ue_sched_ctl; + int N_RB_DL, RBGsize, RBGsize_last; + int N_RBG[NFAPI_CC_MAX]; - uint8_t *vrb_map = RC.mac[module_idP]->common_channels[CC_id].vrb_map; - int N_RB_DL = - to_prb(RC.mac[module_idP]->common_channels[CC_id].mib->message.dl_Bandwidth); - int RBGsize = N_RB_DL / 
N_RBG, RBGsize_last; #ifdef SF0_LIMIT - int sf0_upper = -1, sf0_lower = -1; + int sf0_lower, sf0_upper; #endif + rnti_t rnti; + uint8_t *vrb_map; + COMMON_channels_t *cc; +// + for (CC_id = 0; CC_id < RC.nb_mac_CC[module_idP]; CC_id++) { - LOG_D(MAC, "Running preprocessor for UE %d (%x)\n", UE_id,(int)(UE_RNTI(module_idP, UE_id))); - // initialize harq_pid and round + LOG_D(MAC, "Running preprocessor for UE %d (%x)\n", UE_id,(int)(UE_RNTI(module_idP, UE_id))); + // initialize harq_pid and round + cc = &RC.mac[module_idP]->common_channels[CC_id]; + N_RBG[CC_id] = to_rbg(cc->mib->message.dl_Bandwidth); + min_rb_unit[CC_id] = get_min_rb_unit(module_idP, CC_id); - if (ue_sched_ctl->ta_timer) - ue_sched_ctl->ta_timer--; + if (mbsfn_flag[CC_id] > 0) // If this CC is allocated for MBSFN skip it here + continue; - /* - eNB_UE_stats *eNB_UE_stats; + for (UE_id = 0; UE_id < MAX_MOBILES_PER_ENB; ++UE_id) { - if (eNB_UE_stats == NULL) - return; + UE_list = &RC.mac[module_idP]->UE_list; + ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id]; + rnti = UE_RNTI(module_idP, UE_id); - mac_xface->get_ue_active_harq_pid(module_idP,CC_id,rnti, - frameP,subframeP, - &ue_sched_ctl->harq_pid[CC_id], - &ue_sched_ctl->round[CC_id], - openair_harq_DL); - if (ue_sched_ctl->ta_timer == 0) { + if (rnti == NOT_A_RNTI) + continue; - // WE SHOULD PROTECT the eNB_UE_stats with a mutex here ... 
+ if (UE_list->active[UE_id] != TRUE) + continue; - ue_sched_ctl->ta_timer = 20; // wait 20 subframes before taking TA measurement from PHY - switch (N_RB_DL) { - case 6: - ue_sched_ctl->ta_update = eNB_UE_stats->timing_advance_update; - break; + if (!ue_dl_slice_membership(module_idP, UE_id, slice_idx)) + continue; - case 15: - ue_sched_ctl->ta_update = eNB_UE_stats->timing_advance_update/2; - break; + LOG_D(MAC, "Running preprocessor for UE %d (%x)\n", UE_id, rnti); + // initialize harq_pid and round + if (ue_sched_ctl->ta_timer) + ue_sched_ctl->ta_timer--; - case 25: - ue_sched_ctl->ta_update = eNB_UE_stats->timing_advance_update/4; - break; - case 50: - ue_sched_ctl->ta_update = eNB_UE_stats->timing_advance_update/8; - break; + /* + eNB_UE_stats *eNB_UE_stats; - case 75: - ue_sched_ctl->ta_update = eNB_UE_stats->timing_advance_update/12; - break; + if (eNB_UE_stats == NULL) + return; - case 100: - ue_sched_ctl->ta_update = eNB_UE_stats->timing_advance_update/16; - break; - } - // clear the update in case PHY does not have a new measurement after timer expiry - eNB_UE_stats->timing_advance_update = 0; - } - else { - ue_sched_ctl->ta_timer--; - ue_sched_ctl->ta_update =0; // don't trigger a timing advance command - } + mac_xface->get_ue_active_harq_pid(module_idP,CC_id,rnti, + frameP,subframeP, + &ue_sched_ctl->harq_pid[CC_id], + &ue_sched_ctl->round[CC_id], + openair_harq_DL); - if (UE_id==0) { - VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_UE0_TIMING_ADVANCE,ue_sched_ctl->ta_update); - } - */ - nb_rbs_required[CC_id][UE_id] = 0; - ue_sched_ctl->pre_nb_available_rbs[CC_id] = 0; - ue_sched_ctl->dl_pow_off[CC_id] = 2; + if (ue_sched_ctl->ta_timer == 0) { + + // WE SHOULD PROTECT the eNB_UE_stats with a mutex here ... 
+ + ue_sched_ctl->ta_timer = 20; // wait 20 subframes before taking TA measurement from PHY + switch (N_RB_DL) { + case 6: + ue_sched_ctl->ta_update = eNB_UE_stats->timing_advance_update; + break; + + case 15: + ue_sched_ctl->ta_update = eNB_UE_stats->timing_advance_update/2; + break; + + case 25: + ue_sched_ctl->ta_update = eNB_UE_stats->timing_advance_update/4; + break; + + case 50: + ue_sched_ctl->ta_update = eNB_UE_stats->timing_advance_update/8; + break; + + case 75: + ue_sched_ctl->ta_update = eNB_UE_stats->timing_advance_update/12; + break; + + case 100: + ue_sched_ctl->ta_update = eNB_UE_stats->timing_advance_update/16; + break; + } + // clear the update in case PHY does not have a new measurement after timer expiry + eNB_UE_stats->timing_advance_update = 0; + } + else { + ue_sched_ctl->ta_timer--; + ue_sched_ctl->ta_update =0; // don't trigger a timing advance command + } - switch (N_RB_DL) { - case 6: - RBGsize = 1; - RBGsize_last = 1; - break; - case 15: - RBGsize = 2; - RBGsize_last = 1; - break; - case 25: - RBGsize = 2; - RBGsize_last = 1; - break; - case 50: - RBGsize = 3; - RBGsize_last = 2; - break; - case 75: - RBGsize = 4; - RBGsize_last = 3; - break; - case 100: - RBGsize = 4; - RBGsize_last = 4; - break; - default: - AssertFatal(1 == 0, "unsupported RBs (%d)\n", N_RB_DL); - } + + if (UE_id==0) { + VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_UE0_TIMING_ADVANCE,ue_sched_ctl->ta_update); + } + */ + + nb_rbs_required[CC_id][UE_id] = 0; + ue_sched_ctl->pre_nb_available_rbs[CC_id] = 0; + ue_sched_ctl->dl_pow_off[CC_id] = 2; + + for (i = 0; i < N_RBG[CC_id]; i++) { + ue_sched_ctl->rballoc_sub_UE[CC_id][i] = 0; + } + } + + N_RB_DL = to_prb(RC.mac[module_idP]->common_channels[CC_id].mib->message.dl_Bandwidth); #ifdef SF0_LIMIT - switch (N_RBG) { - case 6: - sf0_lower = 0; - sf0_upper = 5; - break; - case 8: - sf0_lower = 2; - sf0_upper = 5; - break; - case 13: - sf0_lower = 4; - sf0_upper = 7; - break; - case 17: - sf0_lower = 7; 
- sf0_upper = 9; - break; - case 25: - sf0_lower = 11; - sf0_upper = 13; - break; - default: - AssertFatal(1 == 0, "unsupported RBs (%d)\n", N_RB_DL); - } + switch (N_RBG[CC_id]) { + case 6: + sf0_lower = 0; + sf0_upper = 5; + break; + case 8: + sf0_lower = 2; + sf0_upper = 5; + break; + case 13: + sf0_lower = 4; + sf0_upper = 7; + break; + case 17: + sf0_lower = 7; + sf0_upper = 9; + break; + case 25: + sf0_lower = 11; + sf0_upper = 13; + break; + default: + AssertFatal(1 == 0, "unsupported RBs (%d)\n", N_RB_DL); + } #endif - // Initialize Subbands according to VRB map - for (i = 0; i < N_RBG; i++) { - int rb_size = i == N_RBG - 1 ? RBGsize_last : RBGsize; + switch (N_RB_DL) { + case 6: + RBGsize = 1; + RBGsize_last = 1; + break; + case 15: + RBGsize = 2; + RBGsize_last = 1; + break; + case 25: + RBGsize = 2; + RBGsize_last = 1; + break; + case 50: + RBGsize = 3; + RBGsize_last = 2; + break; + case 75: + RBGsize = 4; + RBGsize_last = 3; + break; + case 100: + RBGsize = 4; + RBGsize_last = 4; + break; + default: + AssertFatal(1 == 0, "unsupported RBs (%d)\n", N_RB_DL); + } + + vrb_map = RC.mac[module_idP]->common_channels[CC_id].vrb_map; + // Initialize Subbands according to VRB map + for (i = 0; i < N_RBG[CC_id]; i++) { + int rb_size = i == N_RBG[CC_id] - 1 ? 
RBGsize_last : RBGsize; - ue_sched_ctl->rballoc_sub_UE[CC_id][i] = 0; - rballoc_sub[CC_id][i] = 0; #ifdef SF0_LIMIT - // for avoiding 6+ PRBs around DC in subframe 0 (avoid excessive errors) - /* TODO: make it proper - allocate those RBs, do not "protect" them, but - * compute number of available REs and limit MCS according to the - * TBS table 36.213 7.1.7.2.1-1 (can be done after pre-processor) - */ - if (subframeP == 0 && i >= sf0_lower && i <= sf0_upper) - rballoc_sub[CC_id][i] = 1; + // for avoiding 6+ PRBs around DC in subframe 0 (avoid excessive errors) + /* TODO: make it proper - allocate those RBs, do not "protect" them, but + * compute number of available REs and limit MCS according to the + * TBS table 36.213 7.1.7.2.1-1 (can be done after pre-processor) + */ + if (subframeP == 0 && i >= sf0_lower && i <= sf0_upper) + rballoc_sub[CC_id][i] = 1; #endif - // for SI-RNTI,RA-RNTI and P-RNTI allocations - for (j = 0; j < rb_size; j++) { - if (vrb_map[j + (i * RBGsize)] != 0) { - rballoc_sub[CC_id][i] = 1; - LOG_D(MAC, "Frame %d, subframe %d : vrb %d allocated\n", - frameP, subframeP, j + (i * RBGsize)); - break; + // for SI-RNTI,RA-RNTI and P-RNTI allocations + for (j = 0; j < rb_size; j++) { + if (vrb_map[j + (i*RBGsize)] != 0) { + rballoc_sub[CC_id][i] = 1; + LOG_D(MAC, "Frame %d, subframe %d : vrb %d allocated\n", frameP, subframeP, j + (i*RBGsize)); + break; + } } + //LOG_D(MAC, "Frame %d Subframe %d CC_id %d RBG %i : rb_alloc %d\n", + //frameP, subframeP, CC_id, i, rballoc_sub[CC_id][i]); + MIMO_mode_indicator[CC_id][i] = 2; } - LOG_D(MAC, "Frame %d Subframe %d CC_id %d RBG %i : rb_alloc %d\n", - frameP, subframeP, CC_id, i, rballoc_sub[CC_id][i]); - MIMO_mode_indicator[CC_id][i] = 2; } } void dlsch_scheduler_pre_processor_allocate(module_id_t Mod_id, - int UE_id, - uint8_t CC_id, - int N_RBG, - int transmission_mode, - int min_rb_unit, - uint8_t N_RB_DL, - uint16_t nb_rbs_required[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB], - uint16_t 
nb_rbs_required_remaining[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB], - unsigned char rballoc_sub[NFAPI_CC_MAX][N_RBG_MAX], - unsigned char MIMO_mode_indicator[NFAPI_CC_MAX][N_RBG_MAX]) + int UE_id, + uint8_t CC_id, + int N_RBG, + int min_rb_unit, + uint16_t nb_rbs_required[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB], + uint16_t nb_rbs_remaining[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB], + uint8_t rballoc_sub[NFAPI_CC_MAX][N_RBG_MAX], + uint8_t slice_allocation_mask[NFAPI_CC_MAX][N_RBG_MAX], + uint8_t MIMO_mode_indicator[NFAPI_CC_MAX][N_RBG_MAX]) { + int i; + int tm = get_tmode(Mod_id, CC_id, UE_id); + UE_list_t *UE_list = &RC.mac[Mod_id]->UE_list; + UE_sched_ctrl *ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id]; + int N_RB_DL = to_prb(RC.mac[Mod_id]->common_channels[CC_id].mib->message.dl_Bandwidth); - int i; - UE_list_t *UE_list = &RC.mac[Mod_id]->UE_list; - UE_sched_ctrl *ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id]; - - for (i = 0; i < N_RBG; i++) { - - if ((rballoc_sub[CC_id][i] == 0) && - (ue_sched_ctl->rballoc_sub_UE[CC_id][i] == 0) && - (nb_rbs_required_remaining[CC_id][UE_id] > 0) && - (ue_sched_ctl->pre_nb_available_rbs[CC_id] < - nb_rbs_required[CC_id][UE_id])) { - - // if this UE is not scheduled for TM5 - if (ue_sched_ctl->dl_pow_off[CC_id] != 0) { - - if ((i == N_RBG - 1) - && ((N_RB_DL == 25) || (N_RB_DL == 50))) { - if (nb_rbs_required_remaining[CC_id][UE_id] >= - min_rb_unit - 1) { - rballoc_sub[CC_id][i] = 1; - ue_sched_ctl->rballoc_sub_UE[CC_id][i] = 1; - MIMO_mode_indicator[CC_id][i] = 1; - if (transmission_mode == 5) { - ue_sched_ctl->dl_pow_off[CC_id] = 1; - } - nb_rbs_required_remaining[CC_id][UE_id] = - nb_rbs_required_remaining[CC_id][UE_id] - - min_rb_unit + 1; - ue_sched_ctl->pre_nb_available_rbs[CC_id] = - ue_sched_ctl->pre_nb_available_rbs[CC_id] + - min_rb_unit - 1; - } - } else { - if (nb_rbs_required_remaining[CC_id][UE_id] >= - min_rb_unit) { - rballoc_sub[CC_id][i] = 1; - ue_sched_ctl->rballoc_sub_UE[CC_id][i] = 1; - MIMO_mode_indicator[CC_id][i] = 1; - if 
(transmission_mode == 5) { - ue_sched_ctl->dl_pow_off[CC_id] = 1; - } - nb_rbs_required_remaining[CC_id][UE_id] = - nb_rbs_required_remaining[CC_id][UE_id] - - min_rb_unit; - ue_sched_ctl->pre_nb_available_rbs[CC_id] = - ue_sched_ctl->pre_nb_available_rbs[CC_id] + - min_rb_unit; - } - } - } // dl_pow_off[CC_id][UE_id] ! = 0 - } + for (i = 0; i < N_RBG; i++) { + + if (rballoc_sub[CC_id][i] != 0) continue; + if (ue_sched_ctl->rballoc_sub_UE[CC_id][i] != 0) continue; + if (nb_rbs_remaining[CC_id][UE_id] <= 0) continue; + if (ue_sched_ctl->pre_nb_available_rbs[CC_id] >= nb_rbs_required[CC_id][UE_id]) continue; + if (ue_sched_ctl->dl_pow_off[CC_id] == 0) continue; + if (slice_allocation_mask[CC_id][i] == 0) continue; + + if ((i == N_RBG - 1) && ((N_RB_DL == 25) || (N_RB_DL == 50))) { + // Allocating last, smaller RBG + if (nb_rbs_remaining[CC_id][UE_id] >= min_rb_unit - 1) { + rballoc_sub[CC_id][i] = 1; + ue_sched_ctl->rballoc_sub_UE[CC_id][i] = 1; + MIMO_mode_indicator[CC_id][i] = 1; + if (tm == 5) { + ue_sched_ctl->dl_pow_off[CC_id] = 1; + } + nb_rbs_remaining[CC_id][UE_id] = nb_rbs_remaining[CC_id][UE_id] - min_rb_unit + 1; + ue_sched_ctl->pre_nb_available_rbs[CC_id] = ue_sched_ctl->pre_nb_available_rbs[CC_id] + min_rb_unit - 1; + } + } else { + // Allocating a standard-sized RBG + if (nb_rbs_remaining[CC_id][UE_id] >= min_rb_unit) { + rballoc_sub[CC_id][i] = 1; + ue_sched_ctl->rballoc_sub_UE[CC_id][i] = 1; + MIMO_mode_indicator[CC_id][i] = 1; + if (tm == 5) { + ue_sched_ctl->dl_pow_off[CC_id] = 1; + } + nb_rbs_remaining[CC_id][UE_id] = nb_rbs_remaining[CC_id][UE_id] - min_rb_unit; + ue_sched_ctl->pre_nb_available_rbs[CC_id] = ue_sched_ctl->pre_nb_available_rbs[CC_id] + min_rb_unit; + } } + } } /// ULSCH PRE_PROCESSOR void ulsch_scheduler_pre_processor(module_id_t module_idP, - slice_id_t slice_id, + int slice_idx, int frameP, sub_frame_t subframeP, unsigned char sched_subframeP, uint16_t *first_rb) { - int16_t i; uint16_t UE_id, n, r; uint8_t CC_id, harq_pid; @@ 
-1302,13 +1615,15 @@ void ulsch_scheduler_pre_processor(module_id_t module_idP, uint16_t total_ue_count[NFAPI_CC_MAX]; rnti_t rnti = -1; UE_list_t *UE_list = &RC.mac[module_idP]->UE_list; + slice_info_t *sli = &RC.mac[module_idP]->slice_info; UE_TEMPLATE *UE_template = 0; UE_sched_ctrl *ue_sched_ctl; int N_RB_UL = 0; + uint16_t available_rbs, first_rb_offset; LOG_D(MAC, "In ulsch_preprocessor: assign max mcs min rb\n"); // maximize MCS and then allocate required RB according to the buffer occupancy with the limit of max available UL RB - assign_max_mcs_min_rb(module_idP, slice_id, frameP, subframeP, first_rb); + assign_max_mcs_min_rb(module_idP, slice_idx, frameP, subframeP, first_rb); LOG_D(MAC, "In ulsch_preprocessor: sort ue \n"); // sort ues @@ -1330,9 +1645,9 @@ void ulsch_scheduler_pre_processor(module_id_t module_idP, // This is the actual CC_id in the list CC_id = UE_list->ordered_ULCCids[n][i]; UE_template = &UE_list->UE_template[CC_id][i]; - if (!ue_slice_membership(i, slice_id)) + if (!ue_ul_slice_membership(module_idP, i, slice_idx)) continue; - if (UE_template->pre_allocated_nb_rb_ul[slice_id] > 0) { + if (UE_template->pre_allocated_nb_rb_ul[slice_idx] > 0) { total_ue_count[CC_id] += 1; } } @@ -1351,7 +1666,7 @@ void ulsch_scheduler_pre_processor(module_id_t module_idP, if (UE_list->UE_sched_ctrl[i].ul_out_of_sync == 1) continue; - if (!ue_slice_membership(UE_id, slice_id)) + if (!ue_ul_slice_membership(module_idP, UE_id, slice_idx)) continue; LOG_D(MAC, "In ulsch_preprocessor: handling UE %d/%x\n", UE_id, @@ -1375,14 +1690,21 @@ void ulsch_scheduler_pre_processor(module_id_t module_idP, N_RB_UL = to_prb(RC.mac[module_idP]->common_channels[CC_id].ul_Bandwidth); ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id]; - ue_sched_ctl->max_rbs_allowed_slice_uplink[CC_id][slice_id] = flexran_nb_rbs_allowed_slice(slice_percentage_uplink[slice_id],N_RB_UL); + ue_sched_ctl->max_rbs_allowed_slice_uplink[CC_id][slice_idx] = + nb_rbs_allowed_slice(sli->ul[slice_idx].pct, 
N_RB_UL); + + first_rb_offset = UE_list->first_rb_offset[CC_id][slice_idx]; + available_rbs = cmin(ue_sched_ctl->max_rbs_allowed_slice_uplink[CC_id][slice_idx], + N_RB_UL - first_rb[CC_id] - first_rb_offset); + if (available_rbs < 0) + available_rbs = 0; if (total_ue_count[CC_id] == 0) { average_rbs_per_user[CC_id] = 0; } else if (total_ue_count[CC_id] == 1) { // increase the available RBs, special case, - average_rbs_per_user[CC_id] = ue_sched_ctl->max_rbs_allowed_slice_uplink[CC_id][slice_id] - first_rb[CC_id] + 1; - } else if (total_ue_count[CC_id] <= (ue_sched_ctl->max_rbs_allowed_slice_uplink[CC_id][slice_id] - first_rb[CC_id])) { - average_rbs_per_user[CC_id] = (uint16_t) floor((ue_sched_ctl->max_rbs_allowed_slice_uplink[CC_id][slice_id] - first_rb[CC_id]) / total_ue_count[CC_id]); + average_rbs_per_user[CC_id] = (uint16_t) (available_rbs + 1); + } else if (total_ue_count[CC_id] <= available_rbs) { + average_rbs_per_user[CC_id] = (uint16_t) floor(available_rbs / total_ue_count[CC_id]); } else { average_rbs_per_user[CC_id] = 1; LOG_W(MAC, @@ -1403,7 +1725,7 @@ void ulsch_scheduler_pre_processor(module_id_t module_idP, continue; if (UE_list->UE_sched_ctrl[i].ul_out_of_sync == 1) continue; - if (!ue_slice_membership(i, slice_id)) + if (!ue_ul_slice_membership(module_idP, i, slice_idx)) continue; @@ -1422,7 +1744,7 @@ void ulsch_scheduler_pre_processor(module_id_t module_idP, nb_allocated_rbs[CC_id][UE_id] = UE_list->UE_template[CC_id][UE_id].nb_rb_ul[harq_pid]; } else { nb_allocated_rbs[CC_id][UE_id] = - cmin(UE_list->UE_template[CC_id][UE_id].pre_allocated_nb_rb_ul[slice_id], + cmin(UE_list->UE_template[CC_id][UE_id].pre_allocated_nb_rb_ul[slice_idx], average_rbs_per_user[CC_id]); } @@ -1444,7 +1766,7 @@ void ulsch_scheduler_pre_processor(module_id_t module_idP, continue; if (UE_list->UE_sched_ctrl[i].ul_out_of_sync == 1) continue; - if (!ue_slice_membership(i, slice_id)) + if (!ue_ul_slice_membership(module_idP, i, slice_idx)) continue; UE_id = i; @@ -1454,25 
+1776,28 @@ void ulsch_scheduler_pre_processor(module_id_t module_idP, // This is the actual CC_id in the list CC_id = UE_list->ordered_ULCCids[n][UE_id]; UE_template = &UE_list->UE_template[CC_id][UE_id]; - total_remaining_rbs[CC_id] = - ue_sched_ctl->max_rbs_allowed_slice_uplink[CC_id][slice_id] - first_rb[CC_id] - total_allocated_rbs[CC_id]; + N_RB_UL = to_prb(RC.mac[module_idP]->common_channels[CC_id].ul_Bandwidth); + first_rb_offset = UE_list->first_rb_offset[CC_id][slice_idx]; + available_rbs = cmin(ue_sched_ctl->max_rbs_allowed_slice_uplink[CC_id][slice_idx], + N_RB_UL - first_rb[CC_id] - first_rb_offset); + total_remaining_rbs[CC_id] = available_rbs - total_allocated_rbs[CC_id]; if (total_ue_count[CC_id] == 1) { total_remaining_rbs[CC_id] += 1; } if (r == 0) { - while ((UE_template->pre_allocated_nb_rb_ul[slice_id] > 0) - && (nb_allocated_rbs[CC_id][UE_id] < UE_template->pre_allocated_nb_rb_ul[slice_id]) + while ((UE_template->pre_allocated_nb_rb_ul[slice_idx] > 0) + && (nb_allocated_rbs[CC_id][UE_id] < UE_template->pre_allocated_nb_rb_ul[slice_idx]) && (total_remaining_rbs[CC_id] > 0)) { nb_allocated_rbs[CC_id][UE_id] = cmin(nb_allocated_rbs[CC_id][UE_id] + 1, - UE_template->pre_allocated_nb_rb_ul[slice_id]); + UE_template->pre_allocated_nb_rb_ul[slice_idx]); total_remaining_rbs[CC_id]--; total_allocated_rbs[CC_id]++; } } else { - UE_template->pre_allocated_nb_rb_ul[slice_id] = + UE_template->pre_allocated_nb_rb_ul[slice_idx] = nb_allocated_rbs[CC_id][UE_id]; LOG_D(MAC, "******************UL Scheduling Information for UE%d CC_id %d ************************\n", @@ -1480,7 +1805,7 @@ void ulsch_scheduler_pre_processor(module_id_t module_idP, LOG_D(MAC, "[eNB %d] total RB allocated for UE%d CC_id %d = %d\n", module_idP, UE_id, CC_id, - UE_template->pre_allocated_nb_rb_ul[slice_id]); + UE_template->pre_allocated_nb_rb_ul[slice_idx]); } } } @@ -1488,7 +1813,7 @@ void ulsch_scheduler_pre_processor(module_id_t module_idP, } void -assign_max_mcs_min_rb(module_id_t 
module_idP, int slice_id, int frameP, +assign_max_mcs_min_rb(module_id_t module_idP, int slice_idx, int frameP, sub_frame_t subframeP, uint16_t * first_rb) { @@ -1500,11 +1825,13 @@ assign_max_mcs_min_rb(module_id_t module_idP, int slice_id, int frameP, int rb_table_index = 0, tbs, tx_power; eNB_MAC_INST *eNB = RC.mac[module_idP]; UE_list_t *UE_list = &eNB->UE_list; + slice_info_t *sli = &RC.mac[module_idP]->slice_info; UE_TEMPLATE *UE_template; UE_sched_ctrl *ue_sched_ctl; int Ncp; int N_RB_UL; + int first_rb_offset, available_rbs; for (i = 0; i < MAX_MOBILES_PER_ENB; i++) { if (UE_list->active[i] != TRUE) @@ -1516,16 +1843,16 @@ assign_max_mcs_min_rb(module_id_t module_idP, int slice_id, int frameP, continue; if (UE_list->UE_sched_ctrl[i].ul_out_of_sync == 1) continue; - if (!ue_slice_membership(i, slice_id)) + if (!ue_ul_slice_membership(module_idP, i, slice_idx)) continue; if (UE_list->UE_sched_ctrl[i].phr_received == 1) { /* if we've received the power headroom information the UE, we can go to * maximum mcs */ - mcs = cmin(20, slice_maxmcs_uplink[slice_id]); + mcs = cmin(20, sli->ul[slice_idx].maxmcs); } else { /* otherwise, limit to QPSK PUSCH */ - mcs = cmin(10, slice_maxmcs_uplink[slice_id]); + mcs = cmin(10, sli->ul[slice_idx].maxmcs); } UE_id = i; @@ -1547,7 +1874,8 @@ assign_max_mcs_min_rb(module_id_t module_idP, int slice_id, int frameP, Ncp = RC.mac[module_idP]->common_channels[CC_id].Ncp; N_RB_UL = to_prb(RC.mac[module_idP]->common_channels[CC_id].ul_Bandwidth); - ue_sched_ctl->max_rbs_allowed_slice_uplink[CC_id][slice_id] = flexran_nb_rbs_allowed_slice(slice_percentage_uplink[slice_id],N_RB_UL); + ue_sched_ctl->max_rbs_allowed_slice_uplink[CC_id][slice_idx] = + nb_rbs_allowed_slice(sli->ul[slice_idx].pct, N_RB_UL); int bytes_to_schedule = UE_template->estimated_ul_buffer - UE_template->scheduled_ul_bytes; if (bytes_to_schedule < 0) bytes_to_schedule = 0; @@ -1570,18 +1898,20 @@ assign_max_mcs_min_rb(module_id_t module_idP, int slice_id, int frameP, 
tx_power = estimate_ue_tx_power(tbs, rb_table[rb_table_index], 0, Ncp, 0); // fixme: set use_srs } + first_rb_offset = UE_list->first_rb_offset[CC_id][slice_idx]; + available_rbs = cmin(ue_sched_ctl->max_rbs_allowed_slice_uplink[CC_id][slice_idx], + N_RB_UL - first_rb[CC_id] - first_rb_offset); + while ((tbs < bits_to_schedule) - && (rb_table[rb_table_index] < (ue_sched_ctl->max_rbs_allowed_slice_uplink[CC_id][slice_id] - first_rb[CC_id])) - && ((UE_template->phr_info - tx_power) > 0) - && (rb_table_index < 32)) { + && (rb_table[rb_table_index] < available_rbs) + && ((UE_template->phr_info - tx_power) > 0) + && (rb_table_index < 32)) { rb_table_index++; tbs = get_TBS_UL(UE_template->pre_assigned_mcs_ul, rb_table[rb_table_index]) << 3; tx_power = estimate_ue_tx_power(tbs, rb_table[rb_table_index], 0, Ncp, 0); } - UE_template->ue_tx_power = tx_power; - - if (rb_table[rb_table_index] > (ue_sched_ctl->max_rbs_allowed_slice_uplink[CC_id][slice_id] - first_rb[CC_id] - 1)) { + if (rb_table[rb_table_index] > (available_rbs - 1)) { rb_table_index--; } // 1 or 2 PRB with cqi enabled does not work well @@ -1590,13 +1920,13 @@ assign_max_mcs_min_rb(module_id_t module_idP, int slice_id, int frameP, } UE_template->pre_allocated_rb_table_index_ul = rb_table_index; - UE_template->pre_allocated_nb_rb_ul[slice_id] = rb_table[rb_table_index]; + UE_template->pre_allocated_nb_rb_ul[slice_idx] = rb_table[rb_table_index]; LOG_D(MAC, "[eNB %d] frame %d subframe %d: for UE %d CC %d: pre-assigned mcs %d, pre-allocated rb_table[%d]=%d RBs (phr %d, tx power %d)\n", module_idP, frameP, subframeP, UE_id, CC_id, UE_template->pre_assigned_mcs_ul, UE_template->pre_allocated_rb_table_index_ul, - UE_template->pre_allocated_nb_rb_ul[slice_id], + UE_template->pre_allocated_nb_rb_ul[slice_idx], UE_template->phr_info, tx_power); } else { /* if UE has pending scheduling request then pre-allocate 3 RBs */ @@ -1605,11 +1935,11 @@ assign_max_mcs_min_rb(module_id_t module_idP, int slice_id, int frameP, /* 
use QPSK mcs */ UE_template->pre_assigned_mcs_ul = 10; UE_template->pre_allocated_rb_table_index_ul = 2; - UE_template->pre_allocated_nb_rb_ul[slice_id] = 3; + UE_template->pre_allocated_nb_rb_ul[slice_idx] = 3; } else { UE_template->pre_assigned_mcs_ul = 0; UE_template->pre_allocated_rb_table_index_ul = -1; - UE_template->pre_allocated_nb_rb_ul[slice_id] = 0; + UE_template->pre_allocated_nb_rb_ul[slice_idx] = 0; } } } diff --git a/openair2/LAYER2/PDCP_v10.1.0/pdcp.c b/openair2/LAYER2/PDCP_v10.1.0/pdcp.c index 4731039c1a949d3c097dd643ad812ab9ac48efc5..135c6e73fa930b13338cbbbe383737d91330fd9c 100644 --- a/openair2/LAYER2/PDCP_v10.1.0/pdcp.c +++ b/openair2/LAYER2/PDCP_v10.1.0/pdcp.c @@ -169,6 +169,11 @@ boolean_t pdcp_data_req( start_meas(&UE_pdcp_stats[ctxt_pP->module_id].data_req); } + for (pdcp_uid = 0; pdcp_uid < MAX_MOBILES_PER_ENB; ++pdcp_uid) { + if (pdcp_enb[ctxt_pP->module_id].rnti[pdcp_uid] == ctxt_pP->rnti) + break; + } + // PDCP transparent mode for MBMS traffic if (modeP == PDCP_TRANSMISSION_MODE_TRANSPARENT) { @@ -366,13 +371,24 @@ boolean_t pdcp_data_req( LOG_DUMPMSG(PDCP,DEBUG_PDCP,(char *)pdcp_pdu_p->data,pdcp_pdu_size, "[MSG] PDCP DL %s PDU on rb_id %d\n",(srb_flagP)? "CONTROL" : "DATA", rb_idP); - rlc_status = rlc_data_req(ctxt_pP, srb_flagP, MBMS_FLAG_NO, rb_idP, muiP, confirmP, pdcp_pdu_size, pdcp_pdu_p + /* if RLC buffer for this UE has been full, we want to skip all subsequent + * traffic for TM_SKIP_FULL_BUF_MS ms. 
Afterwards, it will be checkd again */ + if (pdcp_enb[ctxt_pP->module_id].time_buf_full[pdcp_uid] == 0 + || pdcp_enb[ctxt_pP->module_id].sfn - pdcp_enb[ctxt_pP->module_id].time_buf_full[pdcp_uid] >= TM_SKIP_FULL_BUF_MS) { + pdcp_enb[ctxt_pP->module_id].time_buf_full[pdcp_uid] = 0; + rlc_status = rlc_data_req(ctxt_pP, srb_flagP, MBMS_FLAG_NO, rb_idP, muiP, + confirmP, pdcp_pdu_size, pdcp_pdu_p #if (RRC_VERSION >= MAKE_VERSION(14, 0, 0)) - ,sourceL2Id - ,destinationL2Id + ,sourceL2Id + ,destinationL2Id #endif - ); - + ); + } else { + /* RLC would free pdcp_pdu_p, but since we skip it, have to do it + * ourselves and fake normal operation */ + free_mem_block(pdcp_pdu_p, __func__); + rlc_status = RLC_OP_SKIPPED_FUL_BUF; + } } switch (rlc_status) { @@ -383,18 +399,24 @@ boolean_t pdcp_data_req( case RLC_OP_STATUS_BAD_PARAMETER: LOG_W(PDCP, "Data sending request over RLC failed with 'Bad Parameter' reason!\n"); - ret= FALSE; - break; + return FALSE; case RLC_OP_STATUS_INTERNAL_ERROR: LOG_W(PDCP, "Data sending request over RLC failed with 'Internal Error' reason!\n"); - ret= FALSE; - break; + return FALSE; case RLC_OP_STATUS_OUT_OF_RESSOURCES: + pdcp_enb[ctxt_pP->module_id].time_buf_full[pdcp_uid] = pdcp_enb[ctxt_pP->module_id].sfn; LOG_W(PDCP, "Data sending request over RLC failed with 'Out of Resources' reason!\n"); - ret= FALSE; - break; + int h = TM_SKIP_FULL_BUF_MS; + LOG_W(PDCP, "Blocking incoming traffic for %d ms\n", h); + return FALSE; + + case RLC_OP_SKIPPED_FUL_BUF: + LOG_D(PDCP, "Skipping RLC request due to full buffer\n"); + /* fake good return so that GTP doesn't spam us and return immediately so + * that dropped traffic is not counted in PDCP traffic stats */ + return TRUE; default: LOG_W(PDCP, "RLC returned an unknown status code after PDCP placed the order to send some data (Status Code:%d)\n", rlc_status); @@ -408,16 +430,6 @@ boolean_t pdcp_data_req( stop_meas(&UE_pdcp_stats[ctxt_pP->module_id].data_req); } - /* - * Control arrives here only if 
rlc_data_req() returns RLC_OP_STATUS_OK - * so we return TRUE afterwards - */ - - for (pdcp_uid=0; pdcp_uid< MAX_MOBILES_PER_ENB;pdcp_uid++){ - if (pdcp_enb[ctxt_pP->module_id].rnti[pdcp_uid] == ctxt_pP->rnti ) - break; - } - //LOG_I(PDCP,"ueid %d lcid %d tx seq num %d\n", pdcp_uid, rb_idP+rb_offset, current_sn); Pdcp_stats_tx[ctxt_pP->module_id][pdcp_uid][rb_idP+rb_offset]++; Pdcp_stats_tx_tmp_w[ctxt_pP->module_id][pdcp_uid][rb_idP+rb_offset]++; @@ -1066,6 +1078,7 @@ void pdcp_add_UE(const protocol_ctxt_t* const ctxt_pP){ if (pdcp_enb[ctxt_pP->module_id].rnti[i] == 0 ){ pdcp_enb[ctxt_pP->module_id].rnti[i]=ctxt_pP->rnti; pdcp_enb[ctxt_pP->module_id].uid[i]=i; + pdcp_enb[ctxt_pP->module_id].time_buf_full[i] = 0; pdcp_enb[ctxt_pP->module_id].num_ues++; printf("add new uid is %d %x\n\n", i, ctxt_pP->rnti); // ret=1; diff --git a/openair2/LAYER2/PDCP_v10.1.0/pdcp.h b/openair2/LAYER2/PDCP_v10.1.0/pdcp.h index 054bb5fac41505cee29c05fd4cec395a3b2da8db..1bfe4e022a7112fafda211848b0ed518aa9dce43 100644 --- a/openair2/LAYER2/PDCP_v10.1.0/pdcp.h +++ b/openair2/LAYER2/PDCP_v10.1.0/pdcp.h @@ -106,6 +106,8 @@ typedef struct pdcp_enb_s { uint16_t uid[MAX_MOBILES_PER_ENB]; rnti_t rnti[MAX_MOBILES_PER_ENB]; uint16_t num_ues; +#define TM_SKIP_FULL_BUF_MS (500) + uint64_t time_buf_full[MAX_MOBILES_PER_ENB]; uint64_t sfn; frame_t frame; diff --git a/openair2/LAYER2/RLC/rlc.h b/openair2/LAYER2/RLC/rlc.h index 8c557188a2a2e115300b7ea9d15ef4032da49e1e..dfb22f1a0d55cf3f1828f56b2bc803f4b6688086 100644 --- a/openair2/LAYER2/RLC/rlc.h +++ b/openair2/LAYER2/RLC/rlc.h @@ -67,6 +67,7 @@ typedef uint64_t hash_key_t; #define RLC_OP_STATUS_BAD_PARAMETER 22 #define RLC_OP_STATUS_INTERNAL_ERROR 2 #define RLC_OP_STATUS_OUT_OF_RESSOURCES 3 +#define RLC_OP_SKIPPED_FUL_BUF 4 #define RLC_MUI_UNDEFINED (mui_t)0 diff --git a/openair2/RRC/LTE/rrc_eNB.c b/openair2/RRC/LTE/rrc_eNB.c index 10590bc0194988daa6cea1d8ce0b1af233b4523a..e62596aed610a927cb5a31e7583bae215ee238f2 100644 --- a/openair2/RRC/LTE/rrc_eNB.c 
+++ b/openair2/RRC/LTE/rrc_eNB.c @@ -6658,7 +6658,7 @@ rrc_eNB_decode_dcch( //WARNING:Inform the controller about the UE activation. Should be moved to RRC agent in the future if (rrc_agent_registered[ctxt_pP->module_id]) { - agent_rrc_xface[ctxt_pP->eNB_index]->flexran_agent_notify_ue_state_change(ctxt_pP->module_id, + agent_rrc_xface[ctxt_pP->module_id]->flexran_agent_notify_ue_state_change(ctxt_pP->module_id, ue_context_p->ue_id_rnti, PROTOCOL__FLEX_UE_STATE_CHANGE_TYPE__FLUESC_UPDATED); } @@ -6804,7 +6804,7 @@ if (ue_context_p->ue_context.nb_of_modify_e_rabs > 0) { //WARNING:Inform the controller about the UE activation. Should be moved to RRC agent in the future if (mac_agent_registered[ctxt_pP->module_id]) { - agent_rrc_xface[ctxt_pP->eNB_index]->flexran_agent_notify_ue_state_change(ctxt_pP->module_id, + agent_rrc_xface[ctxt_pP->module_id]->flexran_agent_notify_ue_state_change(ctxt_pP->module_id, ue_context_p->ue_id_rnti, PROTOCOL__FLEX_UE_STATE_CHANGE_TYPE__FLUESC_ACTIVATED); } @@ -6857,7 +6857,7 @@ if (ue_context_p->ue_context.nb_of_modify_e_rabs > 0) { //WARNING:Inform the controller about the UE activation. 
Should be moved to RRC agent in the future if (rrc_agent_registered[ctxt_pP->module_id]) { - agent_rrc_xface[ctxt_pP->eNB_index]->flexran_agent_notify_ue_state_change(ctxt_pP->module_id, + agent_rrc_xface[ctxt_pP->module_id]->flexran_agent_notify_ue_state_change(ctxt_pP->module_id, ue_context_p->ue_id_rnti, PROTOCOL__FLEX_UE_STATE_CHANGE_TYPE__FLUESC_ACTIVATED); } diff --git a/openair2/RRC/LTE/rrc_extern.h b/openair2/RRC/LTE/rrc_extern.h index 6c19fa9c7076202b934664657e2e9cc3c3c1508a..59200fb9cc4250c6e9cd13f457b779fd6ed0e757 100644 --- a/openair2/RRC/LTE/rrc_extern.h +++ b/openair2/RRC/LTE/rrc_extern.h @@ -75,7 +75,7 @@ extern uint32_t timeToTrigger_ms[16]; extern float RSRP_meas_mapping[98]; extern float RSRQ_meas_mapping[35]; -extern UE_PF_PO_t UE_PF_PO[MAX_NUM_CCs][MAX_MOBILES_PER_ENB]; +extern UE_PF_PO_t UE_PF_PO[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB]; extern pthread_mutex_t ue_pf_po_mutex; extern uint16_t reestablish_rnti_map[MAX_MOBILES_PER_ENB][2]; diff --git a/openair2/RRC/LTE/rrc_vars.h b/openair2/RRC/LTE/rrc_vars.h index 8a718581014a8c9d87fbf5ccffa8c76b1e7db732..028a59f7d96915928827a8c24090f40a7efc7c7a 100644 --- a/openair2/RRC/LTE/rrc_vars.h +++ b/openair2/RRC/LTE/rrc_vars.h @@ -36,7 +36,7 @@ #include "COMMON/mac_rrc_primitives.h" #include "LAYER2/MAC/mac.h" -UE_PF_PO_t UE_PF_PO[MAX_NUM_CCs][MAX_MOBILES_PER_ENB]; +UE_PF_PO_t UE_PF_PO[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB]; pthread_mutex_t ue_pf_po_mutex; UE_RRC_INST *UE_rrc_inst; #include "LAYER2/MAC/mac_extern.h" diff --git a/targets/RT/USER/lte-enb.c b/targets/RT/USER/lte-enb.c index a2f255d4d890fd295552016c55469afac7b7b6d0..8c6f6dd766881ef32dc8cd3a7b635dd67a293d28 100644 --- a/targets/RT/USER/lte-enb.c +++ b/targets/RT/USER/lte-enb.c @@ -781,10 +781,10 @@ static void* eNB_thread_prach( void* param ) { while (!oai_exit) { - if (oai_exit) break; if (wait_on_condition(&proc->mutex_prach,&proc->cond_prach,&proc->instance_cnt_prach,"eNB_prach_thread") < 0) break; + if (oai_exit) break; LOG_D(PHY,"Running eNB prach 
procedures\n"); prach_procedures(eNB @@ -822,10 +822,10 @@ static void* eNB_thread_prach_br( void* param ) { while (!oai_exit) { - if (oai_exit) break; if (wait_on_condition(&proc->mutex_prach_br,&proc->cond_prach_br,&proc->instance_cnt_prach_br,"eNB_prach_thread_br") < 0) break; + if (oai_exit) break; LOG_D(PHY,"Running eNB prach procedures for BL/CE UEs\n"); prach_procedures(eNB,1); @@ -1029,8 +1029,10 @@ void kill_eNB_proc(int inst) { proc = &eNB->proc; proc_rxtx = &proc->proc_rxtx[0]; - kill_td_thread(eNB); - kill_te_thread(eNB); + if(get_nprocs() > 2 && codingw) { + kill_td_thread(eNB); + kill_te_thread(eNB); + } LOG_I(PHY, "Killing TX CC_id %d inst %d\n", CC_id, inst ); for (i=0; i<2; i++) { pthread_mutex_lock(&proc_rxtx[i].mutex_rxtx); @@ -1039,8 +1041,10 @@ void kill_eNB_proc(int inst) { pthread_cond_signal(&proc_rxtx[i].cond_rxtx); pthread_mutex_unlock(&proc_rxtx[i].mutex_rxtx); } + pthread_mutex_lock(&proc->mutex_prach); proc->instance_cnt_prach = 0; pthread_cond_signal( &proc->cond_prach ); + pthread_mutex_unlock(&proc->mutex_prach); pthread_cond_signal( &proc->cond_asynch_rxtx ); pthread_cond_broadcast(&sync_phy_proc.cond_phy_proc_tx); @@ -1067,6 +1071,16 @@ void kill_eNB_proc(int inst) { pthread_mutex_destroy( &proc_rxtx[i].mutex_rxtx ); pthread_cond_destroy( &proc_rxtx[i].cond_rxtx ); } + + pthread_attr_destroy(&proc->attr_prach); + pthread_attr_destroy(&proc->attr_asynch_rxtx); + pthread_attr_destroy(&proc_rxtx[0].attr_rxtx); + pthread_attr_destroy(&proc_rxtx[1].attr_rxtx); +#ifdef Rel14 + pthread_mutex_destroy(&proc->mutex_RU_PRACH_br); + pthread_attr_destroy(&proc->attr_prach_br); +#endif + } } @@ -1104,10 +1118,10 @@ void free_transport(PHY_VARS_eNB *eNB) int j; for (i=0; i<NUMBER_OF_UE_MAX; i++) { - LOG_I(PHY, "Freeing Transport Channel Buffers for DLSCH, UE %d\n",i); + LOG_D(PHY, "Freeing Transport Channel Buffers for DLSCH, UE %d\n",i); for (j=0; j<2; j++) free_eNB_dlsch(eNB->dlsch[i][j]); - LOG_I(PHY, "Freeing Transport Channel Buffer for 
ULSCH, UE %d\n",i); + LOG_D(PHY, "Freeing Transport Channel Buffer for ULSCH, UE %d\n",i); free_eNB_ulsch(eNB->ulsch[1+i]); } free_eNB_ulsch(eNB->ulsch[0]); diff --git a/targets/RT/USER/lte-ru.c b/targets/RT/USER/lte-ru.c index 578b647d96b33a479147e1678a5cd0477066380f..d20b614ad79d4332bc10915d270106bc67c3340b 100644 --- a/targets/RT/USER/lte-ru.c +++ b/targets/RT/USER/lte-ru.c @@ -1080,8 +1080,8 @@ static void* ru_thread_prach( void* param ) { while (!oai_exit) { - if (oai_exit) break; if (wait_on_condition(&proc->mutex_prach,&proc->cond_prach,&proc->instance_cnt_prach,"ru_prach_thread") < 0) break; + if (oai_exit) break; VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME( VCD_SIGNAL_DUMPER_FUNCTIONS_PHY_RU_PRACH_RX, 1 ); if (ru->eNB_list[0]){ prach_procedures( @@ -1130,8 +1130,8 @@ static void* ru_thread_prach_br( void* param ) { while (!oai_exit) { - if (oai_exit) break; if (wait_on_condition(&proc->mutex_prach_br,&proc->cond_prach_br,&proc->instance_cnt_prach_br,"ru_prach_thread_br") < 0) break; + if (oai_exit) break; rx_prach(NULL, ru, NULL, @@ -2230,11 +2230,36 @@ void init_RU_proc(RU_t *ru) { } -void kill_RU_proc(int inst) +void kill_RU_proc(RU_t *ru) { - RU_t *ru = RC.ru[inst]; RU_proc_t *proc = &ru->proc; +#if defined(PRE_SCD_THREAD) + pthread_mutex_lock(&proc->mutex_pre_scd); + ru->proc.instance_pre_scd = 0; + pthread_cond_signal(&proc->cond_pre_scd); + pthread_mutex_unlock(&proc->mutex_pre_scd); + pthread_join(proc->pthread_pre_scd, NULL); + pthread_mutex_destroy(&proc->mutex_pre_scd); + pthread_cond_destroy(&proc->cond_pre_scd); +#endif +#ifdef PHY_TX_THREAD + pthread_mutex_lock(&proc->mutex_phy_tx); + proc->instance_cnt_phy_tx = 0; + pthread_cond_signal(&proc->cond_phy_tx); + pthread_mutex_unlock(&proc->mutex_phy_tx); + pthread_join(ru->proc.pthread_phy_tx, NULL); + pthread_mutex_destroy( &proc->mutex_phy_tx); + pthread_cond_destroy( &proc->cond_phy_tx); + pthread_mutex_lock(&proc->mutex_rf_tx); + proc->instance_cnt_rf_tx = 0; + 
pthread_cond_signal(&proc->cond_rf_tx); + pthread_mutex_unlock(&proc->mutex_rf_tx); + pthread_join(proc->pthread_rf_tx, NULL); + pthread_mutex_destroy( &proc->mutex_rf_tx); + pthread_cond_destroy( &proc->cond_rf_tx); +#endif + if (get_nprocs() > 2 && fepw) { LOG_D(PHY, "killing FEP thread\n"); kill_fep_thread(ru); @@ -2271,8 +2296,10 @@ void kill_RU_proc(int inst) pthread_mutex_lock(&proc->mutex_eNBs); proc->ru_tx_ready = 0; - proc->instance_cnt_eNBs = 0; - pthread_cond_signal(&proc->cond_eNBs); + proc->instance_cnt_eNBs = 1; + // cond_eNBs is used by both ru_thread and ru_thread_tx, so we need to send + // a broadcast to wake up both threads + pthread_cond_broadcast(&proc->cond_eNBs); pthread_mutex_unlock(&proc->mutex_eNBs); pthread_mutex_lock(&proc->mutex_asynch_rxtx); @@ -2280,10 +2307,12 @@ void kill_RU_proc(int inst) pthread_cond_signal(&proc->cond_asynch_rxtx); pthread_mutex_unlock(&proc->mutex_asynch_rxtx); - /*LOG_D(PHY, "Joining pthread_FH\n"); + LOG_D(PHY, "Joining pthread_FH\n"); pthread_join(proc->pthread_FH, NULL); - LOG_D(PHY, "Joining pthread_FHTX\n"); - pthread_join(proc->pthread_FH1, NULL);*/ + if (!single_thread_flag && get_nprocs() > 4) { + LOG_D(PHY, "Joining pthread_FHTX\n"); + pthread_join(proc->pthread_FH1, NULL); + } if (ru->function == NGFI_RRU_IF4p5) { LOG_D(PHY, "Joining pthread_prach\n"); pthread_join(proc->pthread_prach, NULL); @@ -2755,45 +2784,11 @@ void init_RU(char *rf_config_file) { } - - - -void stop_ru(RU_t *ru) { - -#if defined(PRE_SCD_THREAD) || defined(PHY_TX_THREAD) - int *status; -#endif - printf("Stopping RU %p processing threads\n",(void*)ru); -#if defined(PRE_SCD_THREAD) - if(ru){ - ru->proc.instance_pre_scd = 0; - pthread_cond_signal( &ru->proc.cond_pre_scd ); - pthread_join(ru->proc.pthread_pre_scd, (void**)&status ); - pthread_mutex_destroy(&ru->proc.mutex_pre_scd ); - pthread_cond_destroy(&ru->proc.cond_pre_scd ); - } -#endif -#ifdef PHY_TX_THREAD - if(ru){ - ru->proc.instance_cnt_phy_tx = 0; - 
pthread_cond_signal(&ru->proc.cond_phy_tx); - pthread_join( ru->proc.pthread_phy_tx, (void**)&status ); - pthread_mutex_destroy( &ru->proc.mutex_phy_tx ); - pthread_cond_destroy( &ru->proc.cond_phy_tx ); - ru->proc.instance_cnt_rf_tx = 0; - pthread_cond_signal(&ru->proc.cond_rf_tx); - pthread_join( ru->proc.pthread_rf_tx, (void**)&status ); - pthread_mutex_destroy( &ru->proc.mutex_rf_tx ); - pthread_cond_destroy( &ru->proc.cond_rf_tx ); - } -#endif -} - void stop_RU(int nb_ru) { for (int inst = 0; inst < nb_ru; inst++) { LOG_I(PHY, "Stopping RU %d processing threads\n", inst); - kill_RU_proc(inst); + kill_RU_proc(RC.ru[inst]); } } diff --git a/targets/RT/USER/lte-softmodem.c b/targets/RT/USER/lte-softmodem.c index 2d5f04d38680846c0b252373dbe74e6174644d16..aa8872679eedeb228182146f6c21c12c9ab80d0d 100644 --- a/targets/RT/USER/lte-softmodem.c +++ b/targets/RT/USER/lte-softmodem.c @@ -448,46 +448,22 @@ void *l2l1_task(void *arg) { itti_set_task_real_time(TASK_L2L1); itti_mark_task_ready(TASK_L2L1); - /* Wait for the initialize message */ - printf("Wait for the ITTI initialize message\n"); - do { - if (message_p != NULL) { - result = itti_free (ITTI_MSG_ORIGIN_ID(message_p), message_p); - AssertFatal (result == EXIT_SUCCESS, "Failed to free memory (%d)!\n", result); - } - - itti_receive_msg (TASK_L2L1, &message_p); - - switch (ITTI_MSG_ID(message_p)) { - case INITIALIZE_MESSAGE: - /* Start eNB thread */ - printf("L2L1 TASK received %s\n", ITTI_MSG_NAME(message_p)); - start_eNB = 1; - break; - - case TERMINATE_MESSAGE: - printf("received terminate message\n"); - oai_exit=1; - start_eNB = 0; - itti_exit_task (); - break; - - default: - printf("Received unexpected message %s\n", ITTI_MSG_NAME(message_p)); - break; - } - } while (ITTI_MSG_ID(message_p) != INITIALIZE_MESSAGE); - - result = itti_free (ITTI_MSG_ORIGIN_ID(message_p), message_p); - AssertFatal (result == EXIT_SUCCESS, "Failed to free memory (%d)!\n", result); -/* ???? no else but seems to be UE only ??? 
- do { - // Wait for a message + /* Wait for the initialize message */ + printf("Wait for the ITTI initialize message\n"); + while (1) { itti_receive_msg (TASK_L2L1, &message_p); switch (ITTI_MSG_ID(message_p)) { + case INITIALIZE_MESSAGE: + /* Start eNB thread */ + LOG_D(PHY, "L2L1 TASK received %s\n", ITTI_MSG_NAME(message_p)); + start_eNB = 1; + break; + case TERMINATE_MESSAGE: + LOG_W(PHY, " *** Exiting L2L1 thread\n"); oai_exit=1; + start_eNB = 0; itti_exit_task (); break; @@ -510,8 +486,9 @@ void *l2l1_task(void *arg) { result = itti_free (ITTI_MSG_ORIGIN_ID(message_p), message_p); AssertFatal (result == EXIT_SUCCESS, "Failed to free memory (%d)!\n", result); - } while(!oai_exit); -*/ + message_p = NULL; + }; + return NULL; } #endif @@ -772,12 +749,13 @@ void wait_eNBs(void) { /* * helper function to terminate a certain ITTI task */ -void terminate_task(task_id_t task_id, module_id_t mod_id) +void terminate_task(module_id_t mod_id, task_id_t from, task_id_t to) { - LOG_I(ENB_APP, "sending TERMINATE_MESSAGE to task %s (%d)\n", itti_get_task_name(task_id), task_id); + LOG_I(ENB_APP, "sending TERMINATE_MESSAGE from task %s (%d) to task %s (%d)\n", + itti_get_task_name(from), from, itti_get_task_name(to), to); MessageDef *msg; - msg = itti_alloc_new_message (ENB_APP, TERMINATE_MESSAGE); - itti_send_msg_to_task (task_id, ENB_MODULE_ID_TO_INSTANCE(mod_id), msg); + msg = itti_alloc_new_message (from, TERMINATE_MESSAGE); + itti_send_msg_to_task (to, ENB_MODULE_ID_TO_INSTANCE(mod_id), msg); } extern void free_transport(PHY_VARS_eNB *); @@ -786,39 +764,20 @@ extern void phy_free_RU(RU_t*); int stop_L1L2(module_id_t enb_id) { LOG_W(ENB_APP, "stopping lte-softmodem\n"); - oai_exit = 1; if (!RC.ru) { LOG_UI(ENB_APP, "no RU configured\n"); return -1; } - /* stop trx devices, multiple carrier currently not supported by RU */ - if (RC.ru[enb_id]) { - if (RC.ru[enb_id]->rfdevice.trx_stop_func) { - RC.ru[enb_id]->rfdevice.trx_stop_func(&RC.ru[enb_id]->rfdevice); - 
LOG_I(ENB_APP, "turned off RU rfdevice\n"); - } else { - LOG_W(ENB_APP, "can not turn off rfdevice due to missing trx_stop_func callback, proceding anyway!\n"); - } - if (RC.ru[enb_id]->ifdevice.trx_stop_func) { - RC.ru[enb_id]->ifdevice.trx_stop_func(&RC.ru[enb_id]->ifdevice); - LOG_I(ENB_APP, "turned off RU ifdevice\n"); - } else { - LOG_W(ENB_APP, "can not turn off ifdevice due to missing trx_stop_func callback, proceding anyway!\n"); - } - } else { - LOG_W(ENB_APP, "no RU found for index %d\n", enb_id); - return -1; - } - /* these tasks need to pick up new configuration */ - terminate_task(TASK_RRC_ENB, enb_id); - terminate_task(TASK_L2L1, enb_id); + terminate_task(enb_id, TASK_ENB_APP, TASK_RRC_ENB); + terminate_task(enb_id, TASK_ENB_APP, TASK_L2L1); + oai_exit = 1; + LOG_I(ENB_APP, "calling kill_RU_proc() for instance %d\n", enb_id); + kill_RU_proc(RC.ru[enb_id]); LOG_I(ENB_APP, "calling kill_eNB_proc() for instance %d\n", enb_id); kill_eNB_proc(enb_id); - LOG_I(ENB_APP, "calling kill_RU_proc() for instance %d\n", enb_id); - kill_RU_proc(enb_id); oai_exit = 0; for (int cc_id = 0; cc_id < RC.nb_CC[enb_id]; cc_id++) { free_transport(RC.eNB[enb_id][cc_id]); @@ -841,7 +800,9 @@ int restart_L1L2(module_id_t enb_id) LOG_W(ENB_APP, "restarting lte-softmodem\n"); /* block threads */ + pthread_mutex_lock(&sync_mutex); sync_var = -1; + pthread_mutex_unlock(&sync_mutex); for (cc_id = 0; cc_id < RC.nb_L1_CC[enb_id]; cc_id++) { RC.eNB[enb_id][cc_id]->configured = 0; @@ -852,6 +813,9 @@ int restart_L1L2(module_id_t enb_id) /* TODO this should be done for all RUs associated to this eNB */ memcpy(&ru->frame_parms, &RC.eNB[enb_id][0]->frame_parms, sizeof(LTE_DL_FRAME_PARMS)); set_function_spec_param(RC.ru[enb_id]); + /* reset the list of connected UEs in the MAC, since in this process with + * loose all UEs (have to reconnect) */ + init_UE_list(&RC.mac[enb_id]->UE_list); LOG_I(ENB_APP, "attempting to create ITTI tasks\n"); if (itti_create_task (TASK_RRC_ENB, rrc_enb_task, 
NULL) < 0) { @@ -1263,25 +1227,20 @@ int main( int argc, char **argv ) printf("stopping MODEM threads\n"); - // cleanup - for (ru_id=0;ru_id<RC.nb_RU;ru_id++) { - stop_ru(RC.ru[ru_id]); - } - - stop_eNB(NB_eNB_INST); - stop_RU(RC.nb_RU); - /* release memory used by the RU/eNB threads (incomplete), after all - * threads have been stopped (they partially use the same memory) */ - for (int inst = 0; inst < NB_eNB_INST; inst++) { - for (int cc_id = 0; cc_id < RC.nb_CC[inst]; cc_id++) { - free_transport(RC.eNB[inst][cc_id]); - phy_free_lte_eNB(RC.eNB[inst][cc_id]); - } - } - for (int inst = 0; inst < RC.nb_RU; inst++) { - phy_free_RU(RC.ru[inst]); + stop_eNB(NB_eNB_INST); + stop_RU(RC.nb_RU); + /* release memory used by the RU/eNB threads (incomplete), after all + * threads have been stopped (they partially use the same memory) */ + for (int inst = 0; inst < NB_eNB_INST; inst++) { + for (int cc_id = 0; cc_id < RC.nb_CC[inst]; cc_id++) { + free_transport(RC.eNB[inst][cc_id]); + phy_free_lte_eNB(RC.eNB[inst][cc_id]); } - free_lte_top(); + } + for (int inst = 0; inst < RC.nb_RU; inst++) { + phy_free_RU(RC.ru[inst]); + } + free_lte_top(); printf("About to call end_configmodule() from %s() %s:%d\n", __FUNCTION__, __FILE__, __LINE__); end_configmodule(); diff --git a/targets/RT/USER/lte-softmodem.h b/targets/RT/USER/lte-softmodem.h index 4728dce18ac6a1eb1ad05d9bd0116b6a1b2964d7..b421181f37ab1ebfb9efaf06c07af0d4aa5ff79c 100644 --- a/targets/RT/USER/lte-softmodem.h +++ b/targets/RT/USER/lte-softmodem.h @@ -252,7 +252,7 @@ extern void init_RU(const char*); extern void stop_ru(RU_t *ru); extern void init_RU_proc(RU_t *ru); extern void stop_RU(int nb_ru); -extern void kill_RU_proc(int inst); +extern void kill_RU_proc(RU_t *ru); extern void set_function_spec_param(RU_t *ru); // In lte-ue.c