diff --git a/openair2/LAYER2/MAC/eNB_scheduler_dlsch.c b/openair2/LAYER2/MAC/eNB_scheduler_dlsch.c index e6989d676c931590a4335d4431273819577853d9..22ea1142736a718cde249dda20921093143c5379 100644 --- a/openair2/LAYER2/MAC/eNB_scheduler_dlsch.c +++ b/openair2/LAYER2/MAC/eNB_scheduler_dlsch.c @@ -537,6 +537,19 @@ schedule_dlsch(module_id_t module_idP, frame_t frameP, sub_frame_t subframeP, in sorting_policy_current[i] = sorting_policy[i]; } + // Check for new accounting policy + if (accounting_policy_current[i] != accounting_policy[i]) { + if (accounting_policy[i] > 1 || accounting_policy[i] < 0) { + LOG_W(MAC,"[eNB %d][SLICE %d][DL] frame %d subframe %d: invalid accounting policy (%d), revert to its previous value (%d)\n", + module_idP, i, frameP, subframeP, accounting_policy[i], accounting_policy_current[i]); + accounting_policy[i] = accounting_policy_current[i]; + } else { + LOG_N(MAC,"[eNB %d][SLICE %d][DL] frame %d subframe %d: accounting policy has changed (%d-->%d)\n", + module_idP, i, frameP, subframeP, accounting_policy_current[i], accounting_policy[i]); + accounting_policy_current[i] = accounting_policy[i]; + } + } + // Run each enabled slice-specific schedulers one by one slice_sched_dl[i](module_idP, i, frameP, subframeP, mbsfn_flag/*, dl_info*/); } diff --git a/openair2/LAYER2/MAC/eNB_scheduler_dlsch.h b/openair2/LAYER2/MAC/eNB_scheduler_dlsch.h index 29ce735b055dad7bfe19e260025dc1fe1872d6bb..2112bc40384d4f2c2cbe350b83d00db40ea37a17 100644 --- a/openair2/LAYER2/MAC/eNB_scheduler_dlsch.h +++ b/openair2/LAYER2/MAC/eNB_scheduler_dlsch.h @@ -71,6 +71,10 @@ char *dl_scheduler_type[MAX_NUM_SLICES] = uint32_t sorting_policy[MAX_NUM_SLICES] = {0x01234, 0x01234, 0x01234, 0x01234}; uint32_t sorting_policy_current[MAX_NUM_SLICES] = {0x01234, 0x01234, 0x01234, 0x01234}; +// Accounting policy (just greedy(1) or fair(0) setting for now) +int accounting_policy[MAX_NUM_SLICES] = {0, 0, 0, 0}; +int accounting_policy_current[MAX_NUM_SLICES] = {0, 0, 0, 0}; + // pointer to 
the slice specific scheduler slice_scheduler_dl slice_sched_dl[MAX_NUM_SLICES] = {0}; diff --git a/openair2/LAYER2/MAC/pre_processor.c b/openair2/LAYER2/MAC/pre_processor.c index cba110894f049105e04383958d1c8edaec588757..253f5736a6730da419edb338fbff19cb2b39a1d0 100644 --- a/openair2/LAYER2/MAC/pre_processor.c +++ b/openair2/LAYER2/MAC/pre_processor.c @@ -58,6 +58,7 @@ extern float slice_percentage[MAX_NUM_SLICES]; extern float slice_percentage_uplink[MAX_NUM_SLICES]; extern int slice_position[MAX_NUM_SLICES*2]; extern uint32_t sorting_policy[MAX_NUM_SLICES]; +extern int accounting_policy[MAX_NUM_SLICES]; extern int slice_maxmcs[MAX_NUM_SLICES]; extern int slice_maxmcs_uplink[MAX_NUM_SLICES]; @@ -596,6 +597,7 @@ void dlsch_scheduler_pre_processor_accounting(module_id_t Mod_id, rnti_t rnti; uint8_t harq_pid, round; + uint16_t available_rbs; uint8_t rbs_retx[NFAPI_CC_MAX]; uint16_t average_rbs_per_user[NFAPI_CC_MAX]; int ue_count_newtx[NFAPI_CC_MAX]; @@ -622,7 +624,6 @@ void dlsch_scheduler_pre_processor_accounting(module_id_t Mod_id, // Find total UE count, and account the RBs required for retransmissions for (UE_id = UE_list->head; UE_id >= 0; UE_id = UE_list->next[UE_id]) { rnti = UE_RNTI(Mod_id, UE_id); - if (rnti == NOT_A_RNTI) continue; if (UE_list->UE_sched_ctrl[UE_id].ul_out_of_sync == 1) continue; if (!ue_slice_membership(UE_id, slice_id)) continue; @@ -653,38 +654,77 @@ void dlsch_scheduler_pre_processor_accounting(module_id_t Mod_id, } } - // loop over all active UEs and calculate avg rb per user based on total active UEs - for (UE_id = UE_list->head; UE_id >= 0; UE_id = UE_list->next[UE_id]) { - rnti = UE_RNTI(Mod_id, UE_id); - - if (rnti == NOT_A_RNTI) continue; - if (UE_list->UE_sched_ctrl[UE_id].ul_out_of_sync == 1) continue; - if (!ue_slice_membership(UE_id, slice_id)) continue; - - for (i = 0; i < UE_num_active_CC(UE_list, UE_id); ++i) { - CC_id = UE_list->ordered_CCids[i][UE_id]; + switch (accounting_policy[slice_id]) { - N_RB_DL = 
to_prb(RC.mac[Mod_id]->common_channels[CC_id].mib->message.dl_Bandwidth) - rbs_retx[CC_id]; + // If greedy scheduling, try to account all the required RBs + case 1: + for (UE_id = UE_list->head; UE_id >= 0; UE_id = UE_list->next[UE_id]) { + rnti = UE_RNTI(Mod_id, UE_id); + if (rnti == NOT_A_RNTI) continue; + if (UE_list->UE_sched_ctrl[UE_id].ul_out_of_sync == 1) continue; + if (!ue_slice_membership(UE_id, slice_id)) continue; - // recalculate based on the what is left after retransmission - ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id]; - ue_sched_ctl->max_rbs_allowed_slice[CC_id][slice_id] = nb_rbs_allowed_slice(slice_percentage[slice_id], N_RB_DL); + for (i = 0; i < UE_num_active_CC(UE_list, UE_id); i++) { + CC_id = UE_list->ordered_CCids[i][UE_id]; + nb_rbs_accounted[CC_id][UE_id] = nb_rbs_required[CC_id][UE_id]; + } + } + break; + + // Use the old, fair algorithm + // Loop over all active UEs and account the avg number of RBs to each UE, based on all non-retx UEs. + default: + // FIXME: This is not ideal, why loop on UEs to find average_rbs_per_user[], that is per-CC? + // TODO: Look how to loop on active CCs only without using the UE_num_active_CC() function. 
+ for (UE_id = UE_list->head; UE_id >= 0; UE_id = UE_list->next[UE_id]) { + rnti = UE_RNTI(Mod_id, UE_id); + + if (rnti == NOT_A_RNTI) continue; + if (UE_list->UE_sched_ctrl[UE_id].ul_out_of_sync == 1) continue; + if (!ue_slice_membership(UE_id, slice_id)) continue; + + for (i = 0; i < UE_num_active_CC(UE_list, UE_id); ++i) { + CC_id = UE_list->ordered_CCids[i][UE_id]; + ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id]; + + N_RB_DL = to_prb(RC.mac[Mod_id]->common_channels[CC_id].mib->message.dl_Bandwidth) - rbs_retx[CC_id]; + // recalculate based on the what is left after retransmission + ue_sched_ctl->max_rbs_allowed_slice[CC_id][slice_id] = nb_rbs_allowed_slice(slice_percentage[slice_id], N_RB_DL); + + available_rbs = ue_sched_ctl->max_rbs_allowed_slice[CC_id][slice_id]; + + if (total_ue_count[CC_id] == 0) { + average_rbs_per_user[CC_id] = 0; + } else if ((min_rb_unit[CC_id] * total_ue_count[CC_id]) <= available_rbs) { + average_rbs_per_user[CC_id] = (uint16_t) floor(available_rbs / total_ue_count[CC_id]); + } else { + // consider the total number of use that can be scheduled UE + average_rbs_per_user[CC_id] = (uint16_t)min_rb_unit[CC_id]; + } + } + } - if (total_ue_count[CC_id] == 0) { - average_rbs_per_user[CC_id] = 0; - } else if ((min_rb_unit[CC_id] * total_ue_count[CC_id]) <= - (ue_sched_ctl->max_rbs_allowed_slice[CC_id][slice_id])) { - average_rbs_per_user[CC_id] = - (uint16_t) floor(ue_sched_ctl->max_rbs_allowed_slice[CC_id][slice_id] / total_ue_count[CC_id]); - } else { - // consider the total number of use that can be scheduled UE - average_rbs_per_user[CC_id] = (uint16_t)min_rb_unit[CC_id]; + // note: nb_rbs_required is assigned according to total_buffer_dl + // extend nb_rbs_required to capture per LCID RB required + for (UE_id = UE_list->head; UE_id >= 0; UE_id = UE_list->next[UE_id]) { + rnti = UE_RNTI(Mod_id, UE_id); + if (rnti == NOT_A_RNTI) continue; + if (UE_list->UE_sched_ctrl[UE_id].ul_out_of_sync == 1) continue; + if (!ue_slice_membership(UE_id, 
slice_id)) continue; + + for (i = 0; i < UE_num_active_CC(UE_list, UE_id); i++) { + CC_id = UE_list->ordered_CCids[i][UE_id]; + nb_rbs_accounted[CC_id][UE_id] = cmin(average_rbs_per_user[CC_id], nb_rbs_required[CC_id][UE_id]); + } } - } + break; } - // note: nb_rbs_required is assigned according to total_buffer_dl - // extend nb_rbs_required to capture per LCID RB required + + + + // Check retransmissions + // TODO: Do this once at the beginning for (UE_id = UE_list->head; UE_id >= 0; UE_id = UE_list->next[UE_id]) { rnti = UE_RNTI(Mod_id, UE_id); if (rnti == NOT_A_RNTI) continue; @@ -702,8 +742,6 @@ void dlsch_scheduler_pre_processor_accounting(module_id_t Mod_id, /* TODO: do we have to check for retransmission? */ if (mac_eNB_get_rrc_status(Mod_id, rnti) < RRC_RECONFIGURED || round != 8) { nb_rbs_accounted[CC_id][UE_id] = nb_rbs_required[CC_id][UE_id]; - } else { - nb_rbs_accounted[CC_id][UE_id] = cmin(average_rbs_per_user[CC_id], nb_rbs_required[CC_id][UE_id]); } } }