Commit ad94c8aa authored by Anton Beloglazov

Fixed VmSchedulerTimeSharedOverSubscription, VmSchedulerTimeShared, and tests

parent 44d41b81
......@@ -13,7 +13,6 @@ import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import org.cloudbus.cloudsim.lists.PeList;
import org.cloudbus.cloudsim.provisioners.PeProvisioner;
......@@ -21,7 +20,7 @@ import org.cloudbus.cloudsim.provisioners.PeProvisioner;
/**
* VmSchedulerTimeShared is a VMM allocation policy that allocates one or more Pe to a VM, and
* allows sharing of PEs by multiple VMs. This class also implements 10% performance degradation due
* to VM migration.
* to VM migration. This scheduler does not support over-subscription.
*
* @author Rodrigo N. Calheiros
* @author Anton Beloglazov
......@@ -88,11 +87,16 @@ public class VmSchedulerTimeShared extends VmScheduler {
totalRequestedMips += mips;
}
// This scheduler does not allow over-subscription
if (getAvailableMips() < totalRequestedMips) {
return false;
}
getMipsMapRequested().put(vmUid, mipsShareRequested);
setPesInUse(getPesInUse() + mipsShareRequested.size());
if (getVmsMigratingIn().contains(vmUid)) {
// performance cost incurred by the destination host = 10% MIPS
// the destination host only experiences 10% of the migrating VM's MIPS
totalRequestedMips *= 0.1;
}
......@@ -102,92 +106,18 @@ public class VmSchedulerTimeShared extends VmScheduler {
// performance degradation due to migration = 10% MIPS
mipsRequested *= 0.9;
} else if (getVmsMigratingIn().contains(vmUid)) {
// performance cost incurred by the destination host = 10% MIPS
// the destination host only experiences 10% of the migrating VM's MIPS
mipsRequested *= 0.1;
}
mipsShareAllocated.add(mipsRequested);
}
if (getAvailableMips() >= totalRequestedMips) {
getMipsMap().put(vmUid, mipsShareAllocated);
setAvailableMips(getAvailableMips() - totalRequestedMips);
} else {
updateShortage();
}
return true;
}
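/*
 * Worked illustration (a standalone sketch, not part of this commit; the class name is
 * illustrative): the migration factors applied above, checked with the numbers used in the
 * tests of this commit, where a migrating VM requests MIPS / 4 = 250 MIPS.
 */
public class MigrationCostSketch {

    public static void main(String[] args) {
        double requestedMips = 250;

        // a VM migrating out of this host suffers a 10% performance degradation,
        // so it is allocated 0.9 * 250 = 225 MIPS (asserted as 0.9 * MIPS / 4 in
        // VmSchedulerTimeSharedOverSubscriptionTest.testAllocatePesForVmInMigration)
        double allocatedWhileMigratingOut = requestedMips * 0.9;

        // a VM migrating into this host only loads the destination with 10% of its
        // requested MIPS, i.e. 0.1 * 250 = 25 MIPS counted against available capacity
        double countedWhileMigratingIn = requestedMips * 0.1;

        System.out.println(allocatedWhileMigratingOut + " / " + countedWhileMigratingIn);
    }
}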
/**
* This method recalculates distribution of MIPs among VMs considering eventual shortage of MIPS
* compared to the amount requested by VMs.
*/
protected void updateShortage() {
// first, we have to know the weight of each VM in the allocation of mips. We want to keep
// allocation proportional to it
HashMap<String, Double> weightMap = new HashMap<String, Double>();
double totalRequiredMips = 0.0;
Iterator<Entry<String, List<Double>>> iter = mipsMapRequested.entrySet().iterator();
while (iter.hasNext()) {
Entry<String, List<Double>> entry = iter.next();
// each element of the iterator is one entry of the table: (vmId, mipsShare)
String vmId = entry.getKey();
List<Double> shares = entry.getValue();
// count amount of mips required by the vm
double requiredMipsByThisVm = 0.0;
for (double share : shares) {
totalRequiredMips += share;
requiredMipsByThisVm += share;
}
// store the value to use later to define weights
weightMap.put(vmId, requiredMipsByThisVm);
}
// now, we have the information on individual weights
// use this information to define actual shares of
// mips received by each VM
iter = mipsMapRequested.entrySet().iterator();
while (iter.hasNext()) {
Entry<String, List<Double>> entry = iter.next();
// each element of the iterator is one entry of the table: (vmId, mipsShare)
String vmId = entry.getKey();
List<Double> shares = entry.getValue();
// get weight of this vm
double vmWeight = weightMap.get(vmId) / totalRequiredMips;
// update actual received share
LinkedList<Double> updatedSharesList = new LinkedList<Double>();
double actuallyAllocatedMips = 0.0;
for (double share : shares) {
double updatedShare = share * vmWeight;
// update share if migrating
if (getVmsMigratingOut().contains(vmId)) {
// performance degradation due to migration = 10% MIPS
updatedShare *= 0.9;
} else if (getVmsMigratingIn().contains(vmId)) {
// performance cost incurred by the destination host = 10% MIPS
updatedShare *= 0.1;
}
actuallyAllocatedMips += updatedShare;
updatedSharesList.add(updatedShare);
}
// add in the new map
getMipsMap().put(vmId, updatedSharesList);
setAvailableMips(getAvailableMips() - actuallyAllocatedMips);
}
}
/**
* Update allocation of VMs on PEs.
*/
......@@ -222,7 +152,7 @@ public class VmSchedulerTimeShared extends VmScheduler {
}
if (!peIterator.hasNext()) {
Log.printLine("There is no enough MIPS (" + mips + ") to accommodate VM " + vmUid);
System.exit(0);
// System.exit(0);
}
pe = peIterator.next();
peProvisioner = pe.getPeProvisioner();
......
/*
* Title: CloudSim Toolkit
* Description: CloudSim (Cloud Simulation) Toolkit for Modeling and Simulation of Clouds
* Licence: GPL - http://www.gnu.org/copyleft/gpl.html
* Title: CloudSim Toolkit Description: CloudSim (Cloud Simulation) Toolkit for Modeling and
* Simulation of Clouds Licence: GPL - http://www.gnu.org/copyleft/gpl.html
*
* Copyright (c) 2009-2010, The University of Melbourne, Australia
*/
......@@ -12,8 +11,14 @@ import java.util.ArrayList;
import java.util.List;
import java.util.Map.Entry;
import org.cloudbus.cloudsim.lists.PeList;
import org.cloudbus.cloudsim.util.MathUtil;
/**
* The Class VmSchedulerTimeSharedOverSubscription.
* This is a time-shared VM scheduler that allows over-subscription. In other words, the scheduler
* still allows the allocation of VMs that require more CPU capacity than is available.
* Over-subscription results in performance degradation. Each virtual PE cannot be allocated more CPU
* capacity than the MIPS of a single PE.
*/
public class VmSchedulerTimeSharedOverSubscription extends VmSchedulerTimeShared {
......@@ -27,69 +32,113 @@ public class VmSchedulerTimeSharedOverSubscription extends VmSchedulerTimeShared
}
/**
* Allocate pes for vm. The policy allow over-subscription. In other words,
* the policy still allows the allocation of VMs that require more CPU capacity
* that is available. Oversubscription results in performance degradation. Each
* virtual PE cannot be allocated more CPU capacity than MIPS of a single PE.
* Allocate PEs for a VM. The policy allows over-subscription. In other words, the policy still
* allows the allocation of VMs that require more CPU capacity than is available.
* Over-subscription results in performance degradation. Each virtual PE cannot be allocated more
* CPU capacity than the MIPS of a single PE.
*
* @param vmUid the vm uid
* @param mipsShareRequested the mips share requested
*
* @return true, if successful
*/
@Override
protected boolean allocatePesForVm(String vmUid, List<Double> mipsShareRequested) {
getMipsMapRequested().put(vmUid, mipsShareRequested);
setPesInUse(getPesInUse() + mipsShareRequested.size());
double totalRequestedMips = 0;
double peMips = getPeCapacity();
for (Double mips : mipsShareRequested) {
// each virtual PE of a VM must not require more than the capacity of a physical PE
if (mips > peMips) {
return false;
}
totalRequestedMips += mips;
}
getMipsMapRequested().put(vmUid, mipsShareRequested);
setPesInUse(getPesInUse() + mipsShareRequested.size());
if (getVmsMigratingIn().contains(vmUid)) {
totalRequestedMips *= 0.1; // performance cost incurred by the destination host = 10% MIPS
// the destination host only experiences 10% of the migrating VM's MIPS
totalRequestedMips *= 0.1;
}
double peMips = getPeCapacity();
if (getAvailableMips() >= totalRequestedMips) {
List<Double> mipsShareAllocated = new ArrayList<Double>();
for (Double mipsRequested : mipsShareRequested) {
if (mipsRequested > peMips) {
mipsRequested = peMips; // Over-subscription is implemented by the cost of performance degradation
}
if (getVmsMigratingOut().contains(vmUid)) {
mipsRequested *= 0.9; // performance degradation due to migration = 10% MIPS
// performance degradation due to migration = 10% MIPS
mipsRequested *= 0.9;
} else if (getVmsMigratingIn().contains(vmUid)) {
mipsRequested *= 0.1; // performance cost incurred by the destination host = 10% MIPS
// the destination host only experiences 10% of the migrating VM's MIPS
mipsRequested *= 0.1;
}
mipsShareAllocated.add(mipsRequested);
}
if (getAvailableMips() >= totalRequestedMips) {
getMipsMap().put(vmUid, mipsShareAllocated);
setAvailableMips(getAvailableMips() - totalRequestedMips);
} else {
int pesSkipped = 0;
for (Entry<String, List<Double>> entry : getMipsMap().entrySet()) {
List<Double> mipsMap = entry.getValue();
if (getVmsMigratingIn().contains(entry.getKey())) {
pesSkipped += mipsMap.size();
continue;
redistributeMipsDueToOverSubscription();
}
return true;
}
for (int i = 0; i < mipsMap.size(); i++) {
if (mipsMap.get(i) == 0) {
pesSkipped++;
/**
* This method recalculates the distribution of MIPS among VMs, considering an eventual shortage of
* MIPS compared to the amount requested by the VMs.
*/
protected void redistributeMipsDueToOverSubscription() {
// First, we calculate the scaling factor - the MIPS allocation for all VMs will be scaled
// proportionally
double totalRequiredMipsByAllVms = 0;
for (Entry<String, List<Double>> entry : getMipsMapRequested().entrySet()) {
double requiredMipsByThisVm = MathUtil.sum(entry.getValue());
if (getVmsMigratingIn().contains(entry.getKey())) {
// the destination host only experiences 10% of the migrating VM's MIPS
requiredMipsByThisVm *= 0.1;
}
totalRequiredMipsByAllVms += requiredMipsByThisVm;
}
double totalAvailableMips = PeList.getTotalMips(getPeList());
double scalingFactor = totalAvailableMips / totalRequiredMipsByAllVms;
// Clear the old MIPS allocation
getMipsMap().clear();
// Update the actual MIPS allocated to the VMs
for (Entry<String, List<Double>> entry : getMipsMapRequested().entrySet()) {
String vmUid = entry.getKey();
List<Double> requestedMips = entry.getValue();
List<Double> updatedMipsAllocation = new ArrayList<Double>();
for (Double mips : requestedMips) {
if (getVmsMigratingOut().contains(vmUid)) {
// the original amount is scaled
mips *= scalingFactor;
// performance degradation due to migration = 10% MIPS
mips *= 0.9;
} else if (getVmsMigratingIn().contains(vmUid)) {
// the destination host only experiences 10% of the migrating VM's MIPS
mips *= 0.1;
// the remaining 10% is then scaled by the same factor
mips *= scalingFactor;
} else {
mips *= scalingFactor;
}
if (getVmsMigratingIn().contains(vmUid)) {
pesSkipped += mipsShareRequested.size();
updatedMipsAllocation.add(Math.floor(mips));
}
updateShortage();
// add in the new map
getMipsMap().put(vmUid, updatedMipsAllocation);
}
return true;
// As the host is over-subscribed, there are no more available MIPS
setAvailableMips(0);
}
}
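/*
 * Worked illustration (a standalone sketch, not part of this commit; the class name is
 * illustrative): the proportional scaling performed by redistributeMipsDueToOverSubscription()
 * for the scenario of testAllocatePesForVmShortageEqualsToAllocatedMips below, i.e. one
 * 3500-MIPS PE and four VMs requesting 170, 2000, 10 and 2000 MIPS, none of them migrating.
 */
public class OverSubscriptionScalingSketch {

    public static void main(String[] args) {
        double totalAvailableMips = 3500;                 // a single PE of 3500 MIPS
        double[] requestedMips = { 170, 2000, 10, 2000 }; // requests of the four VMs

        // total demand across all VMs: 4180 MIPS, which exceeds the host capacity
        double totalRequiredMipsByAllVms = 0;
        for (double mips : requestedMips) {
            totalRequiredMipsByAllVms += mips;
        }

        // every allocation is scaled by the same factor, here 3500 / 4180 ~ 0.837
        double scalingFactor = totalAvailableMips / totalRequiredMipsByAllVms;

        // scaled allocations are floored; e.g. 2000 MIPS -> floor(1674.6) = 1674 MIPS,
        // which is the value asserted for vm4 in the test below
        for (double mips : requestedMips) {
            System.out.println(mips + " requested -> " + Math.floor(mips * scalingFactor) + " allocated");
        }
    }
}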
......@@ -11,6 +11,20 @@ import flanagan.analysis.Stat;
*/
public class MathUtil {
/**
* Sums a list of numbers.
*
* @param list the list
* @return the sum of the numbers in the list
*/
public static double sum(List<? extends Number> list) {
double sum = 0;
for (Number number : list) {
sum += number.doubleValue();
}
return sum;
}
/**
* List to array.
*
......
/*
* Title: CloudSim Toolkit Description: CloudSim (Cloud Simulation) Toolkit for Modeling and
* Simulation of Clouds Licence: GPL - http://www.gnu.org/copyleft/gpl.html
*
* Copyright (c) 2009-2010, The University of Melbourne, Australia
*/
package org.cloudbus.cloudsim;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertSame;
import static org.junit.Assert.assertTrue;
import java.util.ArrayList;
import java.util.List;
import org.cloudbus.cloudsim.lists.PeList;
import org.cloudbus.cloudsim.provisioners.PeProvisionerSimple;
import org.junit.Before;
import org.junit.Test;
/**
* @author Anton Beloglazov
* @since CloudSim Toolkit 2.0
*/
public class VmSchedulerTimeSharedOverSubscriptionTest {
private static final double MIPS = 1000;
private VmSchedulerTimeSharedOverSubscription vmScheduler;
private List<Pe> peList;
private Vm vm1;
private Vm vm2;
// private Vm vm3;
@Before
public void setUp() throws Exception {
peList = new ArrayList<Pe>();
peList.add(new Pe(0, new PeProvisionerSimple(MIPS)));
peList.add(new Pe(1, new PeProvisionerSimple(MIPS)));
vmScheduler = new VmSchedulerTimeSharedOverSubscription(peList);
vm1 = new Vm(0, 0, MIPS / 4, 1, 0, 0, 0, "", null);
vm2 = new Vm(1, 0, MIPS / 2, 2, 0, 0, 0, "", null);
// vm3 = new Vm(2, 0, MIPS, 2, 0, 0, 0, 0, "", null);
}
@Test
public void testInit() {
assertSame(peList, vmScheduler.getPeList());
assertEquals(PeList.getTotalMips(peList), vmScheduler.getAvailableMips(), 0);
assertEquals(PeList.getTotalMips(peList), vmScheduler.getMaxAvailableMips(), 0);
assertEquals(0, vmScheduler.getTotalAllocatedMipsForVm(vm1), 0);
}
@Test
public void testAllocatePesForVm() {
List<Double> mipsShare1 = new ArrayList<Double>();
mipsShare1.add(MIPS / 4);
assertTrue(vmScheduler.allocatePesForVm(vm1, mipsShare1));
assertEquals(PeList.getTotalMips(peList) - MIPS / 4, vmScheduler.getAvailableMips(), 0);
assertEquals(PeList.getTotalMips(peList) - MIPS / 4, vmScheduler.getMaxAvailableMips(), 0);
assertEquals(MIPS / 4, vmScheduler.getTotalAllocatedMipsForVm(vm1), 0);
List<Double> mipsShare2 = new ArrayList<Double>();
mipsShare2.add(MIPS / 2);
mipsShare2.add(MIPS / 8);
assertTrue(vmScheduler.allocatePesForVm(vm2, mipsShare2));
assertEquals(
PeList.getTotalMips(peList) - MIPS / 4 - MIPS / 2 - MIPS / 8,
vmScheduler.getAvailableMips(),
0);
assertEquals(
PeList.getTotalMips(peList) - MIPS / 4 - MIPS / 2 - MIPS / 8,
vmScheduler.getMaxAvailableMips(),
0);
assertEquals(MIPS / 2 + MIPS / 8, vmScheduler.getTotalAllocatedMipsForVm(vm2), 0);
// List<Double> mipsShare3 = new ArrayList<Double>();
// mipsShare3.add(MIPS);
// mipsShare3.add(MIPS);
//
// assertTrue(vmScheduler.allocatePesForVm(vm3, mipsShare3));
//
// assertEquals(0, vmScheduler.getAvailableMips(), 0);
// assertEquals(0, vmScheduler.getMaxAvailableMips(), 0);
// assertEquals(MIPS / 4 - (MIPS / 4 + MIPS / 2 + MIPS / 8 + MIPS + MIPS - MIPS * 2) / 5,
// vmScheduler.getTotalAllocatedMipsForVm(vm1), 0);
// assertEquals(MIPS / 2 + MIPS / 8 - (MIPS / 4 + MIPS / 2 + MIPS / 8 + MIPS + MIPS - MIPS *
// 2) * 2 / 5, vmScheduler.getTotalAllocatedMipsForVm(vm2), 0);
// assertEquals(MIPS * 2 - (MIPS / 4 + MIPS / 2 + MIPS / 8 + MIPS + MIPS - MIPS * 2) * 2 /
// 5, vmScheduler.getTotalAllocatedMipsForVm(vm3), 0);
//
// vmScheduler.deallocatePesForVm(vm1);
//
// assertEquals(0, vmScheduler.getAvailableMips(), 0);
// assertEquals(0, vmScheduler.getMaxAvailableMips(), 0);
// assertEquals(MIPS / 2 + MIPS / 8 - (MIPS / 2 + MIPS / 8 + MIPS + MIPS - MIPS * 2) * 2 /
// 4, vmScheduler.getTotalAllocatedMipsForVm(vm2), 0);
// assertEquals(MIPS * 2 - (MIPS / 2 + MIPS / 8 + MIPS + MIPS - MIPS * 2) * 2 / 4,
// vmScheduler.getTotalAllocatedMipsForVm(vm3), 0);
//
// vmScheduler.deallocatePesForVm(vm3);
//
// assertEquals(MIPS * 2 - MIPS / 2 - MIPS / 8, vmScheduler.getAvailableMips(), 0);
// assertEquals(MIPS * 2 - MIPS / 2 - MIPS / 8, vmScheduler.getMaxAvailableMips(), 0);
// assertEquals(0, vmScheduler.getTotalAllocatedMipsForVm(vm3), 0);
//
// vmScheduler.deallocatePesForVm(vm2);
vmScheduler.deallocatePesForAllVms();
assertEquals(PeList.getTotalMips(peList), vmScheduler.getAvailableMips(), 0);
assertEquals(PeList.getTotalMips(peList), vmScheduler.getMaxAvailableMips(), 0);
assertEquals(0, vmScheduler.getTotalAllocatedMipsForVm(vm2), 0);
}
@Test
public void testAllocatePesForVmInMigration() {
vm1.setInMigration(true);
vm2.setInMigration(true);
List<Double> mipsShare1 = new ArrayList<Double>();
mipsShare1.add(MIPS / 4);
assertTrue(vmScheduler.allocatePesForVm(vm1, mipsShare1));
assertEquals(PeList.getTotalMips(peList) - MIPS / 4, vmScheduler.getAvailableMips(), 0);
assertEquals(PeList.getTotalMips(peList) - MIPS / 4, vmScheduler.getMaxAvailableMips(), 0);
assertEquals(0.9 * MIPS / 4, vmScheduler.getTotalAllocatedMipsForVm(vm1), 0);
List<Double> mipsShare2 = new ArrayList<Double>();
mipsShare2.add(MIPS / 2);
mipsShare2.add(MIPS / 8);
assertTrue(vmScheduler.allocatePesForVm(vm2, mipsShare2));
assertEquals(
PeList.getTotalMips(peList) - MIPS / 4 - MIPS / 2 - MIPS / 8,
vmScheduler.getAvailableMips(),
0);
assertEquals(
PeList.getTotalMips(peList) - MIPS / 4 - MIPS / 2 - MIPS / 8,
vmScheduler.getMaxAvailableMips(),
0);
assertEquals(0.9 * MIPS / 2 + 0.9 * MIPS / 8, vmScheduler.getTotalAllocatedMipsForVm(vm2), 0);
vmScheduler.deallocatePesForAllVms();
assertEquals(PeList.getTotalMips(peList), vmScheduler.getAvailableMips(), 0);
assertEquals(PeList.getTotalMips(peList), vmScheduler.getMaxAvailableMips(), 0);
assertEquals(0, vmScheduler.getTotalAllocatedMipsForVm(vm2), 0);
}
@Test
public void testAllocatePesForVmShortageEqualsToAllocatedMips() {
List<Pe> peList = new ArrayList<Pe>();
peList.add(new Pe(0, new PeProvisionerSimple(3500)));
VmScheduler vmScheduler = new VmSchedulerTimeSharedOverSubscription(peList);
Vm vm1 = new Vm(0, 0, 170, 1, 0, 0, 0, "", null);
Vm vm2 = new Vm(1, 0, 2000, 1, 0, 0, 0, "", null);
Vm vm3 = new Vm(2, 0, 10, 1, 0, 0, 0, "", null);
Vm vm4 = new Vm(3, 0, 2000, 1, 0, 0, 0, "", null);
List<Double> mipsShare1 = new ArrayList<Double>();
mipsShare1.add(170.0);
List<Double> mipsShare2 = new ArrayList<Double>();
mipsShare2.add(2000.0);
List<Double> mipsShare3 = new ArrayList<Double>();
mipsShare3.add(10.0);
List<Double> mipsShare4 = new ArrayList<Double>();
mipsShare4.add(2000.0);
assertTrue(vmScheduler.allocatePesForVm(vm1, mipsShare1));
assertEquals(3330, vmScheduler.getAvailableMips(), 0);
assertEquals(170, vmScheduler.getTotalAllocatedMipsForVm(vm1), 0);
assertTrue(vmScheduler.allocatePesForVm(vm2, mipsShare2));
assertEquals(1330, vmScheduler.getAvailableMips(), 0);
assertEquals(2000, vmScheduler.getTotalAllocatedMipsForVm(vm2), 0);
assertTrue(vmScheduler.allocatePesForVm(vm3, mipsShare3));
assertEquals(1320, vmScheduler.getAvailableMips(), 0);
assertEquals(10, vmScheduler.getTotalAllocatedMipsForVm(vm3), 0);
assertTrue(vmScheduler.allocatePesForVm(vm4, mipsShare4));
assertEquals(0, vmScheduler.getAvailableMips(), 0);
assertEquals(1674, vmScheduler.getTotalAllocatedMipsForVm(vm4), 0);
vmScheduler.deallocatePesForAllVms();
assertEquals(3500, vmScheduler.getAvailableMips(), 0);
assertEquals(3500, vmScheduler.getMaxAvailableMips(), 0);
}
}
/*
* Title: CloudSim Toolkit
* Description: CloudSim (Cloud Simulation) Toolkit for Modeling and Simulation of Clouds
* Licence: GPL - http://www.gnu.org/copyleft/gpl.html
* Title: CloudSim Toolkit Description: CloudSim (Cloud Simulation) Toolkit for Modeling and
* Simulation of Clouds Licence: GPL - http://www.gnu.org/copyleft/gpl.html
*
* Copyright (c) 2009-2010, The University of Melbourne, Australia
*/
......@@ -29,10 +28,14 @@ public class VmSchedulerTimeSharedTest {
private static final double MIPS = 1000;
private VmSchedulerTimeShared vmScheduler;
private List<Pe> peList;
private Vm vm1;
private Vm vm2;
//private Vm vm3;
// private Vm vm3;
@Before
public void setUp() throws Exception {
......@@ -42,7 +45,7 @@ public class VmSchedulerTimeSharedTest {
vmScheduler = new VmSchedulerTimeShared(peList);
vm1 = new Vm(0, 0, MIPS / 4, 1, 0, 0, 0, "", null);
vm2 = new Vm(1, 0, MIPS / 2, 2, 0, 0, 0, "", null);
//vm3 = new Vm(2, 0, MIPS, 2, 0, 0, 0, 0, "", null);
// vm3 = new Vm(2, 0, MIPS, 2, 0, 0, 0, 0, "", null);
}
@Test
......@@ -70,8 +73,14 @@ public class VmSchedulerTimeSharedTest {
assertTrue(vmScheduler.allocatePesForVm(vm2, mipsShare2));
assertEquals(PeList.getTotalMips(peList) - MIPS / 4 - MIPS / 2 - MIPS / 8, vmScheduler.getAvailableMips(), 0);
assertEquals(PeList.getTotalMips(peList) - MIPS / 4 - MIPS / 2 - MIPS / 8, vmScheduler.getMaxAvailableMips(), 0);
assertEquals(
PeList.getTotalMips(peList) - MIPS / 4 - MIPS / 2 - MIPS / 8,
vmScheduler.getAvailableMips(),
0);
assertEquals(
PeList.getTotalMips(peList) - MIPS / 4 - MIPS / 2 - MIPS / 8,
vmScheduler.getMaxAvailableMips(),
0);
assertEquals(MIPS / 2 + MIPS / 8, vmScheduler.getTotalAllocatedMipsForVm(vm2), 0);
// List<Double> mipsShare3 = new ArrayList<Double>();
......@@ -82,16 +91,21 @@ public class VmSchedulerTimeSharedTest {
//
// assertEquals(0, vmScheduler.getAvailableMips(), 0);
// assertEquals(0, vmScheduler.getMaxAvailableMips(), 0);
// assertEquals(MIPS / 4 - (MIPS / 4 + MIPS / 2 + MIPS / 8 + MIPS + MIPS - MIPS * 2) / 5, vmScheduler.getTotalAllocatedMipsForVm(vm1), 0);
// assertEquals(MIPS / 2 + MIPS / 8 - (MIPS / 4 + MIPS / 2 + MIPS / 8 + MIPS + MIPS - MIPS * 2) * 2 / 5, vmScheduler.getTotalAllocatedMipsForVm(vm2), 0);
// assertEquals(MIPS * 2 - (MIPS / 4 + MIPS / 2 + MIPS / 8 + MIPS + MIPS - MIPS * 2) * 2 / 5, vmScheduler.getTotalAllocatedMipsForVm(vm3), 0);
// assertEquals(MIPS / 4 - (MIPS / 4 + MIPS / 2 + MIPS / 8 + MIPS + MIPS - MIPS * 2) / 5,
// vmScheduler.getTotalAllocatedMipsForVm(vm1), 0);
// assertEquals(MIPS / 2 + MIPS / 8 - (MIPS / 4 + MIPS / 2 + MIPS / 8 + MIPS + MIPS - MIPS *
// 2) * 2 / 5, vmScheduler.getTotalAllocatedMipsForVm(vm2), 0);
// assertEquals(MIPS * 2 - (MIPS / 4 + MIPS / 2 + MIPS / 8 + MIPS + MIPS - MIPS * 2) * 2 /
// 5, vmScheduler.getTotalAllocatedMipsForVm(vm3), 0);
//
// vmScheduler.deallocatePesForVm(vm1);
//
// assertEquals(0, vmScheduler.getAvailableMips(), 0);
// assertEquals(0, vmScheduler.getMaxAvailableMips(), 0);
// assertEquals(MIPS / 2 + MIPS / 8 - (MIPS / 2 + MIPS / 8 + MIPS + MIPS - MIPS * 2) * 2 / 4, vmScheduler.getTotalAllocatedMipsForVm(vm2), 0);
// assertEquals(MIPS * 2 - (MIPS / 2 + MIPS / 8 + MIPS + MIPS - MIPS * 2) * 2 / 4, vmScheduler.getTotalAllocatedMipsForVm(vm3), 0);
// assertEquals(MIPS / 2 + MIPS / 8 - (MIPS / 2 + MIPS / 8 + MIPS + MIPS - MIPS * 2) * 2 /
// 4, vmScheduler.getTotalAllocatedMipsForVm(vm2), 0);
// assertEquals(MIPS * 2 - (MIPS / 2 + MIPS / 8 + MIPS + MIPS - MIPS * 2) * 2 / 4,
// vmScheduler.getTotalAllocatedMipsForVm(vm3), 0);
//
// vmScheduler.deallocatePesForVm(vm3);
//
......@@ -128,8 +142,14 @@ public class VmSchedulerTimeSharedTest {
assertTrue(vmScheduler.allocatePesForVm(vm2, mipsShare2));
assertEquals(PeList.getTotalMips(peList) - MIPS / 4 - MIPS / 2 - MIPS / 8, vmScheduler.getAvailableMips(), 0);
assertEquals(PeList.getTotalMips(peList) - MIPS / 4 - MIPS / 2 - MIPS / 8, vmScheduler.getMaxAvailableMips(), 0);
assertEquals(
PeList.getTotalMips(peList) - MIPS / 4 - MIPS / 2 - MIPS / 8,
vmScheduler.getAvailableMips(),
0);
assertEquals(
PeList.getTotalMips(peList) - MIPS / 4 - MIPS / 2 - MIPS / 8,
vmScheduler.getMaxAvailableMips(),
0);
assertEquals(0.9 * MIPS / 2 + 0.9 * MIPS / 8, vmScheduler.getTotalAllocatedMipsForVm(vm2), 0);
vmScheduler.deallocatePesForAllVms();
......@@ -139,48 +159,4 @@ public class VmSchedulerTimeSharedTest {
assertEquals(0, vmScheduler.getTotalAllocatedMipsForVm(vm2), 0);
}
@Test
public void testAllocatePesForVmShortageEqualsToAllocatedMips() {
List<Pe> peList = new ArrayList<Pe>();
peList.add(new Pe(0, new PeProvisionerSimple(3500)));
VmScheduler vmScheduler = new VmSchedulerTimeShared(peList);
Vm vm1 = new Vm(0, 0, 170, 1, 0, 0, 0, "", null);
Vm vm2 = new Vm(1, 0, 2000, 1, 0, 0, 0, "", null);
Vm vm3 = new Vm(2, 0, 10, 1, 0, 0, 0, "", null);
Vm vm4 = new Vm(3, 0, 2000, 1, 0, 0, 0, "", null);
List<Double> mipsShare1 = new ArrayList<Double>();
mipsShare1.add(170.0);
List<Double> mipsShare2 = new ArrayList<Double>();
mipsShare2.add(2000.0);
List<Double> mipsShare3 = new ArrayList<Double>();
mipsShare3.add(10.0);
List<Double> mipsShare4 = new ArrayList<Double>();
mipsShare4.add(2000.0);
assertTrue(vmScheduler.allocatePesForVm(vm1, mipsShare1));
assertEquals(3330, vmScheduler.getAvailableMips(), 0);
assertEquals(170, vmScheduler.getTotalAllocatedMipsForVm(vm1), 0);
assertTrue(vmScheduler.allocatePesForVm(vm2, mipsShare2));
assertEquals(1330, vmScheduler.getAvailableMips(), 0);
assertEquals(2000, vmScheduler.getTotalAllocatedMipsForVm(vm2), 0);
assertTrue(vmScheduler.allocatePesForVm(vm3, mipsShare3));
assertEquals(1320, vmScheduler.getAvailableMips(), 0);
assertEquals(10, vmScheduler.getTotalAllocatedMipsForVm(vm3), 0);
assertTrue(vmScheduler.allocatePesForVm(vm4, mipsShare4));
assertEquals(0, vmScheduler.getAvailableMips(), 0);
assertEquals(1750, vmScheduler.getTotalAllocatedMipsForVm(vm4), 0);
vmScheduler.deallocatePesForAllVms();
assertEquals(3500, vmScheduler.getAvailableMips(), 0);
assertEquals(3500, vmScheduler.getMaxAvailableMips(), 0);
}
}
......@@ -3,7 +3,9 @@ package org.cloudbus.cloudsim.util;
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import org.cloudbus.cloudsim.util.MathUtil;
import java.util.ArrayList;
import java.util.List;
import org.junit.Test;
public class MathUtilTest {
......@@ -12,6 +14,8 @@ public class MathUtilTest {
public static final double IQR1 = 10;
public static final double SUM1 = 1206;
public static final double[] DATA2 = { 2, 4, 7, -20, 22, -1, 0, -1, 7, 15, 8, 4, -4, 11, 11, 12, 3, 12,
18, 1 };
......@@ -48,4 +52,21 @@ public class MathUtilTest {
assertArrayEquals(NON_ZERO_TAIL, MathUtil.trimZeroTail(DATA4), 0);
}
@Test
public void testSum() {
List<Double> data1 = new ArrayList<Double>();
for (Double number : DATA1) {
data1.add(number);
}
assertEquals(SUM1, MathUtil.sum(data1), 0);
List<Double> data2 = new ArrayList<Double>();
for (Double number : DATA1) {
data2.add(number / 10);
}
assertEquals(SUM1 / 10, MathUtil.sum(data2), 0);
}
}