Commit 59a0ddda authored by Ahmad Siavashi

untested working example added

parent 3a7beb19
package org.cloudbus.cloudsim.examples.gpu;
import java.text.DecimalFormat;
import java.util.ArrayList;
import java.util.Calendar;
import java.util.Collections;
import java.util.LinkedList;
import java.util.List;
import java.util.Map.Entry;
import org.cloudbus.cloudsim.Cloudlet;
import org.cloudbus.cloudsim.DatacenterCharacteristics;
import org.cloudbus.cloudsim.Log;
import org.cloudbus.cloudsim.Pe;
import org.cloudbus.cloudsim.Storage;
import org.cloudbus.cloudsim.UtilizationModel;
import org.cloudbus.cloudsim.UtilizationModelFull;
import org.cloudbus.cloudsim.VmScheduler;
import org.cloudbus.cloudsim.VmSchedulerTimeShared;
import org.cloudbus.cloudsim.core.CloudSim;
import org.cloudbus.cloudsim.gpu.GpuCloudlet;
import org.cloudbus.cloudsim.gpu.GpuCloudletSchedulerTimeShared;
import org.cloudbus.cloudsim.gpu.GpuHost;
import org.cloudbus.cloudsim.gpu.GpuHostTags;
import org.cloudbus.cloudsim.gpu.GpuTask;
import org.cloudbus.cloudsim.gpu.GpuTaskSchedulerLeftover;
import org.cloudbus.cloudsim.gpu.GpuVm;
import org.cloudbus.cloudsim.gpu.GpuVmTags;
import org.cloudbus.cloudsim.gpu.GridVgpuTags;
import org.cloudbus.cloudsim.gpu.Pgpu;
import org.cloudbus.cloudsim.gpu.Vgpu;
import org.cloudbus.cloudsim.gpu.VgpuScheduler;
import org.cloudbus.cloudsim.gpu.VideoCard;
import org.cloudbus.cloudsim.gpu.VideoCardTags;
import org.cloudbus.cloudsim.gpu.allocation.VideoCardAllocationPolicy;
import org.cloudbus.cloudsim.gpu.allocation.VideoCardAllocationPolicySimple;
import org.cloudbus.cloudsim.gpu.performance.models.PerformanceModel;
import org.cloudbus.cloudsim.gpu.performance.models.PerformanceModelGpuConstant;
import org.cloudbus.cloudsim.gpu.power.PowerGpuDatacenter;
import org.cloudbus.cloudsim.gpu.power.PowerGpuDatacenterBroker;
import org.cloudbus.cloudsim.gpu.power.PowerGpuHost;
import org.cloudbus.cloudsim.gpu.power.PowerVideoCard;
import org.cloudbus.cloudsim.gpu.power.models.GpuHostPowerModelLinear;
import org.cloudbus.cloudsim.gpu.power.models.VideoCardPowerModel;
import org.cloudbus.cloudsim.gpu.power.models.VideoCardPowerModelNvidiaGridK1;
import org.cloudbus.cloudsim.gpu.provisioners.BwProvisionerRelaxed;
import org.cloudbus.cloudsim.gpu.provisioners.GpuBwProvisionerShared;
import org.cloudbus.cloudsim.gpu.provisioners.GpuGddramProvisionerSimple;
import org.cloudbus.cloudsim.gpu.provisioners.VideoCardBwProvisioner;
import org.cloudbus.cloudsim.gpu.provisioners.VideoCardBwProvisionerShared;
import org.cloudbus.cloudsim.gpu.remote.RemoteGpuHost;
import org.cloudbus.cloudsim.gpu.remote.RemoteGpuVmAllocationPolicyFirstFit;
import org.cloudbus.cloudsim.gpu.remote.RemoteVgpuSchedulerFairShare;
import org.cloudbus.cloudsim.gpu.remote.RemoteVgpuTags;
import org.cloudbus.cloudsim.gpu.selection.PgpuSelectionPolicy;
import org.cloudbus.cloudsim.gpu.selection.PgpuSelectionPolicySimple;
import org.cloudbus.cloudsim.lists.VmList;
import org.cloudbus.cloudsim.power.models.PowerModel;
import org.cloudbus.cloudsim.provisioners.PeProvisionerSimple;
import org.cloudbus.cloudsim.provisioners.RamProvisionerSimple;
import de.vandermeer.asciitable.AsciiTable;
/**
* This example demonstrates the use of the gpu package for simulating remote
* GPU virtualization. <br>
* Performance Model: on <br>
* Interference Model: off <br>
* Power Model: on
*
* @author Ahmad Siavashi
*
*/
public class CloudSimGpuExample6 {
/** The cloudlet list. */
private static List<GpuCloudlet> cloudletList;
/** The vmlist. */
private static List<GpuVm> vmlist;
/** The datacenter list. */
private static List<PowerGpuDatacenter> datacenterList;
/** number of VMs. */
private static int numVms = 1;
/** number of GpuCloudlets. */
private static int numGpuCloudlets = 1;
/**
* The time resolution at which simulation progress is evaluated.
*/
private static double schedulingInterval = 20;
/**
* Creates main() to run this example.
*
* @param args
* the args
*/
@SuppressWarnings("unused")
public static void main(String[] args) {
Log.printLine("Starting CloudSimGpuExample6...");
try {
// number of cloud users
int num_user = 1;
Calendar calendar = Calendar.getInstance();
// trace events
boolean trace_flag = true;
// CloudSim initialization
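// CloudSim.init() must be called before any simulation entities (datacenters, brokers, VMs) are created.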
CloudSim.init(num_user, calendar, trace_flag);
// Create a list to hold created datacenters
datacenterList = new ArrayList<>();
// Create one Datacenter
PowerGpuDatacenter datacenter = createDatacenter("Datacenter");
// add the datacenter to the datacenterList
datacenterList.add(datacenter);
// Create one Broker
PowerGpuDatacenterBroker broker = createBroker("Broker");
int brokerId = broker.getId();
// Create a list to hold created VMs
vmlist = new ArrayList<GpuVm>();
// Create a list to hold issued Cloudlets
cloudletList = new ArrayList<GpuCloudlet>();
// Create VMs
for (int i = 0; i < numVms; i++) {
int vmId = i;
int vgpuId = i;
// Create a VM
GpuVm vm = createGpuVm(vmId, vgpuId, brokerId);
// add the VM to the vmList
vmlist.add(vm);
}
// Create GpuCloudlets
for (int i = 0; i < numGpuCloudlets; i++) {
int gpuCloudletId = i;
int gpuTaskId = i;
// Create Cloudlet
GpuCloudlet cloudlet = createGpuCloudlet(gpuCloudletId, gpuTaskId, brokerId);
// add the cloudlet to the list
cloudletList.add(cloudlet);
}
// Cloudlet-VM assignment
for (int i = 0; i < numGpuCloudlets; i++) {
GpuCloudlet cloudlet = cloudletList.get(i);
cloudlet.setVmId(i % numVms);
}
// submit vm list to the broker
broker.submitVmList(vmlist);
// submit cloudlet list to the broker
broker.submitCloudletList(cloudletList);
// Disable Logs
Log.disable();
// Starts the simulation
CloudSim.startSimulation();
CloudSim.stopSimulation();
Log.enable();
// Print results when simulation is over
List<Cloudlet> newList = broker.getCloudletReceivedList();
printCloudletList(newList);
Log.printLine("CloudSimGpuExample6 finished!");
} catch (Exception e) {
e.printStackTrace();
Log.printLine("An unexpected error occurred while running the simulation");
}
}
/**
* Create a GpuCloudlet.
*/
private static GpuCloudlet createGpuCloudlet(int gpuCloudletId, int gpuTaskId, int brokerId) {
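// A GpuCloudlet couples a host-side (CPU) workload with a GpuTask that runs on the VM's vGPU.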
// Cloudlet properties
long length = (long) (400 * GpuHostTags.DUAL_INTEL_XEON_E5_2620_V3_PE_MIPS);
long fileSize = 300;
long outputSize = 300;
int pesNumber = 1;
UtilizationModel cpuUtilizationModel = new UtilizationModelFull();
UtilizationModel ramUtilizationModel = new UtilizationModelFull();
UtilizationModel bwUtilizationModel = new UtilizationModelFull();
// GpuTask properties
long taskLength = (long) (VideoCardTags.NVIDIA_K1_CARD_PE_MIPS * 150);
long taskInputSize = 128;
long taskOutputSize = 128;
long requestedGddramSize = 4 * 1024;
int numberOfBlocks = 2;
UtilizationModel gpuUtilizationModel = new UtilizationModelFull();
UtilizationModel gddramUtilizationModel = new UtilizationModelFull();
UtilizationModel gddramBwUtilizationModel = new UtilizationModelFull();
GpuTask gpuTask = new GpuTask(gpuTaskId, taskLength, numberOfBlocks, taskInputSize, taskOutputSize,
requestedGddramSize, gpuUtilizationModel, gddramUtilizationModel, gddramBwUtilizationModel);
GpuCloudlet gpuCloudlet = new GpuCloudlet(gpuCloudletId, length, pesNumber, fileSize, outputSize,
cpuUtilizationModel, ramUtilizationModel, bwUtilizationModel, gpuTask, false);
gpuCloudlet.setUserId(brokerId);
return gpuCloudlet;
}
/**
* Create a VM.
*/
private static GpuVm createGpuVm(int vmId, int vgpuId, int brokerId) {
// VM description
double mips = GpuHostTags.DUAL_INTEL_XEON_E5_2620_V3_PE_MIPS;
// image size (GB)
int size = 10;
// vm memory (GB)
int ram = 2;
long bw = 100;
// number of cpus
int pesNumber = 4;
// VMM name
String vmm = "Xen";
// Create VM
GpuVm vm = new GpuVm(vmId, brokerId, mips, pesNumber, ram, bw, size, vmm, GpuVmTags.GPU_VM_CUSTOM,
new GpuCloudletSchedulerTimeShared());
// Create GpuTask Scheduler
GpuTaskSchedulerLeftover gpuTaskScheduler = new GpuTaskSchedulerLeftover();
// Create a Vgpu
final int vgpuType = RemoteVgpuTags.REMOTE_SHARED;
// GDDRAM: 4 GB
final int vgpuGddram = 4096;
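// Processing capacity is left at zero here; the vGPU scheduler assigns PEs and MIPS when the vGPU
// is mapped to a physical GPU (see RemoteVgpuSchedulerFairShare.allocatePgpuForVgpu).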
Vgpu vgpu = new Vgpu(vgpuId, 0, 0, vgpuGddram, 0, vgpuType, gpuTaskScheduler, VideoCardTags.PCI_E_3_X16_BW);
vm.setVgpu(vgpu);
return vm;
}
/**
* Create a datacenter.
*
* @param name
* the name of the datacenter
*
* @return the datacenter
*/
private static PowerGpuDatacenter createDatacenter(String name) {
// We need to create a list to store our machines
List<GpuHost> hostList = new ArrayList<>();
/** Create 2 hosts, one is GPU-equipped */
/** A host with GPU **/
// Number of host's video cards
int numVideoCards = GpuHostTags.DUAL_INTEL_XEON_E5_2620_V3_NUM_VIDEO_CARDS;
// To hold video cards
List<VideoCard> videoCards = new ArrayList<VideoCard>(numVideoCards);
for (int videoCardId = 0; videoCardId < numVideoCards; videoCardId++) {
List<Pgpu> pgpus = new ArrayList<>();
// Adding an NVIDIA K1 Card
double mips = VideoCardTags.NVIDIA_K1_CARD_PE_MIPS;
int gddram = VideoCardTags.NVIDIA_K1_CARD_GPU_MEM;
long bw = VideoCardTags.NVIDIA_K1_CARD_BW_PER_BUS;
for (int pgpuId = 0; pgpuId < VideoCardTags.NVIDIA_K1_CARD_GPUS; pgpuId++) {
List<Pe> pes = new ArrayList<Pe>();
for (int peId = 0; peId < VideoCardTags.NVIDIA_K1_CARD_GPU_PES; peId++) {
pes.add(new Pe(peId, new PeProvisionerSimple(mips)));
}
pgpus.add(
new Pgpu(pgpuId, pes, new GpuGddramProvisionerSimple(gddram), new GpuBwProvisionerShared(bw)));
}
// Pgpu selection policy
PgpuSelectionPolicy pgpuSelectionPolicy = new PgpuSelectionPolicySimple();
// Performance Model
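// A constant model is used: vGPUs are assumed to incur a fixed 10% performance loss
// (the exact semantics follow PerformanceModelGpuConstant).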
double performanceLoss = 0.1;
PerformanceModel<VgpuScheduler, Vgpu> performanceModel = new PerformanceModelGpuConstant(performanceLoss);
// Scheduler
RemoteVgpuSchedulerFairShare vgpuScheduler = new RemoteVgpuSchedulerFairShare(
VideoCardTags.NVIDIA_K1_CARD, pgpus, pgpuSelectionPolicy, performanceModel);
// PCI Express Bus Bw Provisioner
VideoCardBwProvisioner videoCardBwProvisioner = new VideoCardBwProvisionerShared(
VideoCardTags.PCI_E_3_X16_BW);
// Video Card Power Model
VideoCardPowerModel videoCardPowerModel = new VideoCardPowerModelNvidiaGridK1(false);
// Create a video card
PowerVideoCard videoCard = new PowerVideoCard(videoCardId, VideoCardTags.NVIDIA_K1_CARD, vgpuScheduler,
videoCardBwProvisioner, videoCardPowerModel);
videoCards.add(videoCard);
}
// Create a host
int hostId = GpuHostTags.DUAL_INTEL_XEON_E5_2620_V3;
// A Machine contains one or more PEs or CPUs/Cores.
List<Pe> peList = new ArrayList<Pe>();
// PE's MIPS power
double mips = GpuHostTags.DUAL_INTEL_XEON_E5_2620_V3_PE_MIPS;
for (int peId = 0; peId < GpuHostTags.DUAL_INTEL_XEON_E5_2620_V3_NUM_PES; peId++) {
// Create PEs and add these into a list.
peList.add(new Pe(peId, new PeProvisionerSimple(mips)));
}
// Create Host with its id and list of PEs and add them to the list of machines
// host memory (MB)
int ram = GpuHostTags.DUAL_INTEL_XEON_E5_2620_V3_RAM;
// host storage
long storage = GpuHostTags.DUAL_INTEL_XEON_E5_2620_V3_STORAGE;
// host BW
int bw = GpuHostTags.DUAL_INTEL_XEON_E5_2620_V3_BW;
// Set VM Scheduler
VmScheduler vmScheduler = new VmSchedulerTimeShared(peList);
// Host Power Model
double hostMaxPower = 200;
double hostStaticPowerPercent = 0.70;
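// Linear model: static (idle) power is 70% of the 200 W peak; the remainder is assumed to scale
// linearly with host utilization.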
PowerModel powerModel = new GpuHostPowerModelLinear(hostMaxPower, hostStaticPowerPercent);
// Video Card Selection Policy
VideoCardAllocationPolicy videoCardAllocationPolicy = new VideoCardAllocationPolicySimple(videoCards);
RemoteGpuHost newHost = new RemoteGpuHost(hostId, GpuHostTags.DUAL_INTEL_XEON_E5_2620_V3,
new RamProvisionerSimple(ram), new BwProvisionerRelaxed(bw), storage, peList, vmScheduler,
videoCardAllocationPolicy, powerModel);
hostList.add(newHost);
/** A host without GPU **/
// A Machine contains one or more PEs or CPUs/Cores.
peList = new ArrayList<Pe>();
for (int peId = 0; peId < GpuHostTags.DUAL_INTEL_XEON_E5_2690_V4_NUM_PES; peId++) {
// Create PEs and add these into a list.
peList.add(new Pe(peId, new PeProvisionerSimple(GpuHostTags.DUAL_INTEL_XEON_E5_2690_V4_PE_MIPS)));
}
powerModel = new GpuHostPowerModelLinear(hostMaxPower, hostStaticPowerPercent);
newHost = new RemoteGpuHost(GpuHostTags.DUAL_INTEL_XEON_E5_2690_V4, GpuHostTags.DUAL_INTEL_XEON_E5_2690_V4,
new RamProvisionerSimple(GpuHostTags.DUAL_INTEL_XEON_E5_2690_V4_RAM),
new BwProvisionerRelaxed(GpuHostTags.DUAL_INTEL_XEON_E5_2690_V4_BW),
GpuHostTags.DUAL_INTEL_XEON_E5_2690_V4_STORAGE, peList, new VmSchedulerTimeShared(peList), null,
powerModel);
hostList.add(newHost);
// Create a DatacenterCharacteristics object that stores the
// properties of a data center: architecture, OS, list of
// Machines, allocation policy: time- or space-shared, time zone
// and its price (G$/Pe time unit).
// system architecture
String arch = "x86";
// operating system
String os = "Linux";
// VM Manager
String vmm = "Horizon";
// time zone where this resource is located (Tehran)
double time_zone = +3.5;
// the cost of using processing in this resource
double cost = 0.0;
// the cost of using memory in this resource
double costPerMem = 0.00;
// the cost of using storage in this resource
double costPerStorage = 0.000;
// the cost of using bw in this resource
double costPerBw = 0.0;
// we are not adding SAN devices for now
LinkedList<Storage> storageList = new LinkedList<Storage>();
DatacenterCharacteristics characteristics = new DatacenterCharacteristics(arch, os, vmm, hostList, time_zone,
cost, costPerMem, costPerStorage, costPerBw);
// We need to create a Datacenter object.
PowerGpuDatacenter datacenter = null;
try {
datacenter = new PowerGpuDatacenter(name, characteristics, new RemoteGpuVmAllocationPolicyFirstFit(hostList),
storageList, schedulingInterval);
} catch (Exception e) {
e.printStackTrace();
}
return datacenter;
}
/**
* Creates the broker.
*
* @param name
* the name
*
* @return the datacenter broker
*/
private static PowerGpuDatacenterBroker createBroker(String name) {
PowerGpuDatacenterBroker broker = null;
try {
broker = new PowerGpuDatacenterBroker(name);
} catch (Exception e) {
e.printStackTrace();
return null;
}
return broker;
}
/**
* Prints the GpuCloudlet objects.
*
* @param gpuCloudlets
* the list of GpuCloudlets to print
*/
private static void printCloudletList(List<Cloudlet> gpuCloudlets) {
Log.printLine(String.join("", Collections.nCopies(100, "-")));
DecimalFormat dft = new DecimalFormat("###.##");
for (GpuCloudlet gpuCloudlet : (List<GpuCloudlet>) (List<?>) gpuCloudlets) {
// Cloudlet
AsciiTable at = new AsciiTable();
at.addRule();
at.addRow("Cloudlet ID", "Status", "Datacenter ID", "VM ID", "Time", "Start Time", "Finish Time");
at.addRule();
if (gpuCloudlet.getCloudletStatus() == Cloudlet.SUCCESS) {
at.addRow(gpuCloudlet.getCloudletId(), "SUCCESS", gpuCloudlet.getResourceId(), gpuCloudlet.getVmId(),
dft.format(gpuCloudlet.getActualCPUTime()).toString(),
dft.format(gpuCloudlet.getExecStartTime()).toString(),
dft.format(gpuCloudlet.getFinishTime()).toString());
at.addRule();
}
GpuTask gpuTask = gpuCloudlet.getGpuTask();
// Host-Device Memory Transfer
AsciiTable atMT = new AsciiTable();
atMT.addRule();
atMT.addRow("Direction", "Time", "Start Time", "End Time");
atMT.addRule();
atMT.addRow("H2D", dft.format(gpuTask.getMemoryTransferHostToDevice().getTime()).toString(),
dft.format(gpuTask.getMemoryTransferHostToDevice().startTime).toString(),
dft.format(gpuTask.getMemoryTransferHostToDevice().endTime).toString());
atMT.addRule();
// Gpu Task
at.addRow("Task ID", "Cloudlet ID", "Status", "vGPU Profile", "Time", "Start Time", "Finish Time");
at.addRule();
if (gpuTask.getTaskStatus() == GpuTask.FINISHED) {
at.addRow(gpuTask.getTaskId(), gpuTask.getCloudlet().getCloudletId(), "SUCCESS",
GridVgpuTags.getVgpuTypeString(
((GpuVm) VmList.getById(vmlist, gpuTask.getCloudlet().getVmId())).getVgpu().getType()),
dft.format(gpuTask.getActualGPUTime()).toString(),
dft.format(gpuTask.getExecStartTime()).toString(),
dft.format(gpuTask.getFinishTime()).toString());
at.addRule();
}
// Device-Host Memory Transfer
atMT.addRow("D2H", dft.format(gpuTask.getMemoryTransferDeviceToHost().getTime()).toString(),
dft.format(gpuTask.getMemoryTransferDeviceToHost().startTime).toString(),
dft.format(gpuTask.getMemoryTransferDeviceToHost().endTime).toString());
atMT.addRule();
at.getContext().setWidth(100);
atMT.getContext().setWidth(100);
Log.printLine(at.render());
Log.printLine(atMT.render());
Log.printLine(String.join("", Collections.nCopies(100, "-")));
}
AsciiTable at = new AsciiTable();
at.addRule();
at.addRow("Entity", "Energy Consumed (Joules)");
at.addRule();
for (PowerGpuDatacenter datacenter : datacenterList) {
String depth = "#" + datacenter.getId();
at.addRow("Datacenter " + depth, dft.format(datacenter.getConsumedEnergy()).toString());
at.addRule();
for (Entry<PowerGpuHost, Double> entry : datacenter.getHostEnergyMap().entrySet()) {
PowerGpuHost host = entry.getKey();
depth = "#" + host.getId() + " / " + depth;
at.addRow("Host " + depth, dft.format(datacenter.getHostCpuEnergyMap().get(host)).toString() + " / "
+ dft.format(datacenter.getHostEnergyMap().get(host)).toString());
at.addRule();
if (host.getVideoCardAllocationPolicy() != null) {
for (PowerVideoCard videoCard : (List<PowerVideoCard>) host.getVideoCardAllocationPolicy()
.getVideoCards()) {
depth = "#" + videoCard.getId() + " / " + depth;
at.addRow("Video Card " + depth,
dft.format(datacenter.getHostVideoCardEnergyMap().get(host).get(videoCard)).toString()
+ " / " + dft.format(datacenter.getHostEnergyMap().get(host)).toString());
at.addRule();
}
}
depth = "#" + datacenter.getId();
}
}
at.getContext().setWidth(100);
Log.printLine(at.render());
}
}
@@ -250,7 +250,7 @@ public class Vgpu {
* @param mips
* the new mips
*/
-protected void setPeMips(double mips) {
+public void setPeMips(double mips) {
this.peMips = mips;
}
@@ -269,7 +269,7 @@ public class Vgpu {
* @param numberOfPes
* the new number of pes
*/
-protected void setNumberOfPes(int numberOfPes) {
+public void setNumberOfPes(int numberOfPes) {
this.numberOfPes = numberOfPes;
}
@@ -315,7 +315,7 @@ public class Vgpu {
* @pre bw > 0
* @post $none
*/
-protected void setBw(long bw) {
+public void setBw(long bw) {
this.bw = bw;
}
@@ -336,7 +336,7 @@ public class Vgpu {
* @param type
* the new virtual gpu type
*/
-protected void setType(int type) {
+public void setType(int type) {
this.type = type;
}
@@ -21,8 +21,6 @@ import org.cloudbus.cloudsim.gpu.util.GridVgpuUtil;
*/
public class PerformanceModelGpuConstant implements PerformanceModel<VgpuScheduler, Vgpu> {
public static final double TOTAL_CAPACITY = 0;
protected final double gain;
/**
/**
*
*/
package org.cloudbus.cloudsim.gpu.remote;
import java.util.ArrayList;
import java.util.List;
import org.cloudbus.cloudsim.Log;
import org.cloudbus.cloudsim.Pe;
import org.cloudbus.cloudsim.Vm;
import org.cloudbus.cloudsim.VmScheduler;
import org.cloudbus.cloudsim.gpu.GpuVm;
import org.cloudbus.cloudsim.gpu.Vgpu;
import org.cloudbus.cloudsim.gpu.allocation.VideoCardAllocationPolicy;
import org.cloudbus.cloudsim.gpu.performance.PerformanceScheduler;
import org.cloudbus.cloudsim.gpu.power.PowerGpuHost;
import org.cloudbus.cloudsim.power.models.PowerModel;
import org.cloudbus.cloudsim.provisioners.BwProvisioner;
import org.cloudbus.cloudsim.provisioners.RamProvisioner;
/**
*
* A {@link PowerGpuHost} which supports GPU remoting.
*
* @author Ahmad Siavashi
*
*/
public class RemoteGpuHost extends PowerGpuHost {
/**
* A {@link PowerGpuHost} which supports GPU remoting.
*/
public RemoteGpuHost(int id, int type, RamProvisioner ramProvisioner, BwProvisioner bwProvisioner, long storage,
List<? extends Pe> peList, VmScheduler vmScheduler, VideoCardAllocationPolicy videoCardAllocationPolicy,
PowerModel powerModel) {
super(id, type, ramProvisioner, bwProvisioner, storage, peList, vmScheduler, videoCardAllocationPolicy,
powerModel);
}
/**
* A {@link PowerGpuHost} which supports GPU remoting.
*/
public RemoteGpuHost(int id, int type, RamProvisioner ramProvisioner, BwProvisioner bwProvisioner, long storage,
List<? extends Pe> peList, VmScheduler vmScheduler, PowerModel powerModel) {
super(id, type, ramProvisioner, bwProvisioner, storage, peList, vmScheduler, powerModel);
}
@Override
public double updateVmsProcessing(double currentTime) {
// Update VMs progress
double smallerTime = Double.MAX_VALUE;
for (Vm vm : getVmList()) {
double time = vm.updateVmProcessing(currentTime, getVmScheduler().getAllocatedMipsForVm(vm));
if (time > 0.0 && time < smallerTime) {
smallerTime = time;
}
}
// Update resident vgpus progress, if any
if (getVideoCardAllocationPolicy() == null) {
return smallerTime;
} // else
// Collect vGPUs that are currently running tasks and may therefore be sharing GPU resources
List<Vgpu> runningVgpus = new ArrayList<Vgpu>();
for (Vgpu vgpu : getVideoCardAllocationPolicy().getVgpuVideoCardMap().keySet()) {
if (vgpu.getGpuTaskScheduler().runningTasks() > 0) {
runningVgpus.add(vgpu);
}
}
for (Vgpu vgpu : getVideoCardAllocationPolicy().getVgpuVideoCardMap().keySet()) {
@SuppressWarnings("unchecked")
PerformanceScheduler<Vgpu> vgpuScheduler = (PerformanceScheduler<Vgpu>) getVideoCardAllocationPolicy()
.getVgpuVideoCardMap().get(vgpu).getVgpuScheduler();
double time = vgpu.updateTaskProcessing(currentTime, vgpuScheduler.getAvailableMips(vgpu, runningVgpus));
if (time > 0.0 && time < smallerTime) {
smallerTime = time;
}
}
return smallerTime;
}
@Override
public boolean isSuitableForVm(Vm vm) {
// Checking host resources
boolean hasStorage = getStorage() >= vm.getSize();
boolean hasRam = getRamProvisioner().isSuitableForVm(vm, vm.getCurrentRequestedRam());
boolean hasBw = getBwProvisioner().isSuitableForVm(vm, vm.getCurrentRequestedBw());
boolean hasMips = getVmScheduler().getPeCapacity() >= vm.getCurrentRequestedMaxMips()
&& getVmScheduler().getAvailableMips() >= vm.getCurrentRequestedTotalMips();
if (!hasStorage || !hasRam || !hasBw || !hasMips) {
return false;
}
// Checking GPU resources of the host
Vgpu vgpu = ((GpuVm) vm).getVgpu();
// if the VM has no vGPU -> return true.
if (vgpu == null) {
return true;
}
// if the VM has a remote vGPU
else if (!isVgpuLocal(vgpu)) {
return true;
}
// if the VM has a local vGPU and the host has no local video card -> return
// false.
else if (getVideoCardAllocationPolicy() == null) {
return false;
}
// if the VM has a local vGPU and the host has video card(s) -> check
// compatibility.
return getVideoCardAllocationPolicy().isSuitable(vgpu);
}
@Override
public boolean vmCreate(Vm vm) {
// Allocation of host resources
if (getStorage() < vm.getSize()) {
Log.printConcatLine("[VmScheduler.vmCreate] Allocation of VM #", vm.getId(), " to Host #", getId(),
" failed by storage");
return false;
}
if (!getRamProvisioner().allocateRamForVm(vm, vm.getCurrentRequestedRam())) {
Log.printConcatLine("[VmScheduler.vmCreate] Allocation of VM #", vm.getId(), " to Host #", getId(),
" failed by RAM");
return false;
}
if (!getBwProvisioner().allocateBwForVm(vm, vm.getCurrentRequestedBw())) {
Log.printConcatLine("[VmScheduler.vmCreate] Allocation of VM #", vm.getId(), " to Host #", getId(),
" failed by BW");
getRamProvisioner().deallocateRamForVm(vm);
return false;
}
if (!getVmScheduler().allocatePesForVm(vm, vm.getCurrentRequestedMips())) {
Log.printConcatLine("[VmScheduler.vmCreate] Allocation of VM #", vm.getId(), " to Host #", getId(),
" failed by MIPS");
getRamProvisioner().deallocateRamForVm(vm);
getBwProvisioner().deallocateBwForVm(vm);
return false;
}
setStorage(getStorage() - vm.getSize());
// Device (GPU) allocation
Vgpu vgpu = ((GpuVm) vm).getVgpu();
// if the VM has no vGPU or if the VM has a remote vGPU -> success.
if (vgpu == null || !isVgpuLocal(vgpu)) {
getVmList().add(vm);
vm.setHost(this);
return true;
}
// if the VM has a local vGPU but the host has no local video card -> fail.
else if (getVideoCardAllocationPolicy() == null) {
rollbackHostResourceAllocation(vm);
return false;
}
// if the VM has a local vGPU and the host has local video card(s) -> check
boolean isVgpuAllocated = getVideoCardAllocationPolicy().allocate(vgpu, vgpu.getPCIeBw());
// if vGPU allocation failed -> fail.
if (!isVgpuAllocated) {
Log.printConcatLine("[VmScheduler.vmCreate] Allocation of GPU accelerated VM #", vm.getId(), " to Host #",
getId(), " failed due to vgpu allocation failure.");
rollbackHostResourceAllocation(vm);
return false;
} // else -> success
getVmList().add(vm);
vm.setHost(this);
return true;
}
@Override
protected void vmDeallocate(Vm vm) {
// vm removal
getVmList().remove(vm);
// VM deallocation
rollbackHostResourceAllocation(vm);
// get vgpu
Vgpu vgpu = ((GpuVm) vm).getVgpu();
// if the VM has no vGPU -> done
if (vgpu == null) {
return;
} else if (isVgpuLocal(vgpu) && getVideoCardAllocationPolicy() != null) {
// Vgpu deallocation
getVideoCardAllocationPolicy().deallocate(vgpu);
}
}
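/**
* Checks whether the given vGPU is served by a GPU installed in this host (local),
* as opposed to a GPU accessed remotely on another host.
*/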
protected boolean isVgpuLocal(Vgpu vgpu) {
return vgpu.getType() == RemoteVgpuTags.LOCAL_EXCLUSIVE || vgpu.getType() == RemoteVgpuTags.LOCAL_SHARED;
}
}
package org.cloudbus.cloudsim.gpu.remote;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.cloudbus.cloudsim.Host;
import org.cloudbus.cloudsim.Log;
import org.cloudbus.cloudsim.Vm;
import org.cloudbus.cloudsim.core.CloudSim;
import org.cloudbus.cloudsim.gpu.GpuVm;
import org.cloudbus.cloudsim.gpu.GpuVmAllocationPolicySimple;
import org.cloudbus.cloudsim.gpu.Vgpu;
import org.cloudbus.cloudsim.gpu.power.PowerGpuHost;
/**
* This class extends {@link GpuVmAllocationPolicySimple} to add support for GPU
* remoting.
*
* @author Ahmad Siavashi
*
*/
public class RemoteGpuVmAllocationPolicyFirstFit extends GpuVmAllocationPolicySimple {
private List<PowerGpuHost> gpuHosts;
private Map<Vgpu, RemoteGpuHost> remoteVgpuHosts;
/**
* This class extends {@link GpuVmAllocationPolicySimple} to add support for GPU
* remoting.
*
* @see {@link GpuVmAllocationPolicySimple}
*/
public RemoteGpuVmAllocationPolicyFirstFit(List<? extends Host> list) {
super(list);
setRemoteVgpuHosts(new HashMap<>());
setGpuHosts(new ArrayList<>());
updateGpuHosts(getHostList());
}
@Override
public boolean allocateHostForVm(Vm vm, Host host) {
if (!getVmTable().containsKey(vm.getUid())) {
Vgpu vgpu = ((GpuVm) vm).getVgpu();
boolean result = host.vmCreate(vm);
if (!result) {
return false;
}
// if Vm has no Vgpu or has a local Vgpu which is allocated in vmCreate
else if (vgpu == null || !isVgpuRemote(vgpu)) {
getVmTable().put(vm.getUid(), host);
Log.formatLine("%.2f: VM #" + vm.getId() + " has been allocated to the host #" + host.getId(),
CloudSim.clock());
Log.printLine("{'clock': " + CloudSim.clock() + ", 'event': 'vm allocation', 'vm': " + vm.getId()
+ ", 'host': " + host.getId() + "}");
return true;
}
// if Vm has a remote Vgpu
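// First-fit: the vGPU is placed on the first GPU-equipped host whose video card allocation policy can accommodate it.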
for (PowerGpuHost gpuHost : getGpuHosts()) {
boolean isVgpuAllocated = gpuHost.getVideoCardAllocationPolicy().allocate(vgpu, vgpu.getPCIeBw());
if (isVgpuAllocated) {
getVmTable().put(vm.getUid(), host);
Log.formatLine("%.2f: VM #" + vm.getId() + " has been allocated to the host #" + host.getId(),
CloudSim.clock());
Log.printLine("{'clock': " + CloudSim.clock() + ", 'event': 'vm allocation', 'vm': " + vm.getId()
+ ", 'host': " + host.getId() + "}");
return true;
}
}
// failed to find a remote GPU -> free allocated resources
host.vmDestroy(vm);
}
return false;
}
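/**
* Checks whether the given vGPU is of a remote type, i.e. it is served by a GPU
* located on a host other than the one running its VM.
*/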
protected boolean isVgpuRemote(Vgpu vgpu) {
return vgpu.getType() == RemoteVgpuTags.REMOTE_EXCLUSIVE || vgpu.getType() == RemoteVgpuTags.REMOTE_SHARED;
}
// Refresh the list of GPU-equipped hosts
protected void updateGpuHosts(List<PowerGpuHost> hosts) {
getGpuHosts().clear();
for (PowerGpuHost host : hosts) {
if (host.getVideoCardAllocationPolicy() != null) {
getGpuHosts().add(host);
}
}
}
public List<PowerGpuHost> getGpuHosts() {
return gpuHosts;
}
protected void setGpuHosts(List<PowerGpuHost> gpuHosts) {
this.gpuHosts = gpuHosts;
}
public Map<Vgpu, RemoteGpuHost> getRemoteVgpuHosts() {
return remoteVgpuHosts;
}
protected void setRemoteVgpuHosts(Map<Vgpu, RemoteGpuHost> remoteVgpuHosts) {
this.remoteVgpuHosts = remoteVgpuHosts;
}
}
package org.cloudbus.cloudsim.gpu.remote;
import java.util.ArrayList;
import java.util.List;
import org.apache.commons.collections.CollectionUtils;
import org.apache.commons.collections.Predicate;
import org.apache.commons.lang3.NotImplementedException;
import org.cloudbus.cloudsim.Pe;
import org.cloudbus.cloudsim.gpu.Pgpu;
import org.cloudbus.cloudsim.gpu.Vgpu;
import org.cloudbus.cloudsim.gpu.VgpuScheduler;
import org.cloudbus.cloudsim.gpu.VgpuSchedulerFairShare;
import org.cloudbus.cloudsim.gpu.performance.PerformanceVgpuSchedulerFairShare;
import org.cloudbus.cloudsim.gpu.performance.models.PerformanceModel;
import org.cloudbus.cloudsim.gpu.selection.PgpuSelectionPolicy;
import org.cloudbus.cloudsim.util.MathUtil;
/**
* Extends {@link VgpuSchedulerFairShare} to add support for GPU remoting.
*
* @author Ahmad Siavashi
*/
public class RemoteVgpuSchedulerFairShare extends PerformanceVgpuSchedulerFairShare {
/**
* Extends {@link VgpuSchedulerFairShare} to add support for GPU remoting.
*
* @see {@link VgpuSchedulerFairShare}
*/
public RemoteVgpuSchedulerFairShare(int videoCardType, List<Pgpu> pgpuList, PgpuSelectionPolicy pgpuSelectionPolicy,
PerformanceModel<VgpuScheduler, Vgpu> performanceModel) {
super(videoCardType, pgpuList, pgpuSelectionPolicy, performanceModel);
}
@Override
protected boolean isVideoCardSuitableForVgpu(Vgpu vgpu) {
throw new NotImplementedException("This method is not implemented.");
}
@SuppressWarnings("unchecked")
@Override
public boolean isSuitable(final Vgpu vgpu) {
final int gddramShare = vgpu.getCurrentRequestedGddram();
List<Pgpu> candidates = (List<Pgpu>) CollectionUtils.select(getPgpuList(), new Predicate() {
@Override
public boolean evaluate(Object arg) {
Pgpu pgpu = (Pgpu) arg;
if (!pgpu.getGddramProvisioner().isSuitableForVgpu(vgpu, gddramShare)) {
return false;
}
if (isVgpuExclusive(vgpu) && getPgpuVgpuMap().get(pgpu).size() > 0) {
return false;
}
return true;
}
});
return !candidates.isEmpty();
}
@Override
@SuppressWarnings("unchecked")
public boolean allocatePgpuForVgpu(final Vgpu vgpu, final List<Double> mipsShare, final int gddramShare,
final long bwShare) {
List<Pgpu> candidates = (List<Pgpu>) CollectionUtils.select(getPgpuList(), new Predicate() {
@Override
public boolean evaluate(Object arg) {
Pgpu pgpu = (Pgpu) arg;
if (!pgpu.getGddramProvisioner().isSuitableForVgpu(vgpu, gddramShare)) {
return false;
}
if (isVgpuExclusive(vgpu) && getPgpuVgpuMap().get(pgpu).size() > 0) {
return false;
}
return true;
}
});
Pgpu selectedPgpu = getPgpuSelectionPolicy().selectPgpu(this, candidates);
if (selectedPgpu == null) {
return false;
}
// set processing power according to the allocated pgpu
vgpu.setNumberOfPes(selectedPgpu.getPeList().size());
vgpu.setPeMips(selectedPgpu.getPeList().get(0).getMips());
final List<Double> VgpuMipsShare = vgpu.getCurrentRequestedMips();
final long vgpuBwShare = selectedPgpu.getBwProvisioner().getBw();
selectedPgpu.getGddramProvisioner().allocateGddramForVgpu(vgpu, gddramShare);
// GPU memory bandwidth contention is ignored; each vGPU is allocated the pGPU's full bandwidth
selectedPgpu.getBwProvisioner().allocateBwForVgpu(vgpu, vgpuBwShare);
getPgpuVgpuMap().get(selectedPgpu).add(vgpu);
getRequestedMipsMap().put(vgpu, VgpuMipsShare);
getVgpuPeMap().put(vgpu, new ArrayList<Pe>());
double mipsChange = MathUtil.sum(VgpuMipsShare);
redistributeMipsDueToOverSubscription(selectedPgpu, mipsChange);
return true;
}
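/**
* Checks whether the given vGPU requires exclusive access to a physical GPU,
* i.e. it may not share its pGPU with other vGPUs.
*/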
protected boolean isVgpuExclusive(Vgpu vgpu) {
int type = vgpu.getType();
return type == RemoteVgpuTags.LOCAL_EXCLUSIVE || type == RemoteVgpuTags.REMOTE_EXCLUSIVE;
}
}
package org.cloudbus.cloudsim.gpu.remote;
/**
*
* The modes supported in remote GPU virtualization.
*
* @author Ahmad Siavashi
*
*/
public class RemoteVgpuTags {
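// REMOTE_*: the vGPU is served by a GPU on a host other than the VM's host.
// LOCAL_*: the vGPU is served by a GPU on the VM's own host.
// *_EXCLUSIVE: the vGPU does not share its physical GPU with other vGPUs.
// *_SHARED: the vGPU may share its physical GPU with other vGPUs.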
public static final int REMOTE_EXCLUSIVE = 0;
public static final int REMOTE_SHARED = 1;
public static final int LOCAL_EXCLUSIVE = 2;
public static final int LOCAL_SHARED = 3;
}