Timestamp:
12/27/12 16:27:59 (12 years ago)
Author:
wojtekp
Message:
 
Location:
papers/SMPaT-2012_DCWoRMS
Files:
5 added
6 edited

  • papers/SMPaT-2012_DCWoRMS/elsarticle-DCWoRMS.aux

    r712 r713  
    6969\@writefile{toc}{\contentsline {subsection}{\numberline {4.3}Thermal models}{18}} 
    7070\newlabel{sec:thermal}{{4.3}{18}} 
     71\@writefile{lot}{\contentsline {table}{\numberline {1}{\ignorespaces  CoolEmAll testbed}}{19}} 
     72\newlabel{testBed}{{1}{19}} 
    7173\@writefile{toc}{\contentsline {section}{\numberline {5}Experiments and evaluation}{19}} 
    7274\newlabel{sec:experiments}{{5}{19}} 
    7375\@writefile{toc}{\contentsline {subsection}{\numberline {5.1}Testbed description}{19}} 
    74 \@writefile{toc}{\contentsline {subsection}{\numberline {5.2}Computational analysis}{19}} 
    75 \@writefile{lot}{\contentsline {table}{\numberline {1}{\ignorespaces CoolEmAll testbed}}{20}} 
    76 \@writefile{toc}{\contentsline {section}{\numberline {6}DCWoRMS application/use cases}{20}} 
    77 \newlabel{sec:coolemall}{{6}{20}} 
    78 \bibcite{fit4green}{{15}{}{{}}{{}}} 
    79 \@writefile{toc}{\contentsline {section}{\numberline {7}Conclusions and future work}{21}} 
    80 \newlabel{}{{7}{21}} 
    81 \bibcite{CloudSim}{{1}{}{{}}{{}}} 
    82 \bibcite{DCSG}{{2}{}{{}}{{}}} 
    83 \bibcite{DCD_Romonet}{{3}{}{{}}{{}}} 
    84 \bibcite{networks}{{16}{}{{}}{{}}} 
    85 \bibcite{Ghislain}{{4}{}{{}}{{}}} 
    86 \bibcite{games}{{6}{}{{}}{{}}} 
    87 \bibcite{GreenCloud}{{5}{}{{}}{{}}} 
    88 \bibcite{sla}{{17}{}{{}}{{}}} 
    89 \bibcite{GSSIM}{{6}{}{{}}{{}}} 
    90 \bibcite{GSSIM_Energy}{{7}{}{{}}{{}}} 
    91 \bibcite{hintemann}{{2}{}{{}}{{}}} 
    92 \bibcite{koomey}{{8}{}{{}}{{}}} 
    93 \bibcite{GWF}{{8}{}{{}}{{}}} 
    94 \bibcite{SLURM}{{9}{}{{}}{{}}} 
    95 \bibcite{SWF}{{10}{}{{}}{{}}} 
    96 \bibcite{TORQUE}{{11}{}{{}}{{}}} 
    97 \bibcite{colt}{{19}{}{{}}{{}}} 
    98 \bibcite{coolemall}{{20}{}{{}}{{}}} 
    99 \bibcite{ecocooling}{{21}{}{{}}{{}}} 
    100 \bibcite{montblanc}{{22}{}{{}}{{}}} 
    101 \bibcite{pue}{{23}{}{{}}{{}}} 
    102 \bibcite{sgi}{{24}{}{{}}{{}}} 
    103 \global\NAT@numberstrue 
     76\@writefile{toc}{\contentsline {subsection}{\numberline {5.2}Evaluated applications}{20}} 
     77\@writefile{toc}{\contentsline {subsection}{\numberline {5.3}Methodology}{20}} 
     78\@writefile{lot}{\contentsline {table}{\numberline {2}{\ignorespaces  Workload characteristics}}{21}} 
     79\newlabel{workloadChatacteristics}{{2}{21}} 
     80\@writefile{toc}{\contentsline {subsection}{\numberline {5.4}Computational analysis}{21}} 
     81\@writefile{lof}{\contentsline {figure}{\numberline {9}{\ignorespaces  Random strategy}}{22}} 
     82\newlabel{fig:70r}{{9}{22}} 
     83\@writefile{lof}{\contentsline {figure}{\numberline {10}{\ignorespaces  Random + switching off unused nodes strategy}}{23}} 
     84\newlabel{fig:70rnpm}{{10}{23}} 
     85\@writefile{lof}{\contentsline {figure}{\numberline {11}{\ignorespaces  Energy usage optimization strategy}}{24}} 
     86\newlabel{fig:70eo}{{11}{24}} 
     87\@writefile{toc}{\contentsline {section}{\numberline {6}DCWoRMS application/use cases}{24}} 
     88\newlabel{sec:coolemall}{{6}{24}} 
     89\@writefile{lof}{\contentsline {figure}{\numberline {12}{\ignorespaces  Energy usage optimization + switching off unused nodes strategy}}{25}} 
     90\newlabel{fig:70eonpm}{{12}{25}} 
     91\@writefile{lof}{\contentsline {figure}{\numberline {13}{\ignorespaces  Frequency downgrading strategy}}{26}} 
     92\newlabel{fig:70dfs}{{13}{26}} 
     93\bibcite{fit4green}{{1}{}{{}}{{}}} 
     94\bibcite{CloudSim}{{2}{}{{}}{{}}} 
     95\bibcite{DCSG}{{3}{}{{}}{{}}} 
     96\bibcite{DCD_Romonet}{{4}{}{{}}{{}}} 
     97\bibcite{networks}{{5}{}{{}}{{}}} 
     98\@writefile{toc}{\contentsline {section}{\numberline {7}Conclusions and future work}{27}} 
     99\newlabel{}{{7}{27}} 
     100\bibcite{Ghislain}{{6}{}{{}}{{}}} 
     101\bibcite{games}{{7}{}{{}}{{}}} 
     102\bibcite{GreenCloud}{{8}{}{{}}{{}}} 
     103\bibcite{sla}{{9}{}{{}}{{}}} 
     104\bibcite{GSSIM}{{10}{}{{}}{{}}} 
     105\bibcite{GSSIM_Energy}{{11}{}{{}}{{}}} 
     106\bibcite{hintemann}{{12}{}{{}}{{}}} 
     107\bibcite{koomey}{{13}{}{{}}{{}}} 
     108\bibcite{GWF}{{14}{}{{}}{{}}} 
     109\bibcite{SLURM}{{15}{}{{}}{{}}} 
     110\bibcite{SWF}{{16}{}{{}}{{}}} 
     111\bibcite{TORQUE}{{17}{}{{}}{{}}} 
     112\bibcite{colt}{{18}{}{{}}{{}}} 
     113\bibcite{coolemall}{{19}{}{{}}{{}}} 
     114\bibcite{ecocooling}{{20}{}{{}}{{}}} 
     115\bibcite{montblanc}{{21}{}{{}}{{}}} 
     116\bibcite{pue}{{22}{}{{}}{{}}} 
     117\bibcite{sgi}{{23}{}{{}}{{}}} 
     118\providecommand\NAT@force@numbers{}\NAT@force@numbers 
  • papers/SMPaT-2012_DCWoRMS/elsarticle-DCWoRMS.fdb_latexmk

    r699 r713  
    11# Fdb version 2 
    2 ["pdflatex"] 1355227032 "elsarticle-DCWoRMS.tex" "elsarticle-DCWoRMS.pdf" "elsarticle-DCWoRMS"  
     2["pdflatex"] 1356621852 "elsarticle-DCWoRMS.tex" "elsarticle-DCWoRMS.pdf" "elsarticle-DCWoRMS"  
    33  "/usr/local/texlive/2010/texmf-dist/tex/context/base/supp-pdf.mkii" 1251025892 71625 fad1c4b52151c234b6873a255b0ad6b3 "" 
    44  "/usr/local/texlive/2010/texmf-dist/tex/generic/oberdiek/etexcmds.sty" 1267408169 5670 cacb018555825cfe95cd1e1317d82c1d "" 
     
    2121  "/usr/local/texlive/2010/texmf-dist/tex/latex/latexconfig/epstopdf-sys.cfg" 1279039959 678 4792914a8f45be57bb98413425e4c7af "" 
    2222  "/usr/local/texlive/2010/texmf-dist/tex/latex/latexconfig/graphics.cfg" 1278958963 3563 d35e897cae3b8c6848f6677b73370b54 "" 
     23  "/usr/local/texlive/2010/texmf-dist/tex/latex/multirow/multirow.sty" 1137110401 7374 f7c1f13fc632dd5c9b220a247d233082 "" 
    2324  "/usr/local/texlive/2010/texmf-dist/tex/latex/natbib/natbib.sty" 1262999190 45424 f4af154491778dfeae4b00e701813f8d "" 
    2425  "/usr/local/texlive/2010/texmf-dist/tex/latex/oberdiek/epstopdf-base.sty" 1272994501 11438 cd11ce690f4a4b3f2b573ca8c8281d35 "" 
     
    2930  "/usr/local/texlive/2010/texmf-dist/tex/latex/psnfss/upsy.fd" 1137110629 148 2da0acd77cba348f34823f44cabf0058 "" 
    3031  "/usr/local/texlive/2010/texmf-dist/tex/latex/psnfss/upzd.fd" 1137110629 148 b2a94082cb802f90d3daf6dd0c7188a0 "" 
    31   "elsarticle-DCWoRMS.aux" 1355227033 4858 b29e95fa9f7f5edb3bbc73cc478e766e "" 
    32   "elsarticle-DCWoRMS.spl" 1355227032 0 d41d8cd98f00b204e9800998ecf8427e "" 
    33   "elsarticle-DCWoRMS.tex" 1355227025 47973 7c506fc13b9826e269db7ff5a5da17de "" 
     32  "elsarticle-DCWoRMS.aux" 1356621854 6549 90dde93803785e474cb3357362f47d80 "" 
     33  "elsarticle-DCWoRMS.spl" 1356621852 0 d41d8cd98f00b204e9800998ecf8427e "" 
     34  "elsarticle-DCWoRMS.tex" 1356621852 61429 047e7dd4e2d18c2324559e53dd759a54 "" 
    3435  "elsarticle.cls" 1352447924 26095 ad44f4892f75e6e05dca57a3581f78d1 "" 
     36  "fig/70dfs.png" 1356617710 212573 e013d714dd1377384ed7793222210051 "" 
     37  "fig/70eo.png" 1356617710 273614 365b7ed476dccc7d4946aebeb6a597a0 "" 
     38  "fig/70eonpm.png" 1356617720 269953 687d1a3a786747913740a8e220b0f9e5 "" 
     39  "fig/70r.png" 1356617710 288840 3392dd2493597d4774f8e6039cd8eb2d "" 
     40  "fig/70rnpm.png" 1356619980 61435 4d78965725cd9c1fb907f7d1af9421e7 "" 
    3541  "fig/airModel.png" 1353405890 41411 f33639119a59ae1d2eabb277137f0042 "" 
    3642  "fig/arch.png" 1353403503 184917 61b6fddc71ce603779f09b272cd2f164 "" 
  • papers/SMPaT-2012_DCWoRMS/elsarticle-DCWoRMS.tex

    r712 r713  
    4646%% The amssymb package provides various useful mathematical symbols 
    4747\usepackage{amssymb} 
     48\usepackage{multirow} 
    4849%% The amsthm package provides extended theorem environments 
    4950%% \usepackage{amsthm} 
     
    159160\section{Related Work} 
    160161 
    161 TODO - shorten, correct (ITS A DRAFT VERSION) 
    162  
    163 The growing importance of energy efficiency in information technologies led to significant interest in energy saving methods for computing systems. Therefore, intelligent resource management policies are gaining popularity when considering the energy efficiency of IT infrastructures. Nevertheless, studies of impact of scheduling strategies on energy consumption require a large effort and are difficult to perform in real distributed environments. To overcome these issues extensive research has been conducted in the area of modeling and simulation tools. As a result, a wide variety of simulation tools that address the green computing issues in distributed infrastructures have been proposed. Among them the most popular ones are: GreenCloud \cite{GreenCloud}, CloudSim \cite{CloudSim} and DCSG Simulator \cite{DCSG} 
    164  
    165 GreenCloud is a C++ based simulation environment for studying the energy-efficiency of cloud computing data centers CloudSim is a simulation tool that allows modeling of cloud computing environments and evaluation of resource provisioning algorithms. Finally, the DCSG Simulator is a data center cost and energy simulator calculating the power and cooling schema of the data center equipment.  
    166  
    167 Although the aforementioned toolkits are capable of establishing the data center environment, none of them provides user with flexibility in the term of detailed resource modeling. GreenCloud defines switches, links and servers that are representedas single core nodes that are responsible for task execution and may contain different scheduling strategies. Contrary to what the GreenCloud name may suggest, it does not allow testing the impact of a virtualization-based approach on the resource management.  
    168 CloudSim allows creating a simple resources hierarchy containing computing and network resources that consist of machines and processors. To simulate a real cloud computing data center, it provides an extra virtualization layer that acts as an execution, management, and hosting environment for application services. It is responsible for the VM provisioning process as well as managing the VM life cycle with respect to the cost metrics and economic policies. 
    169 In DCSG Simulator, user is able to take into account a wide variety of mechanical and electrical devices and for each of them numerous factors can be defined, including device capacity and efficiency as well as data center conditions. Apart from data center facilities the IT equipment can be modeled and simulated. 
    170  
    171 The general idea behind all of the analyzed tools is to enable studies concerning energy efficiency in distributed infrastructures. GreenCloud approach enables fine-grained modeling of energy usage associated with computing servers and network components. For example, the server power consumption model implemented in GreenCloud depends on the server state as well as its utilization. The CloudSim framework provides basic models to validate and evaluate energy-conscious provisioning of techniques and algorithms. Each computing node can be extended with a power model that estimates the current the power consumption. Within the DCSG Simulator, performance of each data center equipment (facility and IT) is determined by a combination of factors, including workload, local conditions, the manufacturer's specifications of the machine's components and the way in which the machine is utilized based on its provisioned IT load. In DCWoRMS, the plugin idea has been introduced that offers emulating the behavior of computing resources in terms of power consumption. Additionally, it delivers detailed information concerning resource and application characteristics needed to define more sophisticated power draw models. 
    172  
    173 In order to emulate the behavior of real computing systems, green computing simulator should address also the energy-aware resource management. In this term, GreenCloud offers capturing the effects of both of the Dynamic Voltage and Frequency Scaling (DVFS) and Dynamic Power Management (DPM) schemes. At the links and switches level, it supports downgrading the transmission rate and putting some network equipment into a sleep mode, respectively. CloudSim comes with a set of predefined and extendable policies that manage the process of VM migrations according to the total energy consumed in order to optimize the power consumption. However, the proposed approach is not sufficient for modeling more sophisticated policies like frequency scaling techniques and managing resource power states. With respect to the white paper, Romonet’s tool implements a set of basic energy-efficient rules that have been developed on the basis of detailed understanding of the data center as a system. The output of this simulation is a set of energy and cost data representing the IT devices (including PUE and DCiE). DCWoRMS introduces the dedicated interface that provides methods to determine detailed information about each resource and its components energy state and allows changing its current energy state. As this interface is accessible from scheduling plugins, energy management may be admitted as a part of whole scheduling process. Availability of these interfaces in schedulers supports implement different strategies such as centralized energy management, self-management of computing resources and mixed models. 
    174  
    175 In terms of application modeling, all tools, except DCSG Simulator, describe the application with a number of computational and communicational requirements. In addition, GreenCloud and DCWoRmS allow introducing the Qos requirements (typical for cloud computing applications) by taking into account the time constraints during the simulation. DCSG Simulator instead of modeling of the single application, introduce the definition of workload applied the computing devices that leads to a given utilization level. Nevertheless, only DCWoRMS supports application performance modeling. In the case of other tools, aforementioned capabilities allow only incorporating simple requirements that are taken into account during scheduling, they do not affect the application execution. On the other hand CloudSim offers modeling of utilization models that are used to estimate the current load of processor, bandwidth and memory and can be taken into account during the task allocation process.  
    176  
    177 GreenCloud, CloudSim and DCWoRMS are released as Open Source under the General Public License Agreement. Romonet’s tool is available under an OSL V3.0 open-source license, however, it can be only accessed by the DCSG members. 
    178  
    179  
     162The growing importance of energy efficiency in information technologies has led to significant interest in energy-saving methods for computing systems. Nevertheless, studies of the impact of resource management policies on the energy efficiency of IT infrastructures require a large effort and are difficult to perform in real distributed environments. To overcome these issues, extensive research has been conducted in the area of modeling and simulation, and a variety of tools that address green computing issues in distributed infrastructures have been proposed. Among them the most popular ones are GreenCloud \cite{GreenCloud}, CloudSim \cite{CloudSim} and the DCSG Simulator \cite{DCSG}. 
     163 
     164GreenCloud is a C++-based simulation environment for studying the energy efficiency of cloud computing data centers. CloudSim is a simulation tool that allows modeling cloud computing environments and evaluating resource provisioning algorithms. Finally, the DCSG Simulator is a data center cost and energy simulator that calculates the power and cooling consumption of the data center equipment.  
     165 
     166The scope of the aforementioned toolkits concerns data center environments. However, all of them, except the DCWoRMS presented in this paper, restrict the user in terms of the resources that can be modeled. GreenCloud defines switches, links and servers that are responsible for task execution and may contain different scheduling strategies. Contrary to what the GreenCloud name may suggest, it does not allow testing the impact of a virtualization-based approach on resource management. CloudSim allows creating a simple resource hierarchy that consists of machines and processors. To simulate a real cloud computing data center, it provides an extra virtualization layer that is responsible for the VM provisioning process as well as for managing the VM life cycle. In the DCSG Simulator, the user is able to take into account a wide variety of mechanical and electrical devices as well as the IT equipment, and for each of them numerous factors can be defined, including device capacity and efficiency as well as data center conditions. 
     167 
     168The general idea behind all of the analyzed tools is to enable studies concerning energy efficiency in distributed infrastructures. The GreenCloud approach enables simulation of the energy usage associated with computing servers and network components. For example, the server power consumption model implemented in GreenCloud depends on the server state as well as on its utilization. The CloudSim framework provides basic models to evaluate energy-conscious provisioning policies. Each computing node can be extended with a power model that estimates the current power consumption. Within the DCSG Simulator, the performance of each piece of data center equipment (facility and IT) is determined by a combination of factors, including workload, local conditions, the manufacturer's specifications and the way in which it is utilized. In DCWoRMS, a plugin concept has been introduced that allows emulating the behavior of computing resources in terms of power consumption. Additionally, DCWoRMS delivers detailed information concerning the resource and application characteristics needed to define more sophisticated power draw models. 
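To illustrate how such a power draw model can be expressed as a plugin, the sketch below implements the common linear model $P(u) = P_{idle} + (P_{max} - P_{idle}) \cdot u$ in Java. The interface and all names are hypothetical and chosen for illustration only; they do not reproduce the actual DCWoRMS API.

\begin{verbatim}
// Hypothetical plugin interface -- illustration only,
// not the actual DCWoRMS API.
enum NodeState { OFF, IDLE, BUSY }

interface PowerModelPlugin {
    double estimatePower(NodeState state, double utilization);
}

// Linear model: P(u) = pIdle + (pMax - pIdle) * u
class LinearPowerModel implements PowerModelPlugin {
    private final double pIdle;  // idle power draw [W]
    private final double pMax;   // power draw at full load [W]

    LinearPowerModel(double pIdle, double pMax) {
        this.pIdle = pIdle;
        this.pMax = pMax;
    }

    @Override
    public double estimatePower(NodeState state, double utilization) {
        if (state == NodeState.OFF) return 0.0;
        return pIdle + (pMax - pIdle) * utilization;
    }
}
\end{verbatim}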
     169 
     170In order to emulate the behavior of real computing systems, a green computing simulator should also address energy-aware resource management. In this respect, GreenCloud captures the effects of both Dynamic Voltage and Frequency Scaling (DVFS) and Dynamic Power Management schemes. At the level of links and switches, it supports downgrading the transmission rate and putting some network equipment into sleep mode. CloudSim comes with a set of predefined and extensible policies that manage the process of VM migration according to the total energy consumed, in order to optimize power consumption. However, this approach is not sufficient for modeling more sophisticated policies, such as frequency scaling techniques and the management of resource power states. Romonet's tool is reported to implement a set of basic energy-efficiency rules that have been developed on the basis of a detailed understanding of the data center as a system. The output of this simulation is a set of energy metrics, such as PUE, and cost data representing the IT devices. DCWoRMS introduces a dedicated interface that provides methods to obtain detailed information about the energy consumption of each resource and its components, and allows changing their current energy states. The availability of these interfaces in scheduling plugins supports the implementation of various strategies, such as centralized energy management, self-management of computing resources and mixed models. 
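A minimal sketch of such an energy-management interface, again with hypothetical names chosen only for illustration, is shown below; a scheduling plugin holding a reference to it can both inspect and change node power states as part of the scheduling process, e.g. to switch off nodes that become idle.

\begin{verbatim}
// Hypothetical energy-management interface -- illustration only.
enum PowerState { ON, OFF, SLEEP }

interface EnergyManagement {
    PowerState getPowerState(String nodeId);
    void setPowerState(String nodeId, PowerState target);
    double getCurrentPowerDraw(String nodeId); // [W]
}

// Example policy fragment: switch off a node that became idle.
class SwitchOffIdlePolicy {
    void onTaskFinished(EnergyManagement em, String nodeId,
                        boolean nodeIsIdle) {
        if (nodeIsIdle && em.getPowerState(nodeId) == PowerState.ON) {
            em.setPowerState(nodeId, PowerState.OFF);
        }
    }
}
\end{verbatim}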
     171 
     172In terms of application modeling, all tools, except the DCSG Simulator, describe an application with a number of computational and communication requirements. In addition, GreenCloud and DCWoRMS allow introducing QoS requirements (typical for cloud computing applications) by taking time constraints into account during the simulation. The DCSG Simulator, instead of modeling a single application, enables the definition of a workload that leads to a given utilization level. However, only DCWoRMS supports application performance modeling, by not only incorporating simple requirements that are taken into account during execution, but also by allowing the specification of task execution times. 
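As a simple illustration of what such a performance model can capture, the sketch below scales a measured reference execution time with the CPU frequency. The class name is hypothetical, and the linear scaling is only an assumption (reasonable for a purely CPU-bound task), not a model prescribed by DCWoRMS.

\begin{verbatim}
// Hypothetical performance-model sketch -- illustration only.
class FrequencyScaledTimeModel {
    // Execution time measured at a reference frequency is scaled
    // to the actual frequency (assumes a purely CPU-bound task).
    double estimateExecutionTime(double refTimeSec,
                                 double refFreqGHz,
                                 double actualFreqGHz) {
        return refTimeSec * (refFreqGHz / actualFreqGHz);
    }
}
\end{verbatim}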
     173 
     174GreenCloud, CloudSim and DCWoRMS are released as Open Source under the GPL. Romonet's tool is available under the OSL v3.0 open-source license; however, it can only be accessed by DCSG members. 
     175 
     176Summarizing, DCWoRMS stands out from the other tools due to its flexibility in terms of data center equipment and structure definition. 
     177Moreover, it allows associating the energy consumption not only with the current power state and resource utilization, but also with the particular set of applications running on a resource. The main strength of CloudSim lies in its implementation of complex scheduling and task execution schemes involving resource virtualization techniques; however, its energy-efficiency aspect is limited to VM management. GreenCloud focuses on data center resources, with particular attention to the network infrastructure and the most popular energy management approaches. The DCSG Simulator also allows taking non-computing devices into account; nevertheless, it appears to be a hardly customizable tool. 
    180178 
    181179\section{DCWoRMS} 
     
    419417\section{Experiments and evaluation}\label{sec:experiments} 
    420418 
    421 Results + RECS and MOP description 
    422  
    423 .... 
    424  
    425419In this section, we present the computational analyses that were conducted to emphasize the role of modeling and simulation in studying the performance of computing systems. We carried out two types of experiments. The former aimed at demonstrating the capabilities of the simulator in terms of verifying research hypotheses. The latter set of experiments was performed on the CoolEmAll testbed and then repeated using the DCWoRMS tool. The comparative analysis of the obtained results shows the reproducibility of the experiments and proves the correctness of the adopted models and assumptions. 
    426420 
    427421\subsection{Testbed description} 
    428422 
    429 The RECS Cluster System is an 18 node computer system that has an monitoring and controlling mechanism integrated. Through the integrated novel monitoring approach of the RECS Cluster System the network load can be reduced, the dependency of polling every single 
    430 compute node at operation system layer can be avoided. Furthermore this concept build up a basis on which new monitoring- and controlling-concepts can be developed. Therefore, each compute node of the RECS Cluster Server is connected to an Operation System independent microcontroller that collects the most important sensor data like temperature, power consumption and the status (on/off) from every single node. 
    431  
     423 
     424To obtain power consumption values that could later be used in the DCWoRMS environment to build the model and to evaluate resource management policies, we ran a set of applications/benchmarks on the physical testbed. For experimental purposes we chose the high-density Resource Efficient Cluster Server (RECS) system. A single RECS unit consists of 18 single-CPU modules, each of which can be treated as an individual node of PC class. The configuration of our RECS unit is presented in Table~\ref{testBed}. The RECS system was chosen because it is a heterogeneous platform with very high density and energy efficiency that has an integrated monitoring and controlling mechanism. The built-in and additional sensors allow monitoring the complete testbed at a very fine granularity without a negative impact on the computing and network resources.  
    432425 
    433426\begin {table}[ tp] 
     
    438431Type & Memory (RAM) & Count  \\ 
    439432\hline 
    440  Intel i7 & 16 GB &     4 \\ 
     433 Intel i7 & 16 GB &     8 \\ 
    441434AMD Fusion T40N 64 Bit & 4 GB   & 6 \\ 
    442435Atom D510 64 Bit & 2 GB &       4 \\ 
    443 Atom Z510 VT &2 GB &    4 \\ 
    444436\hline 
    445437\multicolumn{3}{c}{Storage} \\ 
     
    449441\hline 
    450442\end{tabular} 
    451 \caption {CoolEmAll testbed} 
     443\caption {\label{testBed} CoolEmAll testbed} 
    452444\end {table} 
    453445 
    454 %Node i7, 16 GB RAM     4 
    455 %Node AMD Fusion T40N Dualcore, 1,0 Ghz, 4 GB (64 Bit)  6 
    456 %Node Atom D510 64 Bit, 2 GB    4 
    457 %Node Atom Z510 VT, 2 GB        4 
    458 %RECS | Storage Head 520, 16 x 300 GB SSD, 2 x 10 Gbit/s CX4 
    459  
    460 \subsection{Computational analysis} 
    461  
    462 TODO - experiments 
     446\subsection{Evaluated applications} 
    463447 
    464448To demonstrate the capabilities of the simulator in terms of energy-efficiency modeling, we present examples of experiments performed using DCWoRMS. First, we carried out a set of tests on the real hardware used as the CoolEmAll testbed to build the performance and energy profiles of the applications. Then we applied these data to the simulation environment and used them to investigate different approaches to energy-aware resource management.  
    465449The following applications were evaluated: 
     450 
      451\textbf{Abinit} is a widely used computational physics application that simulates systems made of electrons and nuclei within density functional theory. 
     452 
      453\textbf{C-Ray} is a ray-tracing benchmark that stresses the floating-point performance of the CPU. The test is configured with the 'scene' file at a resolution of 16000x9000. 
     454 
      455The \textbf{Linpack} benchmark is used to evaluate the floating-point performance of a system. It is based on Gaussian elimination and solves a dense $N \times N$ system of linear equations. 
     456 
      457\textbf{Tar} is a widely used data archiving program [tar]. In the tests, the task was to create one compressed archive of the Linux kernel sources, which are about 2.3 GB in size, using bzip2. 
     458 
      459The \textbf{FFTE} benchmark measures the floating-point arithmetic rate of double-precision complex Discrete Fourier Transforms of 1-, 2- and 3-dimensional sequences of length $2^{p} \times 3^{q} \times 5^{r}$. In our tests only one core was used to run the application. 
     460 
     461 
     462 
     463\subsection{Methodology} 
     464 
      465Every chosen application/benchmark was executed on each type of node, for all frequencies supported by the CPU and for different levels of parallelization (numbers of cores). To eliminate the problem of assessing which part of the power consumption comes from which application when more than one application runs on a node, the queuing system (SLURM) was configured to run jobs in exclusive mode (one job per node). Such a configuration is often used for at least a dedicated part of HPC resources. The advantage of the exclusive-mode scheduling policy is that a job gets all the resources of the assigned nodes for optimal parallel performance, and applications running on the same node do not influence each other. For every configuration of application, node type and CPU frequency we measured the average power consumption of the node and the execution time. These values were used to configure the DCWoRMS environment, providing energy and execution time models. 
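Conceptually, these measurements form a lookup table keyed by application, node type and CPU frequency. The Java sketch below shows one possible shape of such a profile store; all names are hypothetical and serve only as an illustration of how the measured data can drive the simulation models.

\begin{verbatim}
import java.util.HashMap;
import java.util.Map;

// One measured configuration: average power and execution time.
record Profile(double avgPowerWatts, double execTimeSec) {}

class ProfileStore {
    private final Map<String, Profile> profiles = new HashMap<>();

    private static String key(String app, String node, double freqGHz) {
        return app + "|" + node + "|" + freqGHz;
    }

    void put(String app, String node, double freqGHz, Profile p) {
        profiles.put(key(app, node, freqGHz), p);
    }

    Profile get(String app, String node, double freqGHz) {
        return profiles.get(key(app, node, freqGHz));
    }

    // Energy [kWh] of one run of the application.
    double energyKWh(String app, String node, double freqGHz) {
        Profile p = get(app, node, freqGHz);
        return p.avgPowerWatts() * p.execTimeSec() / 3.6e6;
    }
}
\end{verbatim}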
      466Based on the models obtained for the considered set of resources and applications, we evaluated a set of resource management strategies in terms of the energy consumption needed to execute workloads varying in load intensity (10\%, 30\%, 50\%, 70\%). 
      467To generate the workloads we used the DCWoRMS workload generator tool with the characteristics summarized in Table~\ref{workloadChatacteristics}. 
     468 
     469\begin {table}[ tp] 
     470 
     471\begin{tabular}{lllllr} 
     472\hline 
     473Characteristic & \multicolumn{4}{c}{Load intensity} & Distribution\\ 
      474& 10\% & 30\% & 50\% & 70\% \\ 
     475\hline 
     476Task Count & \multicolumn{4}{c}{1000} & constant\\ 
     477\hline 
      478Task Interval [s] & 3000 & 1200 & 720 & 520 & Poisson\\ 
     479\hline 
      480\multirow{8}{*}{Number of cores to run}  & \multicolumn{4}{c}{1} & uniform - 30\%\\ 
     481 & \multicolumn{4}{c}{2} & uniform - 30\%\\ 
     482 & \multicolumn{4}{c}{3} & uniform - 10\%\\ 
     483 & \multicolumn{4}{c}{4} & uniform - 10\%\\ 
     484 & \multicolumn{4}{c}{5} & uniform - 5\%\\ 
     485 & \multicolumn{4}{c}{6} & uniform - 5\%\\ 
     486 & \multicolumn{4}{c}{7} & uniform - 5\%\\ 
     487 & \multicolumn{4}{c}{8} & uniform - 5\%\\ 
     488\hline 
      489\multirow{5}{*}{Application type}  & \multicolumn{4}{c}{Abinit} & uniform - 20\%\\ 
     490 & \multicolumn{4}{c}{C-Ray} & uniform - 20\%\\ 
     491 & \multicolumn{4}{c}{Tar} & uniform - 20\%\\ 
     492 & \multicolumn{4}{c}{Linpack} & uniform - 20\%\\ 
      493 & \multicolumn{4}{c}{FFTE} & uniform - 20\%\\ 
     494 
     495\hline 
     496\end{tabular} 
     497\caption {\label{workloadChatacteristics} Workload characteristics} 
     498\end {table} 
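A workload with these characteristics can be generated by sampling exponential inter-arrival times (a Poisson arrival process) and categorical distributions for the number of cores and the application type. The sketch below illustrates the idea; it is not the DCWoRMS workload generator itself, and all names are hypothetical.

\begin{verbatim}
import java.util.Random;

// Minimal workload-generation sketch -- illustration only.
class WorkloadSketch {
    private final Random rng = new Random(42);

    // Poisson arrivals: exponentially distributed inter-arrival
    // times, e.g. mean 520 s for the 70% load intensity.
    double nextInterArrivalSec(double meanSec) {
        return -meanSec * Math.log(1.0 - rng.nextDouble());
    }

    // Categorical sampling, e.g. cores with probabilities
    // {1:30%, 2:30%, 3:10%, 4:10%, 5:5%, 6:5%, 7:5%, 8:5%}.
    int sample(int[] values, double[] probs) {
        double u = rng.nextDouble(), acc = 0.0;
        for (int i = 0; i < values.length; i++) {
            acc += probs[i];
            if (u < acc) return values[i];
        }
        return values[values.length - 1];
    }
}
\end{verbatim}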
     499 
      500The execution time of each application is based on the measurements collected on our testbed. 
      501In all cases we assumed that tasks are scheduled and served in order of their arrival (FIFO with easy backfilling).  
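For clarity, this discipline can be summarized as follows: jobs start in arrival order; when the job at the head of the queue does not fit, it receives a reservation derived from the estimated completion times of running jobs, and later jobs may start ahead of it only if they do not delay that reservation. The sketch below, simplified to a single pool of identical cores and using hypothetical types, is one way to express this conservative variant; it is not the DCWoRMS implementation.

\begin{verbatim}
import java.util.*;

// Simplified FIFO + EASY-backfilling sketch -- illustration only.
class EasyBackfill {
    record Job(int cores, double estRuntimeSec) {}
    record Running(int cores, double endTimeSec) {}

    // Returns the jobs allowed to start at time 'now';
    // 'running' is updated in place with newly started jobs.
    List<Job> schedule(double now, int freeCores,
                       Deque<Job> queue, List<Running> running) {
        List<Job> started = new ArrayList<>();
        // 1. Plain FIFO: start head jobs while they fit.
        while (!queue.isEmpty() && queue.peek().cores() <= freeCores) {
            Job j = queue.poll();
            freeCores -= j.cores();
            running.add(new Running(j.cores(), now + j.estRuntimeSec()));
            started.add(j);
        }
        if (queue.isEmpty()) return started;
        // 2. Reserve a start time for the blocked head job.
        Job head = queue.peek();
        double reservation = reservationTime(freeCores, head, running);
        // 3. Backfill: a later job may start only if it fits now
        //    and finishes before the head's reservation.
        Iterator<Job> it = queue.iterator();
        it.next(); // skip the blocked head job
        while (it.hasNext()) {
            Job j = it.next();
            if (j.cores() <= freeCores
                    && now + j.estRuntimeSec() <= reservation) {
                freeCores -= j.cores();
                running.add(new Running(j.cores(), now + j.estRuntimeSec()));
                started.add(j);
                it.remove();
            }
        }
        return started;
    }

    // Earliest time enough cores free up for the head job, assuming
    // running jobs end at their estimated end times.
    private double reservationTime(int freeCores, Job head,
                                   List<Running> running) {
        List<Running> byEnd = new ArrayList<>(running);
        byEnd.sort(Comparator.comparingDouble(Running::endTimeSec));
        int avail = freeCores;
        for (Running r : byEnd) {
            avail += r.cores();
            if (avail >= head.cores()) return r.endTimeSec();
        }
        return Double.POSITIVE_INFINITY; // job larger than the machine
    }
}
\end{verbatim}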
     502 
     503\subsection{Computational analysis} 
     504 
      506 
      507The following section discusses the results obtained for the workload with load intensity equal to 70\%, in the light of five resource management and scheduling strategies.  
      508The first policy we considered was a strategy in which tasks were assigned to nodes at random, with the restriction that they could be assigned only to node types on which the application could be executed and for which we had the corresponding power consumption and execution time values. The random strategy serves only as a reference and is later used to assess the energy-efficiency benefits of more sophisticated algorithms. Two versions of the strategy were considered: the former, in which unused nodes are not switched off, which is still the primary case in many HPC centers, and the latter, increasingly popular due to energy costs, in which unused nodes are switched off to reduce the total energy consumption. 
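A sketch of this reference policy, reusing the hypothetical ProfileStore from the methodology subsection, could look as follows.

\begin{verbatim}
import java.util.List;
import java.util.Random;

// Random reference policy -- illustration only: pick uniformly
// among node types for which a measured profile of the
// application exists (i.e. the application can run there).
class RandomPolicy {
    private final Random rng = new Random();

    // Assumes at least one eligible node type exists.
    String selectNodeType(String app, double freqGHz,
                          List<String> nodeTypes, ProfileStore store) {
        List<String> eligible = nodeTypes.stream()
            .filter(n -> store.get(app, n, freqGHz) != null)
            .toList();
        return eligible.get(rng.nextInt(eligible.size()));
    }
}
\end{verbatim}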
     509 
     510 
     511\begin{figure}[h!] 
     512\centering 
     513\includegraphics[width = 12cm]{fig/70r.png} 
     514\caption{\label{fig:70r} Random strategy} 
     515\end{figure} 
     516 
      517\textbf{total energy usage [kWh]}: 46.883\\ 
      518\textbf{mean power consumption [W]}: 316.17\\ 
      519\textbf{workload completion [s]}: 266,347 
     520 
     521\begin{figure}[h!] 
     522\centering 
     523\includegraphics[width = 6cm]{fig/70rnpm.png} 
     524\caption{\label{fig:70rnpm} Random + switching off unused nodes strategy} 
     525\end{figure} 
     526 
      527\textbf{total energy usage [kWh]}: 36.705\\ 
      528\textbf{mean power consumption [W]}: 247.53\\ 
      529\textbf{workload completion [s]}: 266,347 
     530 
      531The next two evaluated resource management strategies try to decrease the total energy consumption needed to execute the whole workload, taking into account differences between application and hardware profiles. We tried to match both profiles to find the more energy-efficient assignment. In the first case we assumed that there is again no possibility of switching off unused nodes; thus, for the whole time needed to execute the workload, nodes consume at least the idle-state power. To obtain the minimal energy consumption, tasks have to be assigned to the node type for which the difference between the energy consumed by the node running the application and the energy consumed in the idle state is minimal. 
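Under this assumption the idle draw of every node is paid for the whole makespan anyway, so only the increase over idle matters. Denoting by $P_{busy}(a,n)$ and $t(a,n)$ the measured power draw and execution time of application $a$ on node type $n$, and by $P_{idle}(n)$ the idle power, the stated criterion can be written as
\begin{equation}
n^{*} = \arg\min_{n} \left( P_{busy}(a,n) - P_{idle}(n) \right) \cdot t(a,n).
\end{equation}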
     532 
     533 
     534\begin{figure}[h!] 
     535\centering 
     536\includegraphics[width = 12cm]{fig/70eo.png} 
     537\caption{\label{fig:70eo} Energy usage optimization strategy} 
     538\end{figure} 
     539 
      540\textbf{total energy usage [kWh]}: 46.305\\ 
      541\textbf{mean power consumption [W]}: 311.94\\ 
      542\textbf{workload completion [s]}: 265,822 
     543 
     544 
      545The next strategy is similar to the previous one: when assigning a task to a node, we still take the application and hardware profiles into consideration, but in this case we assume that the system supports switching off unused nodes. The minimal energy consumption is then achieved by assigning each task to the node for which the product of power consumption and execution time is minimal. 
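With node switch-off available, the idle draw of an unused node no longer accrues, and the stated criterion reduces to the full energy of the run:
\begin{equation}
n^{*} = \arg\min_{n} P_{busy}(a,n) \cdot t(a,n).
\end{equation}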
     546 
     547 
     548 
     549\begin{figure}[h!] 
     550\centering 
     551\includegraphics[width = 12cm]{fig/70eonpm.png} 
     552\caption{\label{fig:70eonpm} Energy usage optimization + switching off unused nodes strategy} 
     553\end{figure} 
     554 
      555\textbf{total energy usage [kWh]}: 30.568\\ 
      556\textbf{mean power consumption [W]}: 206.15\\ 
      557\textbf{workload completion [s]}: 264,944 
     558 
      559The last case we considered is a modification of one of the previous strategies, taking the energy efficiency of the nodes into account. We assume that tasks do not have deadlines and that the only criterion taken into consideration is the total energy consumption. All the considered workloads were executed on the testbed configured for three different CPU frequencies: the lowest, a medium and the highest one. The experiment was intended to check whether the benefit of running the workload at a less power-consuming CPU frequency is not offset by the prolonged execution time of the workload. 
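In other words, for each available frequency $f$ the experiment compares the total energy
\begin{equation}
E(f) = P(f) \cdot t(f), \qquad f \in \{f_{low}, f_{mid}, f_{high}\},
\end{equation}
where both the power draw $P(f)$ and the execution time $t(f)$ depend on the frequency; downgrading pays off only if the reduction in power outweighs the prolonged execution time.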
     560 
     561 
     562\begin{figure}[h!] 
     563\centering 
     564\includegraphics[width = 12cm]{fig/70dfs.png} 
     565\caption{\label{fig:70dfs} Frequency downgrading strategy} 
     566\end{figure} 
     567 
      568\textbf{total energy usage [kWh]}: 77.108\\ 
      569\textbf{mean power consumption [W]}: 260.57\\ 
      570\textbf{workload completion [s]}: 445,886 
     571 
    466572 
    467573.... 
     
    522628% \bibitem{} 
    523629 
    524 \bibitem[15]{fit4green} [15] A. Berl, E. Gelenbe, M. di Girolamo, G. Giuliani, H. de Meer, M.-Q. Dang, K. Pentikousis. Energy-Efficient Cloud Computing. The Computer Journal, 53(7), 2010. 
     630\bibitem{fit4green} A. Berl, E. Gelenbe, M. di Girolamo, G. Giuliani, H. de Meer, M.-Q. Dang, K. Pentikousis. Energy-Efficient Cloud Computing. The Computer Journal, 53(7), 2010. 
    525631 
    526632\bibitem{CloudSim} Rodrigo N. Calheiros, Rajiv Ranjan, Anton Beloglazov, Cesar A. F. De Rose, and Rajkumar Buyya, CloudSim: A Toolkit for Modeling and Simulation of Cloud Computing Environments and Evaluation of Resource Provisioning Algorithms, Software: Practice and Experience (SPE), Volume 41, Number 1, Pages: 23-50, ISSN: 0038-0644, Wiley Press, New York, USA, January, 2011. 
     
    530636\bibitem{DCD_Romonet} http://www.datacenterdynamics.com/blogs/ian-bitterlin/it-does-more-it-says-tin\%E2\%80\%A6 
    531637 
    532 \bibitem[16]{networks} [16] E. Gelenbe and C. Morfopoulou. Power savings in packet networks via optimised routing. Mobile Networks and Applications, 17(1):152–159, February 2012. 
     638\bibitem{networks} E. Gelenbe and C. Morfopoulou. Power savings in packet networks via optimised routing. Mobile Networks and Applications, 17(1):152–159, February 2012. 
    533639 
    534640\bibitem{Ghislain} Ghislain Landry Tsafack Chetsa, Laurent Lefèvre, Jean-Marc Pierson, Patricia Stolf, Georges Da Costa. “DNA-inspired Scheme for Building the Energy Profile of HPC Systems”. In: International Workshop on Energy-Efficient Data Centres, Madrid, Springer, 2012 
    535641 
    536 \bibitem[6]{games} [6] A. Kipp, L. Schubert, J. Liu, T. Jiang, W. Christmann, M. vor dem Berge (2011). Energy Consumption Optimisation in HPC Service Centres, Proceedings of the Second International Conference on Parallel, Distributed, Grid and Cloud Computing for Engineering, B.H.V. Topping and P. Iványi, (Editors), Civil-Comp Press, Stirlingshire, Scotland 
     642\bibitem{games} A. Kipp, L. Schubert, J. Liu, T. Jiang, W. Christmann, M. vor dem Berge (2011). Energy Consumption Optimisation in HPC Service Centres, Proceedings of the Second International Conference on Parallel, Distributed, Grid and Cloud Computing for Engineering, B.H.V. Topping and P. Iványi, (Editors), Civil-Comp Press, Stirlingshire, Scotland 
    537643 
    538644\bibitem{GreenCloud} D. Kliazovich, P. Bouvry, and S. U. Khan, A Packet-level Simulator of Energy- aware Cloud Computing Data Centers, Journal of Supercomputing, vol. 62, no. 3, pp. 1263-1283, 2012 
    539645 
    540 \bibitem[17]{sla} [17] S. Klingert, T. Schulze, C. Bunse. GreenSLAs for the Energy-efficient Management of Data Centres. 2nd International Conference on Energy-Efficient Computing and Networking (e-Energy), 2011. 
     646\bibitem{sla} S. Klingert, T. Schulze, C. Bunse. GreenSLAs for the Energy-efficient Management of Data Centres. 2nd International Conference on Energy-Efficient Computing and Networking (e-Energy), 2011. 
    541647 
    542648\bibitem{GSSIM} S. Bak, M. Krystek, K. Kurowski, A. Oleksiak, W. Piatek and J. Weglarz, GSSIM - a Tool for Distributed Computing Experiments, Scientific Programming Journal, vol. 19, no. 4, pp. 231-251, 2011. 
     
    544650\bibitem{GSSIM_Energy} M. Krystek, K. Kurowski, A. Oleksiak, W. Piatek, Energy-aware simulations with GSSIM. Proceedings of the COST Action IC0804 on Energy Efficiency in Large Scale Distributed Systems, 2010, pp. 55-58. 
    545651 
    546 \bibitem[2]{hintemann} [2] Hintemann, R., Fichter, K. (2010). Materialbestand der Rechenzentren in Deutschland, Eine Bestandsaufnahme zur Ermittlung von Ressourcen- und Energieeinsatz, UBA, Texte, 55/2010 
    547  
    548 \bibitem[8]{koomey} 
    549 [8] Koomey, Jonathan. 2008. "Worldwide electricity used in data centers." Environmental Research Letters. vol. 3, no. 034008. September 23 
     652\bibitem{hintemann} Hintemann, R., Fichter, K. (2010). Materialbestand der Rechenzentren in Deutschland, Eine Bestandsaufnahme zur Ermittlung von Ressourcen- und Energieeinsatz, UBA, Texte, 55/2010 
     653 
     654\bibitem{koomey} 
     655Koomey, Jonathan. 2008. "Worldwide electricity used in data centers." Environmental Research Letters. vol. 3, no. 034008. September 23 
    550656 
    551657 
     
    562668 
    563669 
    564 \bibitem[19]{colt} [19] Colt Modular Data Centre, http://www.colt.net/uk/en/products-services/data-centre-services/modular-data-centre-en.htm 
    565  
    566 \bibitem[20]{coolemall} [20] The CoolEmAll project website, http://coolemall.eu  
    567  
    568 \bibitem[21]{ecocooling} [21] EcoCooling, http://www.ecocooling.org 
    569  
    570 \bibitem[22]{montblanc} [22] The MontBlanc project website,  http://www.montblanc-project.eu/ 
    571  
    572 \bibitem[23]{pue} [23] The Green Grid Data Center Power Efficiency Metrics: PUE and DCiE, http://www.thegreengrid.org/Global/Content/white-papers/The-Green-Grid-Data-Center-Power-Efficiency-Metrics-PUE-and-DCiE 
    573  
    574 \bibitem[24]{sgi} [24] SGI ICE Cube Air, http://www.sgi.com/products/data\_center/ice\_cube\_air/ 
     670\bibitem{colt} Colt Modular Data Centre, http://www.colt.net/uk/en/products-services/data-centre-services/modular-data-centre-en.htm 
     671 
     672\bibitem{coolemall} The CoolEmAll project website, http://coolemall.eu  
     673 
     674\bibitem{ecocooling} EcoCooling, http://www.ecocooling.org 
     675 
     676\bibitem{montblanc} The MontBlanc project website,  http://www.montblanc-project.eu/ 
     677 
     678\bibitem{pue} The Green Grid Data Center Power Efficiency Metrics: PUE and DCiE, http://www.thegreengrid.org/Global/Content/white-papers/The-Green-Grid-Data-Center-Power-Efficiency-Metrics-PUE-and-DCiE 
     679 
     680\bibitem{sgi} SGI ICE Cube Air, http://www.sgi.com/products/data\_center/ice\_cube\_air/ 
    575681 
    576682 