Timestamp:
03/18/13 15:37:28
Author:
wojtekp
Message:
 
Location:
papers/SMPaT-2012_DCWoRMS
Files:
5 edited

  • papers/SMPaT-2012_DCWoRMS/elsarticle-DCWoRMS.aux

    r743 r959  
 \newlabel{fig:arch}{{1}{6}}
 \@writefile{toc}{\contentsline {subsection}{\numberline {3.1}Architecture}{6}}
-\@writefile{toc}{\contentsline {subsection}{\numberline {3.2}Workload modeling}{6}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {3.2}Workload modeling}{7}}
 \@writefile{toc}{\contentsline {subsection}{\numberline {3.3}Resource modeling}{7}}
 \@writefile{lof}{\contentsline {figure}{\numberline {2}{\ignorespaces  Levels of information about jobs}}{8}}
     
 \citation{GSSIM_Energy}
 \@writefile{toc}{\contentsline {subsection}{\numberline {3.4}Energy management concept in DCWoRMS}{9}}
-\@writefile{toc}{\contentsline {subsubsection}{\numberline {3.4.1}Power management}{9}}
-\@writefile{lof}{\contentsline {figure}{\numberline {3}{\ignorespaces  Power consumption modeling}}{10}}
-\newlabel{fig:powerModel}{{3}{10}}
+\@writefile{toc}{\contentsline {subsubsection}{\numberline {3.4.1}Power management}{10}}
 \@writefile{toc}{\contentsline {paragraph}{\textbf  {Power profile}}{10}}
 \@writefile{toc}{\contentsline {paragraph}{\textbf  {Power consumption model}}{10}}
 \@writefile{toc}{\contentsline {paragraph}{\textbf  {Power management interface}}{10}}
+\@writefile{lof}{\contentsline {figure}{\numberline {3}{\ignorespaces  Power consumption modeling}}{11}}
+\newlabel{fig:powerModel}{{3}{11}}
 \@writefile{toc}{\contentsline {subsection}{\numberline {3.5}Application performance modeling}{11}}
 \newlabel{sec:apps}{{3.5}{11}}
     
 \@writefile{toc}{\contentsline {subsection}{\numberline {4.3}Resource load}{15}}
 \newlabel{eq:dynamic}{{4}{15}}
-\citation{e2dc12}
 \newlabel{eq:model}{{7}{16}}
 \@writefile{toc}{\contentsline {subsection}{\numberline {4.4}Application specific}{16}}
     
 \@writefile{toc}{\contentsline {section}{\numberline {5}Experiments and evaluation}{16}}
 \newlabel{sec:experiments}{{5}{16}}
+\citation{e2dc12}
+\citation{abinit}
+\citation{cray}
 \@writefile{toc}{\contentsline {subsection}{\numberline {5.1}Testbed description}{17}}
 \@writefile{lot}{\contentsline {table}{\numberline {1}{\ignorespaces  RECS system configuration}}{17}}
 \newlabel{testBed}{{1}{17}}
 \@writefile{toc}{\contentsline {subsection}{\numberline {5.2}Evaluated applications}{17}}
+\citation{linpack}
+\citation{tar}
+\citation{fft}
 \@writefile{toc}{\contentsline {subsection}{\numberline {5.3}Models}{18}}
 \newlabel{sec:models}{{5.3}{18}}
 \@writefile{lot}{\contentsline {table}{\numberline {2}{\ignorespaces  $P_{cpubase}$ values in Watts}}{18}}
 \newlabel{nodeBasePowerUsage}{{2}{18}}
-\@writefile{lot}{\contentsline {table}{\numberline {3}{\ignorespaces  $P_{app}$ values in Watts}}{18}}
-\newlabel{appPowerUsage}{{3}{18}}
+\@writefile{lot}{\contentsline {table}{\numberline {3}{\ignorespaces  $P_{app}$ values in Watts}}{19}}
+\newlabel{appPowerUsage}{{3}{19}}
 \@writefile{lot}{\contentsline {table}{\numberline {4}{\ignorespaces  Power models error in \%}}{19}}
 \newlabel{expPowerModels}{{4}{19}}
     
 \@writefile{toc}{\contentsline {subsection}{\numberline {5.5}Computational analysis}{20}}
 \@writefile{toc}{\contentsline {subsubsection}{\numberline {5.5.1}Random approach}{21}}
-\@writefile{lof}{\contentsline {figure}{\numberline {6}{\ignorespaces  Random strategy}}{21}}
-\newlabel{fig:70r}{{6}{21}}
+\@writefile{toc}{\contentsline {subsubsection}{\numberline {5.5.2}Energy optimization}{21}}
+\@writefile{lof}{\contentsline {figure}{\numberline {6}{\ignorespaces  Random strategy}}{22}}
+\newlabel{fig:70r}{{6}{22}}
 \@writefile{lof}{\contentsline {figure}{\numberline {7}{\ignorespaces  Random + switching off unused nodes strategy}}{22}}
 \newlabel{fig:70rnpm}{{7}{22}}
-\@writefile{toc}{\contentsline {subsubsection}{\numberline {5.5.2}Energy optimization}{22}}
 \@writefile{lot}{\contentsline {table}{\numberline {6}{\ignorespaces  Measured power of testbed nodes in idle state}}{23}}
 \newlabel{idlePower}{{6}{23}}
-\@writefile{lof}{\contentsline {figure}{\numberline {8}{\ignorespaces  Energy usage optimization strategy}}{23}}
-\newlabel{fig:70eo}{{8}{23}}
-\@writefile{lof}{\contentsline {figure}{\numberline {9}{\ignorespaces  Energy usage optimization + switching off unused nodes strategy}}{24}}
-\newlabel{fig:70eonpm}{{9}{24}}
-\@writefile{toc}{\contentsline {subsubsection}{\numberline {5.5.3}Downgrading frequency}{25}}
-\@writefile{lof}{\contentsline {figure}{\numberline {10}{\ignorespaces  Frequency downgrading strategy}}{25}}
-\newlabel{fig:70dfs}{{10}{25}}
+\@writefile{lof}{\contentsline {figure}{\numberline {8}{\ignorespaces  Energy usage optimization strategy}}{24}}
+\newlabel{fig:70eo}{{8}{24}}
+\@writefile{toc}{\contentsline {subsubsection}{\numberline {5.5.3}Downgrading frequency}{24}}
+\@writefile{lof}{\contentsline {figure}{\numberline {9}{\ignorespaces  Energy usage optimization + switching off unused nodes strategy}}{25}}
+\newlabel{fig:70eonpm}{{9}{25}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {5.6}Discussion}{25}}
+\@writefile{lof}{\contentsline {figure}{\numberline {10}{\ignorespaces  Frequency downgrading strategy}}{26}}
+\newlabel{fig:70dfs}{{10}{26}}
 \@writefile{lof}{\contentsline {figure}{\numberline {11}{\ignorespaces  Schedules obtained for Random strategy (left) and Random + lowest frequency strategy (right) for 10\% of system load}}{26}}
 \newlabel{fig:dfsComp}{{11}{26}}
-\@writefile{toc}{\contentsline {subsection}{\numberline {5.6}Discussion}{26}}
-\@writefile{lot}{\contentsline {table}{\numberline {7}{\ignorespaces  Energy usage [kWh] for different level of system load. R - Random, R+NPM - Random + node power management, EO - Energy optimization, EO+NPM - Energy optimization + node power management, R+LF - Random + lowest frequency}}{26}}
-\newlabel{loadEnergy}{{7}{26}}
+\@writefile{lot}{\contentsline {table}{\numberline {7}{\ignorespaces  Energy usage [kWh] for different level of system load. R - Random, R+NPM - Random + node power management, EO - Energy optimization, EO+NPM - Energy optimization + node power management, R+LF - Random + lowest frequency}}{27}}
+\newlabel{loadEnergy}{{7}{27}}
 \@writefile{lot}{\contentsline {table}{\numberline {8}{\ignorespaces  Makespan [s] for different level of system load. R - Random, R+NPM - Random + node power management, EO - Energy optimization, EO+NPM - Energy optimization + node power management, R+LF - Random + lowest frequency}}{27}}
 \newlabel{loadMakespan}{{8}{27}}
 \@writefile{toc}{\contentsline {section}{\numberline {6}Conclusions and future work}{27}}
 \bibcite{fit4green}{{1}{}{{}}{{}}}
-\newlabel{}{{6}{28}}
 \bibcite{e2dc12}{{2}{}{{}}{{}}}
 \bibcite{CloudSim}{{3}{}{{}}{{}}}
     
 \bibcite{Ghislain}{{7}{}{{}}{{}}}
 \bibcite{games}{{8}{}{{}}{{}}}
+\newlabel{}{{6}{29}}
 \bibcite{GreenCloud}{{9}{}{{}}{{}}}
 \bibcite{GSSIM}{{10}{}{{}}{{}}}
     
 \bibcite{koomey}{{12}{}{{}}{{}}}
 \bibcite{fit4green_scheduler}{{13}{}{{}}{{}}}
-\bibcite{GWF}{{14}{}{{}}{{}}}
-\bibcite{SLURM}{{15}{}{{}}{{}}}
-\bibcite{SWF}{{16}{}{{}}{{}}}
-\bibcite{TORQUE}{{17}{}{{}}{{}}}
-\bibcite{colt}{{18}{}{{}}{{}}}
-\bibcite{coolemall}{{19}{}{{}}{{}}}
-\bibcite{ecocooling}{{20}{}{{}}{{}}}
-\bibcite{ff}{{21}{}{{}}{{}}}
-\bibcite{pue}{{22}{}{{}}{{}}}
-\bibcite{sgi}{{23}{}{{}}{{}}}
-\global\NAT@numberstrue
+\bibcite{abinit}{{14}{}{{}}{{}}}
+\bibcite{cray}{{15}{}{{}}{{}}}
+\bibcite{fft}{{16}{}{{}}{{}}}
+\bibcite{linpack}{{17}{}{{}}{{}}}
+\bibcite{tar}{{18}{}{{}}{{}}}
+\bibcite{GWF}{{19}{}{{}}{{}}}
+\bibcite{SLURM}{{20}{}{{}}{{}}}
+\bibcite{SWF}{{21}{}{{}}{{}}}
+\bibcite{TORQUE}{{22}{}{{}}{{}}}
+\bibcite{colt}{{23}{}{{}}{{}}}
+\bibcite{coolemall}{{24}{}{{}}{{}}}
+\bibcite{ecocooling}{{25}{}{{}}{{}}}
+\bibcite{ff}{{26}{}{{}}{{}}}
+\bibcite{pue}{{27}{}{{}}{{}}}
+\bibcite{sgi}{{28}{}{{}}{{}}}
+\providecommand\NAT@force@numbers{}\NAT@force@numbers
  • papers/SMPaT-2012_DCWoRMS/elsarticle-DCWoRMS.fdb_latexmk

    r733 r959  
 # Fdb version 2
-["pdflatex"] 1356960904 "elsarticle-DCWoRMS.tex" "elsarticle-DCWoRMS.pdf" "elsarticle-DCWoRMS"
+["pdflatex"] 1363615014 "elsarticle-DCWoRMS.tex" "elsarticle-DCWoRMS.pdf" "elsarticle-DCWoRMS"
   "/usr/local/texlive/2010/texmf-dist/tex/context/base/supp-pdf.mkii" 1251025892 71625 fad1c4b52151c234b6873a255b0ad6b3 ""
   "/usr/local/texlive/2010/texmf-dist/tex/generic/oberdiek/etexcmds.sty" 1267408169 5670 cacb018555825cfe95cd1e1317d82c1d ""
     
    3030  "/usr/local/texlive/2010/texmf-dist/tex/latex/psnfss/upsy.fd" 1137110629 148 2da0acd77cba348f34823f44cabf0058 "" 
    3131  "/usr/local/texlive/2010/texmf-dist/tex/latex/psnfss/upzd.fd" 1137110629 148 b2a94082cb802f90d3daf6dd0c7188a0 "" 
    32   "elsarticle-DCWoRMS.aux" 1356960906 8440 d1679761818322ab4dee3f565206c8c9 "" 
    33   "elsarticle-DCWoRMS.spl" 1356960904 0 d41d8cd98f00b204e9800998ecf8427e "" 
    34   "elsarticle-DCWoRMS.tex" 1356960900 71240 e0bb64f6f7db79f0fc062ce560a2186d "" 
     32  "elsarticle-DCWoRMS.aux" 1363615016 7611 c2f64ef5d12f90829f53d5f763084529 "" 
     33  "elsarticle-DCWoRMS.spl" 1363615014 0 d41d8cd98f00b204e9800998ecf8427e "" 
     34  "elsarticle-DCWoRMS.tex" 1363614992 79877 7aa3809b822e867fddc7d9412c8bf241 "" 
    3535  "elsarticle.cls" 1352447924 26095 ad44f4892f75e6e05dca57a3581f78d1 "" 
    3636  "fig/70dfs.png" 1356617710 212573 e013d714dd1377384ed7793222210051 "" 
     
    3939  "fig/70r.png" 1356617710 288840 3392dd2493597d4774f8e6039cd8eb2d "" 
    4040  "fig/70rnpm.png" 1356619980 61435 4d78965725cd9c1fb907f7d1af9421e7 "" 
    41   "fig/airModel.png" 1353405890 41411 f33639119a59ae1d2eabb277137f0042 "" 
    4241  "fig/arch.png" 1353403503 184917 61b6fddc71ce603779f09b272cd2f164 "" 
    4342  "fig/dfsComp.png" 1356777108 463823 66bdecf7e173c8da341c4e74dc7d8027 "" 
    4443  "fig/jobsStructure.png" 1353403491 128220 3ee11e5fa0d14d8265671725666ef6f7 "" 
    45   "fig/power-fans.png" 1354275938 26789 030a69cecd0eda7c4173d2a6467b132b "" 
     44  "fig/power-fans.png" 1363609674 21709 70f5f8ab64cb6acc13f333943e9388a4 "" 
    4645  "fig/powerModel.png" 1353405780 50716 4660bc6bdf6979777f17fbd82ed99f17 "" 
    4746  "fig/power_default.pdf" 1354275938 69911 0f497aa15baa608eb1f7a90d355bf91e "" 
    48   "fig/temp-fans.png" 1354275938 24359 e12f56fb169d15df2e343daeb0be1b8a "" 
    49   "fig/tempModel.png" 1353418849 63988 4fdd5131fabb84b42453ee67b431fd58 "" 
  • papers/SMPaT-2012_DCWoRMS/elsarticle-DCWoRMS.tex

    r743 r959  
 Therefore, there is a need for simulation tools and models that approach the problem from the perspective of end users and take into account all the factors that are critical to understanding and improving the energy efficiency of data centers, in particular, hardware characteristics, applications, management policies, and cooling.
 These tools should support data center designers and operators by answering questions about how specific application types, levels of load, hardware specifications, physical arrangements, cooling technology, etc. impact overall data center energy efficiency.
-There are various tools that allow simulation of computing infrastructures. On one hand they include advanced packages for modeling heat transfer and energy consumption in data centers \cite{ff} or tools concentrating on their financial analysis \cite{DCD_Romonet}. On the other hand, there are simulators focusing on computations such as CloudSim \cite{CloudSim}. The CoolEmAll project aims to integrate these approaches and enable advances analysis of data center efficiency taking into account all these aspects \cite{e2dc12}\cite{coolemall}.
+There are various tools that allow simulation of computing infrastructures. On the one hand, they include advanced packages for modeling heat transfer and energy consumption in data centers \cite{ff} or tools concentrating on their financial analysis \cite{DCD_Romonet}. On the other hand, there are simulators focusing on computations, such as CloudSim \cite{CloudSim}. The CoolEmAll project aims to integrate these approaches and enable advanced analysis of data center efficiency taking into account all these aspects \cite{e2dc12,coolemall}.
 
 One of the results of the CoolEmAll project is the Data Center Workload and Resource Management Simulator (DCWoRMS), which enables modeling and simulation of computing infrastructures to estimate their performance, energy consumption, and energy-efficiency metrics for diverse workloads and management policies.
     
 GreenCloud is a C++ based simulation environment for studying the energy-efficiency of cloud computing data centers. CloudSim is a simulation tool that allows modeling of cloud computing environments and evaluation of resource provisioning algorithms. Finally, the DCSG Simulator is a data center cost and energy simulator calculating the power and cooling schema of the data center equipment.
 
-The scope of the aforementioned toolkits concerns the data center environments. However, all of them, except DCWoRMS presented in this paper, imposes and restricts user in terms of modeled resources. GreenCloud defines switches, links and servers that are responsible for task execution and may contain different scheduling strategies. Contrary to what the GreenCloud name may suggest, it does not allow testing the impact of a virtualization-based approaches. CloudSim allows creating a simple resources hierarchy consisting of machines and processors. To simulate a real cloud computing data center, it provides an extra virtualization layer responsible for the VM provisioning process and managing the VM life cycle. In DCSG Simulator user is able to take into account a variety of mechanical and electrical devices as well as the IT equipment and define for each of them numerous factors, including device capacity and efficiency as well as the data center conditions.
-
-The general idea behind all of the analyzed tools is to enable studies concerning energy efficiency in distributed infrastructures. GreenCloud approach enables simulation of energy usage associated with computing servers and network components. For example, the server power consumption model implemented in GreenCloud depends on the server state as well as its utilization. The CloudSim framework provides basic models to evaluate energy-conscious provisioning policies. Each computing node can be extended with a power model that estimates the current the power consumption. Within the DCSG Simulator, performance of each data center equipment (facility and IT) is determined by a combination of factors, including workload, local conditions, the manufacturer's specifications and the way in which it is utilized. In DCWoRMS, the plugin idea has been introduced that offers emulating the behavior of computing resources in terms of power consumption. Additionally, it delivers detailed information concerning resource and application characteristics needed to define more sophisticated power draw models.
-
-In order to emulate the behavior of real computing systems, green computing simulator should address also the energy-aware resource management. In this term, GreenCloud offers capturing the effects of both of the Dynamic Voltage and Frequency Scaling (DVFS) and Dynamic Power Management schemes. At the links and switches level, it supports downgrading the transmission rate and putting network equipment into a sleep mode. CloudSim comes with a set of predefined and extensible policies that manage the process of VM migrations in order to optimize the power consumption. However, the proposed approach is not sufficient for modeling more sophisticated policies like frequency scaling techniques and managing resource power states. Romonet’s tool is told to implement a set of basic energy-efficient rules that have been developed on the basis of detailed understanding of the data center as a system. The output of this simulation is a set of energy, like PUE, and cost data representing the IT devices. DCWoRMS introduces a dedicated interface that provides methods to obtain the detailed information about each resource and its components energy consumption and allows changing its current energy state. Availability of these interfaces in scheduling plugin supports implementation of various strategies such as centralized energy management, self-management of computing resources and mixed models.
+The scope of the aforementioned toolkits concerns data center environments. However, all of them, except the DCWoRMS presented in this paper, restrict the user in terms of the modeled resources. GreenCloud defines switches, links and servers that are responsible for task execution and may contain different scheduling strategies. Contrary to what the GreenCloud name may suggest, it does not allow testing the impact of virtualization-based approaches. CloudSim allows creating a simple resource hierarchy consisting of machines and processors. To simulate a real cloud computing data center, it provides an extra virtualization layer responsible for the VM provisioning process and managing the VM life cycle. In the DCSG Simulator, the user is able to take into account a variety of mechanical and electrical devices as well as the IT equipment, and to define for each of them numerous factors, including device capacity and efficiency as well as the data center conditions.
+
+The general idea behind all of the analyzed tools is to enable studies concerning energy efficiency in distributed infrastructures. The GreenCloud approach enables simulation of energy usage associated with computing servers and network components. For example, the server power consumption model implemented in GreenCloud depends on the server state as well as its utilization. The CloudSim framework provides basic models to evaluate energy-conscious provisioning policies. Each computing node can be extended with a power model that estimates the current power consumption. Within the DCSG Simulator, the performance of each piece of data center equipment (facility and IT) is determined by a combination of factors, including workload, local conditions, the manufacturer's specifications and the way in which it is utilized. DCWoRMS introduces the plugin concept, which allows emulating the behavior of computing resources in terms of power consumption. Additionally, it delivers detailed information concerning resource and application characteristics needed to define more sophisticated power draw models.
+
+In order to emulate the behavior of real computing systems, a green computing simulator should also address energy-aware resource management. In this respect, GreenCloud captures the effects of both the Dynamic Voltage and Frequency Scaling (DVFS) and Dynamic Power Management schemes. At the link and switch level, it supports downgrading the transmission rate and putting network equipment into a sleep mode. CloudSim comes with a set of predefined and extensible policies that manage the process of VM migrations in order to optimize the power consumption. However, the proposed approach is not sufficient for modeling more sophisticated policies like frequency scaling techniques and managing resource power states. The DCSG Simulator is said to implement a set of basic energy-efficiency rules that have been developed on the basis of a detailed understanding of the data center as a system. The output of this simulation is a set of energy metrics, like PUE, and cost data representing the IT devices. DCWoRMS introduces a dedicated interface that provides methods to obtain detailed information about the energy consumption of each resource and its components, and allows changing its current energy state. Availability of these interfaces in the scheduling plugin supports implementation of various strategies such as centralized energy management, self-management of computing resources and mixed models.
 
 In terms of application modeling, all tools, except the DCSG Simulator, describe the application with a number of computational and communication requirements. In addition, GreenCloud and DCWoRMS allow introducing QoS requirements (typical for cloud computing applications) by taking into account time constraints during the simulation. The DCSG Simulator, instead of modeling a single application, enables the definition of a workload that leads to a given utilization level. However, only DCWoRMS supports application performance modeling by not only incorporating simple requirements that are taken into account during scheduling, but also by allowing specification of task execution time.
 
-GreenCloud, CloudSim and DCWoRMS are released as Open Source under the GPL. Romonet’s tool is available under an OSL V3.0 open-source license, however, it can be only accessed by the DCSG members.
+GreenCloud, CloudSim and DCWoRMS are released as Open Source under the GPL. The DCSG Simulator is available under an OSL V3.0 open-source license; however, it can only be accessed by DCSG members.
 
 Summarizing, DCWoRMS stands out from other tools due to the flexibility in terms of data center equipment and structure definition.
-Moreover, it allows to associate the energy consumption not only with the current power state and resource utilization but also with the particular set of applications running on it. Moreover, it does not limit the user in defining various types of resource management polices. The main strength of CloudSim lies in implementation of the complex scheduling and task execution schemes involving resource virtualization techniques. However,the energy efficiency aspect is limited only to the VM management, The GreenCloud focuses on data center resources with particular attention to the network infrastructure and the most popular energy management approaches. DCSG simulator allows to take into account also non-computing devices, nevertheless it seems to be hardly customizable to specific workloads and management policies.
+Moreover, it allows associating the energy consumption not only with the current power state and resource utilization but also with the particular set of applications running on the resource, and it does not limit the user in defining various types of resource management policies. The main strength of CloudSim lies in the implementation of complex scheduling and task execution schemes involving resource virtualization techniques; however, the energy efficiency aspect is limited to VM management. GreenCloud focuses on data center resources with particular attention to the network infrastructure and the most popular energy management approaches. The DCSG Simulator also allows taking non-computing devices into account; nevertheless, it seems hard to customize for specific workloads and management policies.
 
 \section{DCWoRMS}
     
 Since applications may vary in their requirements and structure, DCWoRMS gives the user flexibility in defining the application model. Thus, considered workloads may have various shapes and levels of complexity that range from multiple independent jobs, through large-scale parallel applications, up to whole workflows containing time dependencies and precedence constraints between jobs and tasks. Each job may consist of one or more tasks and these can be seen as groups of processes. Moreover, DCWoRMS is able to handle rigid and moldable jobs, as well as pre-emptive ones. To model the application profile in more detail,
 DCWoRMS follows the DNA approach proposed in \cite{Ghislain}. Accordingly, each task can be presented as a sequence of phases, which shows the impact of this task on the resources that run it. Phases are then periods of time where the system is stable (load, network, memory) given a certain threshold. Each phase is linked to values of the system that represent a resource consumption profile. Such a stage could be, for example, described as follows: "60\% CPU, 30\% net, 10\% mem."
-
 Levels of information about incoming jobs are presented in Figure~\ref{fig:jobsStructure}.
 
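As a sketch of the phase-based profile just described, each stage can be represented as a plain record of its duration and resource-consumption fractions. The Python illustration below uses hypothetical names; it is not the DCWoRMS API.

    # Minimal sketch of the DNA-inspired phase profile described above.
    # Class and field names are illustrative, not the DCWoRMS API.
    from dataclasses import dataclass

    @dataclass
    class Phase:
        duration_s: float  # length of the stable period in seconds
        cpu: float         # CPU load fraction, e.g. 0.60
        net: float         # network load fraction, e.g. 0.30
        mem: float         # memory load fraction, e.g. 0.10

    @dataclass
    class TaskProfile:
        name: str
        phases: list

    # A task with a mixed stage ("60% CPU, 30% net, 10% mem") followed by a
    # CPU-bound stage; durations are made up for the example.
    profile = TaskProfile("example-task", [
        Phase(120.0, cpu=0.60, net=0.30, mem=0.10),
        Phase(40.0, cpu=0.95, net=0.01, mem=0.04),
    ])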
     
 The main goal of DCWoRMS is to enable researchers to evaluate various resource management policies in diverse computing environments. To this end, it supports flexible definition of simulated resources both on the physical (computing resources) and the logical (scheduling entities) level. This flexible approach allows modeling of various computing entities consisting of compute nodes, processors and cores. In addition, the detailed location of the given resources can be provided in order to group them and arrange them into physical structures such as racks and containers. Each of the components may be described by different parameters specifying available memory, storage capabilities, processor speed etc. In this way, it is possible to describe the power distribution system and cooling devices. Due to an extensible description, users are able to define a number of experiment-specific and visionary characteristics. Moreover, dedicated profiles can be associated with every component that determine, among others, power, thermal and air throughput properties. The energy estimation plugin can be bundled with each resource. This allows defining various power models that can then be followed by different computing system components. Details concerning the approach to energy-efficiency modeling in DCWoRMS can be found in the next sections.
 
-Scheduling entities allow providing data related to the brokering or queuing system characteristics. Thus, information about available queues, resources associated with them and their parameters like priority, availability of AR mechanism etc. can be defined. Moreover, allocation policy and task scheduling strategy for each scheduling entity can be introduced in form of the reference to an appropriate plugin. DCWoRMS allows building a hierarchy of schedulers corresponding to the hierarchy of resource components over which the task may be distributed.
+Scheduling entities allow providing data related to the brokering or queuing system characteristics. Thus, information about available queues, the resources associated with them and their parameters like priority, availability of an advance reservation (AR) mechanism etc. can be defined. Moreover, the allocation policy and task scheduling strategy for each scheduling entity can be introduced in the form of a reference to an appropriate plugin. DCWoRMS allows building a hierarchy of schedulers corresponding to the hierarchy of resource components over which the tasks may be distributed.
 
 In this way, DCWoRMS supports simulation of a wide scope of physical and logical architectural patterns that may span from a single computing resource up to whole data centers or geographically distributed grids and clouds. In particular, it supports simulating complex distributed architectures containing models of whole data centers, containers, racks, nodes, etc. In addition, new resources and distributed computing entities can easily be added to the DCWoRMS environment in order to enhance the functionality of the tool and address more sophisticated requirements. The granularity of such topologies may also differ, from coarse-grained to very fine-grained models of single cores, memory hierarchies and other hardware details.
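The resource hierarchy and profile bindings described above can be pictured as a nested structure. The sketch below is purely illustrative; DCWoRMS has its own resource description format, and every key here is an assumption made for the example.

    # Hypothetical nested description mirroring the rack/node/processor
    # hierarchy and the profile bindings discussed above; this is NOT the
    # actual DCWoRMS resource description format.
    rack = {
        "name": "rack-1",
        "power_profile": "rack-pdu-profile",         # hypothetical profile id
        "nodes": [{
            "name": "node-1",
            "memory_gb": 4,
            "energy_estimation_plugin": "i7-model",  # plugin bundled per resource
            "processors": [{"model": "Intel i7", "cores": 4,
                            "freqs_ghz": [1.6, 2.4]}],
        }],
    }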
     
 DCWoRMS is complemented with an interface that allows scheduling plugins to collect detailed power information about computing resource components and to change their power states. It enables performing various operations on the given resources, including dynamically changing the frequency level of a single processor, turning computing resources off/on, etc. The activities performed with this interface are reflected in the total amount of energy consumed by the resource during the simulation.
 
-Presence of detailed resource usage information, current resource energy state description and functional energy management interface enables an implementation of energy-aware scheduling algorithms. Resource energy consumption becomes in this context an additional criterion in the scheduling process, which use various techniques to decrease energy consumption, e.g. workload consolidation, moving tasks between resources to reduce a number of running resources, dynamic power management, cutting down CPU frequency, and others.
+The presence of detailed resource usage information, a description of the current resource energy state and a functional energy management interface enables the implementation of energy-aware scheduling algorithms. In this context, resource energy consumption becomes an additional criterion in the scheduling process, which may use various techniques to decrease energy consumption, e.g. workload consolidation, moving tasks between resources to reduce the number of running resources, dynamic power management, cutting down CPU frequency, and others.
 
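A minimal sketch of the kind of power management operations such an interface exposes to scheduling plugins follows; the NodeInterface class, its methods and the power figures are hypothetical stand-ins, not the DCWoRMS API.

    # Sketch of the power management operations described above; all names and
    # values are hypothetical, not the DCWoRMS API.
    class NodeInterface:
        def __init__(self, name, freqs_ghz):
            self.name = name
            self.freqs_ghz = sorted(freqs_ghz)
            self.state, self.freq_ghz = "idle", self.freqs_ghz[-1]

        def power_usage_w(self):
            # Placeholder: a real plugin would query the energy estimation model.
            return {"off": 0.0, "idle": 11.5, "busy": 40.0}[self.state]

        def set_power_state(self, state):   # turn a node off/on
            self.state = state

        def set_frequency(self, f_ghz):     # dynamic frequency scaling
            assert f_ghz in self.freqs_ghz
            self.freq_ghz = f_ghz

    node = NodeInterface("node-1", [1.6, 2.4])
    node.set_frequency(1.6)      # downgrade frequency (cf. the LF policy later)
    node.set_power_state("off")  # switch an unused node off (node power management)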
 %\subsubsection{Air throughput management concept}
     
 
 
-Power function may depend on load and states of resources or even specific applications as explained in \ref{sec:power}. Total energy can be also completed by adding constant power usage of components that does not depend on load or state of resources. 
-
-In large computing systems which are often characterized by high computational density, total energy consumption of computing nodes is not the only result interesting for researchers. Temperature distribution is getting more and more important as it affects energy consumption of cooling devices, which can reach even half of a total data center energy use. In order to obtain accurate values of temperatures heat transfer simulations based on the Computational Fluid Dynamics (CFD) methods  have to be performed. These methods require as an input (i.e. boundary conditions) a heat dissipated by IT hardware and air throughput generated by fans at servers' outlets. Another approach is based on simplified thermal models that without costly CFD calculations provide rough estimations of temperatures. DCWoRMS enables the use of either approaches. In the the former, the output of simulations including power usage of computing nodes in time and air throughput at node outlets can be passed to CFD solver. 
+The power function may depend on the load and states of resources or even on specific applications, as explained in Section~\ref{sec:power}. The total energy can also be completed by adding the constant power usage of components that does not depend on the load or state of resources.
+
+In large computing systems, which are often characterized by high computational density, the total energy consumption of computing nodes is not the only result of interest to researchers. Temperature distribution is becoming more and more important, as it affects the energy consumption of cooling devices, which can reach even half of the total data center energy use. In order to obtain accurate temperature values, heat transfer simulations based on Computational Fluid Dynamics (CFD) methods have to be performed. These methods require as input (i.e. boundary conditions) the heat dissipated by IT hardware and the air throughput generated by fans at servers' outlets. Another approach is based on simplified thermal models that, without costly CFD calculations, provide rough estimations of temperatures. DCWoRMS enables the use of either approach. In the former, the output of simulations, including the power usage of computing nodes over time and the air throughput at node outlets, can be passed to a CFD solver.
 %This option is further elaborated in Section \ref{sec:coolemall}. Simplified thermal models required by the latter approach are proposed in \ref{sec:thermal}.
 
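In equation form, the relation described above could be written as follows; this is an illustration consistent with the text, not a formula quoted from the paper, with $P_{const}$ standing for the load-independent component:

    % Illustrative only: total energy as the time integral of load- and
    % state-dependent power plus a constant, load-independent component.
    E_{total} = \int_{0}^{T} P\bigl(load(t),\, state(t)\bigr)\, dt \;+\; P_{const} \cdot T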
     
 also leads to the reduction of $V_{core}$ and thus the power savings
 from the $P\sim V_{core}^{2}$ relation outweigh the increased computation
-time. However, experiments performed on several HPC servers shown that this dependency does not reflect theoretical shape and is often close to linear as presented in Figure \ref{fig:power_freq}. This phenomenon can be explained by impact of other component than CPU and narrow range of available voltages. A good example of impact by other components is power usage of servers with visible influence of fans as illustrated in Figure \ref{fig:fans_P}.
+time. However, experiments performed on several HPC servers show that this dependency does not follow the theoretical shape and is often close to linear, as presented in Figure \ref{fig:power_freq}. This phenomenon can be explained by the impact of components other than the CPU and the narrow range of available voltages. A good example of the impact of other components is the power usage of servers with a visible influence of fans, as illustrated in Figure \ref{fig:fans_P}.
 
 For these reasons, DCWoRMS allows users to define dependencies between power usage and resource states (such as CPU frequency) in the form of tables or arbitrary functions using energy estimation plugins.
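For background, the textbook CMOS dynamic-power relation behind the $P\sim V_{core}^{2}$ argument is the following (with $C_{eff}$ the effective switched capacitance and $f$ the clock frequency; stated here as standard background, not as a formula from the paper):

    % Standard CMOS dynamic power relation (background only):
    P_{dyn} \approx C_{eff} \cdot V_{core}^{2} \cdot f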
     
 \end{equation}
 
-Within DCWoRMS we built in a static approach model that uses common resource states that affect power usage which are the CPU power states. Hence, with each node power state, understood as a possible operating state (p-state), we associated a power consumption value that derives from the averaged values of measurements obtained for different types of application. We distinguish also an idle state. Therefore, the current power usage of the node, can be expressed as: $P = P_{idle} + P_{f}$ where $P$ denotes power consumed by the node, $P_{idle}$ is a power usage of node in idle state and $P_{f}$ stands for power usage of CPU operating at the given frequency level. Additionally, node power states are taken into account to reflect no (or limited) power usage when a node is off.
+Within DCWoRMS we built in a static-approach model that uses the common resource states affecting power usage, namely the CPU power states. Hence, with each node power state, understood as a possible operating state (p-state), we associated a power consumption value derived from the averaged measurements obtained for different types of application. We also distinguish an idle state. Therefore, the current power usage of the node can be expressed as $P = P_{idle} + P_{f}$, where $P$ denotes the power consumed by the node, $P_{idle}$ is the power usage of the node in the idle state and $P_{f}$ stands for the power usage of the CPU operating at the given frequency level. Additionally, node power states are taken into account to reflect no (or limited) power usage when a node is off.
 
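A small sketch of this static model follows; the idle values are those reported in Table 6 later in the paper, while the per-frequency values are placeholders, not measurements.

    # Sketch of the static model P = P_idle + P_f described above. Idle values
    # follow Table 6; per-frequency values are placeholders, not measurements.
    P_IDLE_W = {"Intel i7": 11.5, "AMD Fusion": 19.0, "Atom D510": 10.0}
    P_FREQ_W = {("Intel i7", 1.6): 14.0, ("Intel i7", 2.4): 23.0}  # hypothetical

    def node_power_w(node_type, freq_ghz, powered_on=True):
        if not powered_on:
            return 0.0  # off state: no (or limited) power usage
        return P_IDLE_W[node_type] + P_FREQ_W[(node_type, freq_ghz)]

    print(node_power_w("Intel i7", 2.4))  # 34.5 W with these placeholder values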
 \subsection{Resource load}
     
 \end{equation}
 
-A typical functional model of power usage can be based on theoretical dependencies between power and parameters such as CPU frequency, voltage, load, memory usage, etc. In this case CPU power usage for core $i$, $P_i$ can be given according to \ref{eq:ohm-law}. Then, the total CPU power can be calculated as a sum of utilized cores: 
+A typical functional model of power usage can be based on theoretical dependencies between power and parameters such as CPU frequency, voltage, load, memory usage, etc. In this case the CPU power usage for core $i$, $P_i$, can be given according to (\ref{eq:ohm-law}). Then, the total CPU power can be calculated as a sum over the utilized cores:
 
 \begin{equation}
     
 \section{Experiments and evaluation}\label{sec:experiments}
 
-In this section, we present computational analysis that were conducted to emphasize the role of modelling and simulation in studying computing systems performance. To this end we evaluate the impact of energy-aware resource management policies on overall energy-efficiency of specific workloads on heterogeneous resources. The following sections contain description of the used system, tested application and the results of simulation experiments conducted for the evaluated strategies.
+In this section, we present computational analyses that were conducted to emphasize the role of modeling and simulation in studying computing system performance. To this end we evaluate the impact of energy-aware resource management policies on the overall energy efficiency of specific workloads on heterogeneous resources. The following sections contain a description of the system used, the tested applications and the results of simulation experiments conducted for the evaluated strategies.
 
 \subsection{Testbed description}
     
 \subsection{Evaluated applications}
 
-As mentioned, first we carried out a set of tests on the real hardware used as a CoolEmAll testbed to build the performance and energy profiles of applications.  The following applications were taken into account:
-
-\textbf{Abinit} is a widely-used application for computational physics simulating systems made of electrons and nuclei to be calculated within density functional theory.
-
-\textbf{C-Ray} is a ray-tracing benchmark that stresses floating point performance of a CPU. Our test is configured with the 'scene' file at 16000x9000 resolution.
-
-\textbf{Linpack} benchmark is used to evaluate system floating point performance. It is based on the Gaussian elimination methods that solve a dense N by N system of linear equations.
-
-\textbf{Tar} it is a widely used data archiving software]. In our tests the task was to create one compressed file of Linux kernel (version 3.4), which is about 2,3 GB size, using bzip2.
-
-\textbf{FFTE} benchmark measures the floating-point arithmetic rate of double precision complex one-dimensional Discrete Fourier Transforms of 1-, 2-, and 3-dimensional sequences of length $2^{p} * 3^{q} * 5^{r}$. In our tests only one core was used to run the application.
+As mentioned, we first carried out a set of tests on the real hardware used as the CoolEmAll testbed to build the performance and energy profiles of applications. The following applications were taken into account:
+
+\textbf{Abinit} \cite{abinit} is a widely-used application for computational physics simulating systems made of electrons and nuclei to be calculated within density functional theory.
+
+\textbf{C-Ray} \cite{cray} is a ray-tracing benchmark that stresses the floating point performance of a CPU. Our test is configured with the 'scene' file at 16000x9000 resolution.
+
+The \textbf{Linpack} \cite{linpack} benchmark is used to evaluate system floating point performance. It is based on Gaussian elimination to solve a dense N by N system of linear equations.
+
+\textbf{Tar} \cite{tar} is widely used data archiving software. In our tests the task was to create one compressed file of the Linux kernel (version 3.4), which is about 2.3 GB in size, using bzip2.
+
+The \textbf{FFTE} \cite{fft} benchmark measures the floating-point arithmetic rate of double precision complex one-dimensional Discrete Fourier Transforms of 1-, 2-, and 3-dimensional sequences of length $2^{p} * 3^{q} * 5^{r}$. In our tests only one core was used to run the application.
 
 
     
 Obviously, the 0\% error in the case of the Mapping model is caused by the use of tabular data, which for each application stores a specific power usage. Nevertheless, in all models we face possible deviations from the average caused by power usage fluctuations not explained by the variables used in the models. These deviations reached around 7\% in each case.
 
-For the experimental purposes we decided to use the latter model. Thus, we introduce into the simulation environment exact values obtained within our testbed, to build both the power profiles of applications as well as the application performance models, denoting the their execution times.
+For the experimental purposes we decided to use the latter model. Thus, we introduce into the simulation environment the exact values obtained within our testbed, to build both the power profiles of applications as well as the application performance models, denoting their execution times.
 
 
 \subsection{Methodology}
 
-Every chosen application / benchmark was executed on each type of node, for all frequencies supported by the CPU and for different levels of parallelization (number of cores). To eliminate the problem with assessing which part of the power consumption comes from which application, in case when more then one application is ran on the node, the queuing system (SLURM) was configured to run jobs in exclusive mode (one job per node). Such configuration is often used for at least dedicated part of HPC resources. The advantage of the exclusive mode scheduling policy consist in that the job gets all the resources of the assigned nodes for optimal parallel performance and applications running on the same node do not influence each other. For every configuration of application, type of node and CPU frequency we measure the average power consumption of the node and the execution time. The aforementioned values  were used to configure the DCWoRMS environment providing energy and time execution models.
+Every chosen application / benchmark was executed on each type of node, for all frequencies supported by the CPU and for different levels of parallelization (number of cores). To eliminate the problem of assessing which part of the power consumption comes from which application in the case when more than one application is run on a node, the queuing system (SLURM) was configured to run jobs in exclusive mode (one job per node). Such a configuration is often used for at least a dedicated part of HPC resources. The advantage of the exclusive mode scheduling policy is that the job gets all the resources of the assigned nodes for optimal parallel performance and applications running on the same node do not influence each other. For every configuration of application, type of node and CPU frequency we measured the average power consumption of the node and the execution time. These values were used to configure the DCWoRMS environment, providing energy and execution time models.
 Based on the models obtained for the considered set of resources and applications we evaluated a set of resource management strategies in terms of the energy consumption needed to execute workloads varying in load intensity (10\%, 30\%, 50\%, 70\%).
 To generate a workload we used the DCWoRMS workload generator tool with the following characteristics (Table~\ref{workloadCharacteristics}).
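The measurement campaign described above effectively fills a lookup table keyed by (application, node type, frequency). A sketch under assumed names follows, with a stubbed measurement routine standing in for the real testbed runs; the frequency lists and returned values are placeholders.

    # Sketch of the profile tables produced by the campaign described above;
    # apps and node types come from the paper, frequencies and the stubbed
    # measurement values are placeholders.
    from itertools import product

    APPS = ["abinit", "c-ray", "linpack", "tar", "ffte"]
    NODE_FREQS_GHZ = {"Intel i7": [1.6, 2.4], "AMD Fusion": [1.0, 1.6],
                      "Atom D510": [1.7]}  # hypothetical frequency lists

    def measure(app, node_type, freq_ghz):
        # Placeholder for one exclusive-mode run on the real testbed.
        return 30.0, 600.0  # (average power [W], execution time [s])

    profile = {}
    for app, (node_type, freqs) in product(APPS, NODE_FREQS_GHZ.items()):
        for f in freqs:
            profile[(app, node_type, f)] = measure(app, node_type, f)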
     
 \subsubsection{Random approach}
 
-The first considered by us policy was the Random (R) strategy in which tasks were assigned to nodes in random manner with the reservation that they can be assigned only to nodes of the type which the application was possible to execute on and we have the corresponding value of power consumption and execution time. The Random strategy is only the reference one and will be later used to compare benefits in terms of energy efficiency resulting from more sophisticated algorithms. Criteria values are as follows: \textbf{total energy usage}: 46,883 kWh, \textbf{workload completion time}: 533 820 s.
+The first policy we considered was the Random (R) strategy, in which tasks were assigned to nodes in a random manner, with the condition that they could be assigned only to nodes of a type on which the application was able to run (in other words, for which we had the corresponding power consumption and execution time values). The Random strategy serves only as a reference and will later be used to compare the energy-efficiency benefits of more sophisticated algorithms. The criteria values are as follows: \textbf{total energy usage}: 46.883 kWh, \textbf{workload completion time}: 533 820 s.
 Figure~\ref{fig:70r} presents the energy consumption, the load of the system and the obtained schedule, respectively.
 
     
 \end{figure}
 
-In the second version of this strategy, which is getting more popular due to energy costs, we switched of unused nodes to reduce the total energy consumption. In the previous one, unused nodes are not switched off, which case is still the primary one in many HPC centers. 
+In the second version of this strategy, which is getting more popular due to energy costs, we switched off unused nodes to reduce the total energy consumption. In the previous one, unused nodes were not switched off, a case that is still the primary one in many HPC centers.
 
 \begin{figure}[h!]
     
 \end{figure}
 
-In this version of experiment we neglected additional cost and time necessary to change the power state of resources. As can be observed in the power consumption chart in the Figure~\ref{fig:70rnpm}, switching of unused nodes led to decrease of the total energy consumption. As expected, with respect to the makespan criterion, both approaches perform equally reaching \textbf{workload completion time}: 533 820 s. However, the pure random strategy was significantly outperformed in terms of energy usage, by the policy with additional node power management with its \textbf{total energy usage}: 36,705 kWh. The overall energy savings reached 22\%. 
+In this version of the experiment we neglected the additional cost and time necessary to change the power state of resources. As can be observed in the power consumption chart in Figure~\ref{fig:70rnpm}, switching off unused nodes led to a decrease in the total energy consumption. As expected, with respect to the makespan criterion, both approaches perform equally, reaching a \textbf{workload completion time} of 533 820 s. However, the pure random strategy was significantly outperformed in terms of energy usage by the policy with additional node power management, with its \textbf{total energy usage}: 36.705 kWh. The overall energy savings reached 22\%.
 
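The reported saving checks out against the two quoted values:

    % (46.883 - 36.705) / 46.883 = 0.217..., i.e. about 22%
    \frac{46.883 - 36.705}{46.883} \approx 0.217 \approx 22\%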
 \subsubsection{Energy optimization}
 
-The next two evaluate resource management strategies try to decrease the total energy consumption (EO) needed to execute the whole workload taking into account differences in applications and hardware profiles. We tried to match both profiles to find the more energy efficient assignment. In the first case we assumed that there is again no possibility to switch off unused nodes, thus for the whole time needed to execute workload nodes consume at least power for idle state. To obtain the minimal energy consumption, tasks has to be assigned to the nodes of type for which the difference between energy consumption for the node running the application and in the idle state is minimal. The power usage measured in idle state for three types of nodes is gathered in the Table~\ref{idlePower}.
+The next two evaluated resource management strategies try to decrease the total energy consumption (EO) caused by the execution of the whole workload. They take into account differences in application and hardware profiles, trying to match both in order to find the most energy efficient assignment. In the first case we assumed that there is again no possibility to switch off unused nodes; thus, for the whole time needed to execute the workload, nodes consume at least the idle-state power. To obtain the minimal energy consumption, tasks have to be assigned to the node type for which the difference between the energy consumption of the node running the application and in the idle state is minimal. The power usage measured in the idle state for the three types of nodes is gathered in Table~\ref{idlePower}.
 
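A sketch of this assignment rule follows; the idle values are those of Table~\ref{idlePower}, while the per-application values are placeholders invented for the example.

    # Sketch of the EO rule described above: pick the node type with the
    # smallest difference between the power when running the application and
    # the idle power. Idle values follow Table 6; P_APP_W is hypothetical.
    P_IDLE_W = {"Intel i7": 11.5, "AMD Fusion": 19.0, "Atom D510": 10.0}
    P_APP_W = {"Intel i7": 40.0, "AMD Fusion": 33.0, "Atom D510": 21.0}

    def eo_pick(p_app_w, p_idle_w):
        return min(p_app_w, key=lambda t: p_app_w[t] - p_idle_w[t])

    print(eo_pick(P_APP_W, P_IDLE_W))  # "Atom D510" with these placeholder values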
 \begin {table}[h!]
     
    663662Type & Power usage in idle state [W]  \\ 
    664663\hline 
    665  Intel i7 & 11,5 \\ 
     664 Intel i7 & 11.5 \\ 
    666665AMD Fusion & 19 \\ 
    667666Atom D510  & 10 \\ 
     
 \end{figure}
 
-This allocation strategy, leads to slight deterioration of makespan criterion, resulting in \textbf{workload completion time} equal to 534 400 s. Nevertheless, the \textbf{total energy usage} is reduced, achieving: 46,305 kWh.
+This allocation strategy leads to a slight deterioration of the makespan criterion, resulting in a \textbf{workload completion time} equal to 534 400 s. Nevertheless, the \textbf{total energy usage} is reduced, achieving 46.305 kWh.
 
 
     
 \end{figure}
 
-Estimated \textbf{total energy usage} of the system is 30,568 kWh. As we can see, this approach significantly improved the value of this criterion, comparing to the previous policies. Moreover, the proposed allocation strategy does not worsen the \textbf{workload completion time} criterion, for which the resulting value is equal to 533 820 s.
+The estimated \textbf{total energy usage} of the system is 30.568 kWh. As we can see, this approach significantly improved the value of this criterion compared to the previous policies. Moreover, the proposed allocation strategy does not worsen the \textbf{workload completion time} criterion, for which the resulting value is equal to 533 820 s.
 
 \subsubsection{Downgrading frequency}
 
-The last considered by us case is modification of the random strategy. We assume that tasks do not have deadlines and the only criterion which is taken into consideration is the total energy consumption. In this experiment we configured the simulated infrastructure for the lowest possible frequencies of CPUs (LF). The experiment was intended to check if the benefit of running the workload on less power-consuming frequency of CPU is not leveled by the prolonged time of execution of the workload. The values of the evaluated criteria are as follows: \textbf{workload completion time}: 1 065 356 s and \textbf{total energy usage}: 77,109 kWh. As we can see, for the given load of the system (70\%), the cost of running the workload that requires almost twice more time, can not be compensate by the lower power draw. Moreover, as it can be observed on the charts in Figure~\ref{fig:70dfs}, the execution times on the slowest nodes (Atom D510) visibly exceeds the corresponding values on other servers.
+The last case considered by us is a modification of the random strategy. We assume that tasks do not have deadlines and the only criterion taken into consideration is the total energy consumption. In this experiment we configured the simulated infrastructure for the lowest possible CPU frequencies (LF). The experiment was intended to check whether the benefit of running the workload at a less power-consuming CPU frequency is not canceled out by the prolonged execution time of the workload. The values of the evaluated criteria are as follows: \textbf{workload completion time}: 1 065 356 s and \textbf{total energy usage}: 77.109 kWh. As we can see, for the given load of the system (70\%), the cost of running the workload, which requires almost twice as much time, cannot be compensated by the lower power draw. Moreover, as can be observed in the charts in Figure~\ref{fig:70dfs}, the execution times on the slowest nodes (Atom D510) visibly exceed the corresponding values on other servers.
 
 \begin{figure}[h!]
     
 
 
-As we were looking for the trade-off between total completion time and energy usage, we were searching for the workload load level that can benefit from the lower system performance in terms of energy-efficiency. For the frequency downgrading policy, we noticed the improvement on the energy usage criterion only for the workload resulting in 10\% system load. For this threshold we observed that slowdown in task execution does not affect the subsequent tasks in the system and thus total completion time of the whole workload. T
+As we were looking for a trade-off between total completion time and energy usage, we searched for the workload level that can benefit from the lower system performance in terms of energy efficiency. For the frequency downgrading policy, we noticed an improvement in the energy usage criterion only for the workload resulting in 10\% system load. For this threshold we observed that the slowdown in task execution does not affect the subsequent tasks in the system and thus the total completion time of the whole workload.
 
 Figure~\ref{fig:dfsComp} shows the schedules obtained for the Random and Random + lowest frequency strategies.
     
 
 \subsection{Discussion}
-The following tables: Table~\ref{loadEnergy} and Table~\ref{loadMakespan} contain the values of evaluation criteria (total energy usage and makespan respectively) gathered for all investigated workloads.
+Table~\ref{loadEnergy} and Table~\ref{loadMakespan} contain the values of the evaluation criteria (total energy usage and makespan, respectively) gathered for all investigated workloads.
    719718 
 \begin {table}[h!]
     
 Load  & R & R+NPM & EO & EO+NPM & R+LF\\
 \hline
-10\% & 241,337 &        37,811 & 239,667 & 25,571 & 239,278 \\
-30\% &89,853 & 38,059 & 88,823 & 25,595 & 90,545 \\
-50\% &59,112 & 36,797 & 58,524 & 26,328 & 76,020 \\
-70\% &46,883 & 36,705 & 46,305 & 30,568 & 77,109 \\
+10\% & 241.337 & 37.811 & 239.667 & 25.571 & 239.278 \\
+30\% & 89.853 & 38.059 & 88.823 & 25.595 & 90.545 \\
+50\% & 59.112 & 36.797 & 58.524 & 26.328 & 76.020 \\
+70\% & 46.883 & 36.705 & 46.305 & 30.568 & 77.109 \\
 \hline
 \end{tabular}
     
    750749\end {table} 
    751750 
    752 Referring to the Table~\ref{loadEnergy}, one should easily note that gain from switching off unused nodes decreases with the increasing workload density. In general, for the highly loaded system such policy does not find an application due to the cost related to this process and relatively small benefits. Another interesting conclusion, reefers to the poor result for Random strategy with downgrading the frequency approach. The lack of improvement on the energy usage criterion for higher system load can be explained by the relatively small or no benefit obtained for prolonging the task execution, and thus, maintaining the node in working state. The cost of longer workload completion, can not be compensate by the very little energy savings derived from the lower operating state of node. 
     751Referring to the Table~\ref{loadEnergy}, one should easily note that gain from switching off unused nodes decreases with the increasing workload density. In general, for the highly loaded system such policy does not find an application due to the cost related to this process and relatively small benefits. Another interesting conclusion, refers to the poor result for Random strategy with downgrading the frequency approach. The lack of improvement on the energy usage criterion for higher system load can be explained by the relatively small or no benefit obtained for prolonging the task execution, and thus, maintaining the node in working state. The cost of longer workload completion, can not be compensate by the very little energy savings derived from the lower operating state of node. 
    753752We also demonstrated the differences between power usage models, which span from a rough static approach to accurate application-specific models. However, the latter can be difficult or even infeasible to use, as they require real measurements for each specific application beforehand. This issue can be partially resolved by introducing application profiles and classification, although this may deteriorate the accuracy. It is being studied more deeply within the CoolEmAll project.
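The sketch below contrasts the two ends of that spectrum (illustrative Python only: the function names and all numeric values are assumptions made for exposition, not the DCWoRMS interfaces or our measured $P_{cpubase}$ and $P_{app}$ data):
\begin{verbatim}
P_IDLE = 30.0   # assumed idle power of a node, in Watts
P_FULL = 60.0   # assumed full-load power of a node, in Watts

def static_power(load):
    """Rough static model: interpolate between idle and full-load power."""
    return P_IDLE + load * (P_FULL - P_IDLE)

def app_specific_power(p_cpubase, p_app):
    """Application-specific model: a base CPU power term plus a
    per-application term measured beforehand (cf. Section 5.3)."""
    return p_cpubase + p_app

print(static_power(1.0))               # 60.0 W, regardless of the application
print(app_specific_power(35.0, 20.0))  # 55.0 W for one measured application
\end{verbatim}
The static variant requires no per-application measurements, whereas the application-specific one trades that measurement effort for accuracy.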
    754753 
     
    778777 
    779778\section*{Acknowledgement} 
    780 The results presented in this paper are partially funded by the European Commission under contract 288701 through the project CoolEmAll and by grants 
    781 from Polish National Science Center: a grant under award number 636/N-COST/09/2010/0 and a grant under award number 5790/B/T02/2010/38. 
     779The results presented in this paper are partially funded by the European Commission under contract 288701 through the CoolEmAll project, and by grants from the Polish National Science Center under award numbers 636/N-COST/09/2010/0 and 5790/B/T02/2010/38.
    782780 
    783781 
     
    820818\bibitem{fit4green} A. Berl, E. Gelenbe, M. di Girolamo, G. Giuliani, H. de Meer, M.-Q. Dang, K. Pentikousis. Energy-Efficient Cloud Computing. The Computer Journal, 53(7), 2010. 
    821819 
    822 \bibitem{e2dc12} Micha vor dem Berge, Georges Da Costa, Andreas Kopecki, Ariel Oleksiak, Jean-Marc Pierson, Tomasz Piontek, Eugen Volk, and Stefan Wesner. "Modeling and Simulation of Data Center Energy-Efficiency in CoolEmAll". Energy Efficient Data Centers, Lecture Notes in Computer Science Volume 7396, 2012, pp 25-36 
    823  
    824 \bibitem{CloudSim} Rodrigo N. Calheiros, Rajiv Ranjan, Anton Beloglazov, Cesar A. F. De Rose, and Rajkumar Buyya, CloudSim: A Toolkit for Modeling and Simulation of Cloud Computing Environments and Evaluation of Resource Provisioning Algorithms, Software: Practice and Experience (SPE), Volume 41, Number 1, Pages: 23-50, ISSN: 0038-0644, Wiley Press, New York, USA, January, 2011. 
     820\bibitem{e2dc12} M. vor dem Berge, G. Da Costa, A. Kopecki, A. Oleksiak, J.-M. Pierson, T. Piontek, E. Volk, S. Wesner. Modeling and Simulation of Data Center Energy-Efficiency in CoolEmAll. Energy Efficient Data Centers, Lecture Notes in Computer Science, Volume 7396, 2012, pp. 25-36.
     821 
     822\bibitem{CloudSim} R. N. Calheiros, R. Ranjan, A. Beloglazov, C. A. F. De Rose, R. Buyya. CloudSim: A Toolkit for Modeling and Simulation of Cloud Computing Environments and Evaluation of Resource Provisioning Algorithms, Software: Practice and Experience (SPE), Volume 41, Number 1, Pages: 23-50, ISSN: 0038-0644, Wiley Press, New York, USA, January, 2011. 
    825823 
    826824\bibitem{DCSG} BCS Data Centre Specialist Group (DCSG) simulator. http://dcsg.bcs.org/welcome-dcsg-simulator
     
    828826\bibitem{DCD_Romonet} http://www.datacenterdynamics.com/blogs/ian-bitterlin/it-does-more-it-says-tin\%E2\%80\%A6 
    829827 
    830 \bibitem{networks} E. Gelenbe and C. Morfopoulou. Power savings in packet networks via optimised routing. Mobile Networks and Applications, 17(1):152–159, February 2012. 
    831  
    832 \bibitem{Ghislain} Ghislain Landry Tsafack Chetsa, Laurent LefÚvre, Jean-Marc Pierson, Patricia Stolf, Georges Da Costa. “DNA-inspired Scheme for Building the Energy Profile of HPC Systems”. In: International Workshop on Energy-Efficient Data Centres, Madrid, Springer, 2012 
    833  
    834 \bibitem{games} A. Kipp, L. Schubert, J. Liu, T. Jiang, W. Christmann, M. vor dem Berge (2011). Energy Consumption Optimisation in HPC Service Centres, Proceedings of the Second International Conference on Parallel, Distributed, Grid and Cloud Computing for Engineering, B.H.V. Topping and P. Iványi, (Editors), Civil-Comp Press, Stirlingshire, Scotland 
    835  
    836 \bibitem{GreenCloud} D. Kliazovich, P. Bouvry, and S. U. Khan, A Packet-level Simulator of Energy- aware Cloud Computing Data Centers, Journal of Supercomputing, vol. 62, no. 3, pp. 1263-1283, 2012 
     828\bibitem{networks} E. Gelenbe, C. Morfopoulou. Power savings in packet networks via optimised routing. Mobile Networks and Applications, 17(1):152–159, February 2012. 
     829 
     830\bibitem{Ghislain} G. L. T. Chetsa, L. Lef{\`e}vre, J.-M. Pierson, P. Stolf, G. Da Costa. DNA-inspired Scheme for Building the Energy Profile of HPC Systems. In: International Workshop on Energy-Efficient Data Centres, Madrid, Springer, 2012.
     831 
     832\bibitem{games} A. Kipp, L. Schubert, J. Liu, T. Jiang, W. Christmann, M. vor dem Berge. Energy Consumption Optimisation in HPC Service Centres, Proceedings of the Second International Conference on Parallel, Distributed, Grid and Cloud Computing for Engineering, B.H.V. Topping and P. Iv{\'a}nyi, (Editors), Civil-Comp Press, Stirlingshire, Scotland, 2011. 
     833 
     834\bibitem{GreenCloud} D. Kliazovich, P. Bouvry, and S. U. Khan, A Packet-level Simulator of Energy-aware Cloud Computing Data Centers, Journal of Supercomputing, vol. 62, no. 3, pp. 1263-1283, 2012. 
    837835 
    838836%\bibitem{sla} S. Klingert, T. Schulze, C. Bunse. GreenSLAs for the Energy-efficient Management of Data Centres. 2nd International Conference on Energy-Efficient Computing and Networking (e-Energy), 2011. 
     
    844842%\bibitem{hintemann} Hintemann, R., Fichter, K. (2010). Materialbestand der Rechenzentren in Deutschland, Eine Bestandsaufnahme zur Ermittlung von Ressourcen- und Energieeinsatz, UBA, Texte, 55/2010 
    845843 
    846 \bibitem{koomey} 
    847 Koomey, Jonathan. 2008. "Worldwide electricity used in data centers." Environmental Research Letters. vol. 3, no. 034008. September 23 
    848  
    849 \bibitem{fit4green_scheduler} Olli MÀmmelÀ, Mikko Majanen, Robert Basmadjian, Hermann De Meer, André Giesler, Willi Homberg, Energy-aware job scheduler for high-performance computing, Computer Science - Research and Development 
    850 November 2012, Volume 27, Issue 4, pp 265-275 
     844\bibitem{koomey} J. Koomey. Worldwide electricity used in data centers. Environmental Research Letters, vol. 3, no. 034008, September 23, 2008.
     845 
     846\bibitem{fit4green_scheduler} O. M{\"a}mmel{\"a}, M. Majanen, R. Basmadjian, H. De Meer, A. Giesler, W. Homberg. Energy-aware job scheduler for high-performance computing, Computer Science - Research and Development, November 2012, Volume 27, Issue 4, pp. 265-275.
    851847 
    852848% web links 
    853849 
     850\bibitem{abinit} Abinit. http://www.abinit.org/, 2013. 
     851 
     852\bibitem{cray} C-ray – ray-tracing benchmark. http://code.google.com/p/cray/, 2013. 
     853 
     854\bibitem{fft} FFTE: A Fast Fourier Transform Package. http://www.ffte.jp/, 2013. 
     855 
     856\bibitem{linpack} Linpack. http://www.netlib.org/linpack/, 2013. 
     857 
     858\bibitem{tar} Tar – data archiving software. http://www.gnu.org/software/tar/, 2013. 
     859 
    854860\bibitem{GWF} The Grid Workloads Archive. http://gwa.ewi.tudelft.nl/
    855861 