Commit ab2fca44 authored by Jérome Perrin's avatar Jérome Perrin

Remove obsolete GUI folder now that everything is a plugin

parent 737a19f5
from copy import copy
import json
import time
import random
import operator
import xmlrpclib
from dream.simulation.GUI.Default import Simulation as DefaultSimulation
from dream.simulation.Queue import Queue
from dream.simulation.Globals import getClassFromName
class Simulation(DefaultSimulation):
def getConfigurationDict(self):
conf = DefaultSimulation.getConfigurationDict(self)
conf["Dream-Configuration"]["property_list"].append(
{ "id": "numberOfGenerations",
"type": "number",
"name": "Number of generations",
"_class": "Dream.Property",
"_default": 10} )
conf["Dream-Configuration"]["property_list"].append(
{ "id": "numberOfAntsPerGenerations",
"type": "number",
"name": "Number of ants per generation",
"_class": "Dream.Property",
"_default": 20} )
conf["Dream-Configuration"]["property_list"].append(
{ "id": "numberOfSolutions",
"type": "number",
"name": "Number of solutions",
"_class": "Dream.Property",
"_default": 4} )
conf["Dream-Configuration"]["property_list"].append(
{ "id": "distributorURL",
"type": "string",
"name": "Distributor URL",
"description": "URL of an ERP5 Distributor, see "
"https://github.com/erp5/erp5/tree/dream_distributor",
"_class": "Dream.Property",
"_default": ''} )
return conf
def _preprocess(self, data):
"""Override in subclass to preprocess data.
"""
return data
def _calculateAntScore(self, ant):
"""Calculate the score of this ant.
"""
totalDelay=0 #set the total delay to 0
jsonData=ant['result'] #read the result as JSON
elementList = jsonData['elementList'] #find the route of JSON
#loop through the elements
for element in elementList:
elementClass=element['_class'] #get the class
#id the class is Job
if elementClass=='Dream.Job':
results=element['results']
delay = float(results.get('delay', "0"))
# A negative delay would mean we are ahead of schedule. This
# should not be considered better than being on time.
totalDelay += max(delay, 0)
return totalDelay
def run(self, data):
data = self._preprocess(data)
distributor_url = data['general']['distributorURL']
distributor = None
if distributor_url:
distributor = xmlrpclib.Server(distributor_url)
tested_ants = set()
start = time.time() # start counting execution time
# the list of options collated into a dictionary for ease of referencing in
# ManPy
collated = dict()
for node_id, node in data['nodes'].items():
node_class = getClassFromName(node['_class'])
if issubclass(node_class, Queue):
collated[node_id] = list(node_class.getSupportedSchedulingRules())
max_results = data['general']['numberOfSolutions']
ants = [] #list of ants for keeping track of their performance
# Number of times new ants are to be created, i.e. number of generations (a
# generation can have more than 1 ant)
for i in range(data["general"]["numberOfGenerations"]):
scenario_list = [] # for the distributor
# number of ants created per generation
for j in range(data["general"]["numberOfAntsPerGenerations"]):
# an ant dictionary to contain rule to queue assignment information
ant = {}
# for each of the machines, rules are randomly picked from the
# options list
for k in collated.keys():
ant[k] = random.choice(collated[k])
# TODO: function to calculate ant id. Store ant id in ant dict
ant_key = repr(ant)
# if the ant was not already tested, only then test it
if ant_key not in tested_ants:
tested_ants.add(ant_key)
# set scheduling rule on queues based on ant data
ant_data = copy(data)
for k, v in ant.items():
ant_data["nodes"][k]['schedulingRule'] = v
ant['key'] = ant_key
ant['input'] = ant_data
scenario_list.append(ant)
if distributor is None:
# synchronous
for ant in scenario_list:
ant['result'] = DefaultSimulation.runOneScenario(self, ant['input'])
else: # asynchronous
job_id = distributor.requestSimulationRun(
[json.dumps(x) for x in scenario_list])
print "Job registered", job_id
while True:
time.sleep(1.)
result_list = distributor.getJobResult(job_id)
# The distributor returns None when calculation is still ongoing,
# or the list of result in the same order.
if result_list is not None:
print "Job terminated"
break
for ant, result in zip(scenario_list, result_list):
ant['result'] = json.loads(result)
for ant in scenario_list:
ant['score'] = self._calculateAntScore(ant)
ants.extend(scenario_list)
# remove ants that outputs the same schedules
ants_without_duplicates = dict()
for ant in ants:
ant_result = copy(ant['result'])
ant_result['general'].pop('totalExecutionTime', None)
ant_result = json.dumps(ant_result, sort_keys=True)
ants_without_duplicates[ant_result] = ant
# The ants in this generation are ranked based on their scores and the
# best (max_results) are selected
ants = sorted(ants_without_duplicates.values(),
key=operator.itemgetter('score'))[:max_results]
for l in ants:
# update the options list to ensure that good performing queue-rule
# combinations have increased representation and good chance of
# being selected in the next generation
for m in collated.keys():
# e.g. if using EDD gave good performance for Q1, then another
# 'EDD' is added to Q1 so there is a higher chance that it is
# selected by the next ants.
collated[m].append(l[m])
print "ACO finished, execution time %0.2fs" % (time.time() - start)
return ants
import copy
import json
import time
import random
import operator
from dream.simulation.GUI.Shifts import Simulation as ShiftsSimulation
from dream.simulation.GUI.Default import schema
class Simulation(ShiftsSimulation):
    """Batch production simulation on top of the shifts simulation.

    Replaces the generic stations with batch aware ones (batch source,
    decomposition, reassembly, scrap machine, line clearance) and adds a
    daily throughput target to the global configuration.
    """

    def getConfigurationDict(self):
        """Return the node palette and global configuration for batches."""

        def tool(cls, name, short_id, property_list):
            # every tool entry in this palette shares this key layout
            return {"_class": cls,
                    "name": name,
                    "short_id": short_id,
                    "property_list": property_list}

        conf = ShiftsSimulation.getConfigurationDict(self)
        # a line clearance behaves like a queue, so it reuses its properties
        conf['Dream-LineClearance'] = tool(
            "Dream.LineClearance", "Clearance", "C",
            conf['Dream-Queue']['property_list'])
        # the source emits batches instead of single parts
        source_entity = copy.deepcopy(schema["entity"])
        source_entity['_default'] = "Dream.Batch"
        conf['Dream-BatchSource'] = tool(
            "Dream.BatchSource", "Source", "S",
            [schema['interarrivalTime'],
             source_entity,
             schema['batchNumberOfUnits']])
        # decomposition / reassembly default to an instantaneous operation
        zero_processing_time = copy.deepcopy(schema['processingTime'])
        for prop in zero_processing_time['property_list']:
            if prop['id'] == 'mean':
                prop['_default'] = 0.0
        # scrap machines work unit by unit, hence a per-unit time
        per_unit_processing_time = copy.deepcopy(schema['processingTime'])
        for prop in per_unit_processing_time['property_list']:
            if prop['id'] == 'mean':
                prop['description'] = "Processing time per unit"
        conf['Dream-BatchDecompositionStartTime'] = tool(
            "Dream.BatchDecompositionStartTime", "Decomposition", "D",
            [zero_processing_time, schema['numberOfSubBatches']])
        conf['Dream-BatchReassembly'] = tool(
            "Dream.BatchReassembly", "Reassembly", "R",
            [zero_processing_time, schema['numberOfSubBatches']])
        conf['Dream-BatchScrapMachine'] = tool(
            "Dream.BatchScrapMachine", "Station", "St",
            [per_unit_processing_time, schema['failures']])
        conf['Dream-EventGenerator'] = tool(
            "Dream.EventGenerator", "Attainment", "A",
            [schema['start'], schema['stop'], schema['duration'],
             schema['interval'], schema['method'], schema['argumentDict']])
        conf["Dream-Configuration"]["gui"]["exit_stat"] = 1
        conf["Dream-Configuration"]["gui"]["debug_json"] = 1
        conf["Dream-Configuration"]["gui"]["shift_spreadsheet"] = 1
        # some more global properties
        conf["Dream-Configuration"]["property_list"].append({
            "id": "throughputTarget",
            "name": "Daily Throughput Target",
            "description": "The daily throughput target in units.",
            "type": "number",
            "_class": "Dream.Property",
            "_default": 10})
        # remove tools that do not make sense here
        conf.pop('Dream-Machine')
        conf.pop('Dream-Repairman')
        conf.pop('Dream-Source')
        return conf
from copy import copy
import json
import time
import random
import operator
from datetime import datetime
from collections import defaultdict
from dream.simulation.GUI.Default import Simulation as DefaultSimulation
from dream.simulation.GUI.Default import schema
class Simulation(DefaultSimulation):
    """Capacity planning simulation.

    The GUI lets the user draw "abstract" capacity stations; _preprocess
    expands each of them into the Buffer -> Station -> Exit pattern that
    the simulation engine actually runs, reading the available and
    required capacity from two spreadsheets.
    """

    def getConfigurationDict(self):
        """Return the node palette and configuration for capacity planning."""
        conf = DefaultSimulation.getConfigurationDict(self)
        conf["Dream-AbstractCapacityStation"] = {
            "property_list": [
                {
                    "id": "isAssembly",
                    "name": "Is an assembly station ?",
                    "description": "Is this station an assembly ? Yes: 1, No: 0",
                    "type": "number",
                    "_class": "Dream.Property",
                    "_default": 0
                },
            ],
            "_class": 'Dream.AbstractCapacityStation',
            "name": 'Station',
            "short_id": "CS",
        }
        conf["Dream-Configuration"]["gui"]["capacity_by_project_spreadsheet"] = 1
        conf["Dream-Configuration"]["gui"]["capacity_by_station_spreadsheet"] = 1
        conf["Dream-Configuration"]["gui"]["station_utilisation_graph"] = 0
        conf["Dream-Configuration"]["gui"]["capacity_utilisation_graph"] = 1
        conf["Dream-Configuration"]["gui"]["job_schedule_spreadsheet"] = 1
        conf["Dream-Configuration"]["gui"]["job_gantt"] = 1
        conf["Dream-Configuration"]["gui"]["queue_stat"] = 0
        conf["Dream-Configuration"]["gui"]["exit_stat"] = 0
        conf["Dream-Configuration"]["gui"]["debug_json"] = 1
        # remove tools that do not make sense here
        conf.pop('Dream-Machine')
        conf.pop('Dream-Queue')
        conf.pop('Dream-Exit')
        conf.pop('Dream-Repairman')
        conf.pop('Dream-Source')
        conf.pop('Dream-EventGenerator')
        return conf

    def _preprocess(self, in_data):
        """Expand abstract stations and inject the wip queue and controller.

        Returns a new graph where every Dream.AbstractCapacityStation node
        is replaced by a CapacityStationBuffer, a CapacityStation and a
        CapacityStationExit chained by edges.
        """
        data = copy(DefaultSimulation._preprocess(self, in_data))
        new_data = copy(data)
        # remove not needed spreadsheets not to pollute json
        new_data.pop('shift_spreadsheet', None)
        new_data.pop('wip_part_spreadsheet', None)
        # NOTE(review): 'capacity_by_station_spreadsheet' is popped from
        # `data` below but kept in `new_data` — confirm this is intended.
        # read the spreadsheets
        # a mapping station id -> list of interval capacity
        available_capacity_by_station = defaultdict(list)
        capacity_by_station_spreadsheet = data.pop('capacity_by_station_spreadsheet')
        # first row holds the station ids, first cell being a label
        station_id_list = [x for x in capacity_by_station_spreadsheet[0][1:] if x]
        for line in capacity_by_station_spreadsheet[1:]:
            for station_id, capacity in zip(station_id_list, line[1:]):
                available_capacity_by_station[station_id].append(float(capacity or 0))
        assert set(station_id_list) == set(data['nodes'].keys()), \
            "Check stations ids in capacity spreadsheet"
        # a mapping project id -> mapping station_id -> required capacity
        required_capacity_by_project = dict()
        for project_id, station_sequence, requirement_sequence \
                in data['capacity_by_project_spreadsheet'][1:]:
            if project_id:
                required_capacity_by_project[project_id] = {}
                for idx, capacity_station in enumerate(station_sequence.split('-')):
                    capacity_station = '%s_Station' % capacity_station.strip()
                    required_capacity_by_project[project_id][capacity_station] = \
                        float(requirement_sequence.split('-')[idx])
        # a mapping project id -> first station
        first_station_by_project = dict()
        for project_id, station_sequence, requirement_sequence \
                in data['capacity_by_project_spreadsheet'][1:]:
            if station_sequence:
                # BUGFIX: strip the station name for consistency with the
                # stripped names used in required_capacity_by_project
                first_station_by_project[project_id] = \
                    station_sequence.split('-')[0].strip()
        # implicitly add a Queue for wip
        # BUGFIX: the reserved id used below is 'QStart'; the assertion
        # checked 'Qstart' (wrong case) and could never trigger.
        assert 'QStart' not in new_data['nodes'], "reserved ID used"
        wip = []
        for project, capacityRequirementDict in \
                required_capacity_by_project.items():
            wip.append(
                dict(_class='Dream.CapacityProject',
                     id=project,
                     name=project,
                     capacityRequirementDict=capacityRequirementDict))
        new_data['nodes']['QStart'] = dict(
            _class='Dream.Queue',
            id='QStart',
            name='Start Queue',
            capacity=-1,
            wip=wip)
        # implicitly add a capacity station controller
        assert 'CSC' not in new_data['nodes'], "reserved ID used"
        new_data['nodes']['CSC'] = dict(
            _class='Dream.CapacityStationController',
            name='CSC',
            start=0,
            interval=1, )
        # "expand" abstract stations.
        # list() because nodes are added / removed while iterating.
        for node_id, node_data in list(data['nodes'].items()):
            if node_data['_class'] != 'Dream.AbstractCapacityStation':
                continue
            # remove the node
            new_data['nodes'].pop(node_id)
            # remove outbound edges, while keeping a reference to the next
            # station to set nextCapacityStationBufferId on the exit
            next_abstract_station = None
            for edge_id, (source, dest, edge_dict) in \
                    list(new_data['edges'].items()):
                # list because we remove some elements in the loop
                if source == node_id:
                    next_abstract_station = dest
                    del new_data['edges'][edge_id]
            wip = []
            # set as wip all projects that have to be processed in this
            # station first
            for project, requirement_dict in required_capacity_by_project.items():
                if first_station_by_project[project] == node_id:
                    requirement = requirement_dict['%s_Station' % node_id]
                    name = '%s_%s_%s' % (project, node_id, requirement)
                    wip.append(
                        dict(_class='Dream.CapacityEntity',
                             id=name,
                             name=name,
                             capacityProjectId=project,
                             requiredCapacity=requirement))
            new_data['nodes']["%s_Buffer" % node_id] = dict(
                _class='Dream.CapacityStationBuffer',
                id="%s_Buffer" % node_id,
                name=node_data['name'],
                wip=wip,
                isAssembly=node_data['isAssembly']
            )
            new_data['nodes']["%s_Station" % node_id] = dict(
                _class='Dream.CapacityStation',
                id="%s_Station" % node_id,
                name=node_data['name'],
                intervalCapacity=available_capacity_by_station[node_id],
            )
            # renamed from `exit` so it no longer shadows the builtin
            exit_node = dict(_class='Dream.CapacityStationExit',
                             id="%s_Exit" % node_id,
                             name=node_data['name'],)
            # set nextCapacityStationBufferId
            if next_abstract_station:
                exit_node['nextCapacityStationBufferId'] = \
                    '%s_Buffer' % next_abstract_station
            new_data['nodes']["%s_Exit" % node_id] = exit_node
            new_data['edges']['%s_1' % node_id] = [
                "%s_Buffer" % node_id,
                "%s_Station" % node_id,
                {}]
            new_data['edges']['%s_2' % node_id] = [
                "%s_Station" % node_id,
                "%s_Exit" % node_id,
                {}]
        return new_data
import json
import datetime
from dream.simulation.LineGenerationJSON import main as simulate_line_json
from dream.simulation.Queue import Queue
from copy import deepcopy
# Schema of the properties exposed to the GUI. Each entry is a
# "Dream.Property": an id, a type, and optional name / description /
# choice list and default value.
schema = {
    "entity": {
        "id": "entity",
        "name": "Entity Class",
        "description": "The Class of entity created",
        "type": "string",
        "_class": "Dream.Property",
        "_default": "Dream.Part"
    },
    "mean": {
        "id": "mean",
        "type": "number",
        "name": "Mean",
        "description": "Mean value of fixed processing time.",
        "_class": "Dream.Property",
        "_default": 1,
    },
    "distributionType": {
        "id": "distributionType",
        "name": "Distribution Type",
        "description": "The distribution type, one of Fixed, Exp, Normal",
        "type": "string",
        # choices are [label, value] pairs
        "choice": [["Fixed", "Fixed"],
                   ["Exp", "Exp"],
                   ["Normal", "Normal"]],
        "_class": "Dream.Property",
        "_default": "Fixed"
    },
    "stdev": {
        "id": "stdev",
        "type": "number",
        "name": "Standard Deviation",
        "_class": "Dream.Property",
    },
    "min": {
        "id": "min",
        "type": "number",
        "name": "Minimum Value",
        "_class": "Dream.Property",
    },
    "max": {
        "id": "max",
        "type": "number",
        "name": "Maximum Value",
        "_class": "Dream.Property",
    },
    "timeUnitPerDay": {
        "id": "timeUnitPerDay",
        "type": "number",
        "name": "Number of time units per day",
        "description": "Used for input and reporting widgets."
        " For example, 24 means that simulation clock time unit is one hour.",
        "_class": "Dream.Property",
        "_default": 24
    },
    "failureDistribution": {
        "id": "failureDistribution",
        "type": "string",
        "name": "Failures Distribution",
        "_class": "Dream.Property",
        "_default": "No"
    },
    "MTTF": {
        "id": "MTTF",
        "type": "number",
        "description": "Mean time to failure",
        "_class": "Dream.Property",
        "_default": 40
    },
    "MTTR": {
        "id": "MTTR",
        "type": "number",
        "description": "Mean time to repair",
        "_class": "Dream.Property",
        "_default": 10
    },
    "repairman": {
        "id": "repairman",
        "name": "Repairman",
        "type": "string",
        "_class": "Dream.Property",
        "_default": "None"
    },
    "operationType": {
        "id": "operationType",
        "type": "string",
        "name": "Operation Type",
        "choice": [["Auto", "MT-Load-Setup"],
                   ["Manual", "MT-Load-Processing"]],
        "_class": "Dream.Property",
        "_default": "MT-Load-Processing"
    },
    "isDummy": {
        "id": "isDummy",
        "type": "string",
        "_class": "Dream.Property",
        "_default": "0"
    },
    "schedulingRule": {
        "id": "schedulingRule",
        "type": "string",
        "name": "Scheduling Rule",
        "description": "Scheduling Rule, one of %s" % (" ".join(
            Queue.getSupportedSchedulingRules())),
        "choice": [(rule, rule) for rule in Queue.getSupportedSchedulingRules()],
        "_class": "Dream.Property",
        "_default": "FIFO"
    },
    "capacity": {
        "id": "capacity",
        "type": "number",
        "name": "Capacity",
        "_class": "Dream.Property",
        "_default": 1
    },
    "numberOfReplications": {
        "id": "numberOfReplications",
        "name": "Number of replications",
        "type": "number",
        "_class": "Dream.Property",
        "_default": 10
    },
    "maxSimTime": {
        "id": "maxSimTime",
        "type": "number",
        "name": "Length of experiment",
        "_class": "Dream.Property",
        "_default": 100
    },
    "confidenceLevel": {
        "id": "confidenceLevel",
        "type": "number",
        "name": "Confidence level",
        # BUGFIX: typo "statiscal" in the user visible description
        "description": "Confidence level for statistical analysis of stochastic experiments",
        "_class": "Dream.Property",
        "_default": 0.95
    },
    "processTimeout": {
        "id": "processTimeout",
        "type": "number",
        "name": "Process Timeout",
        "description": "Number of seconds before the calculation process is interrupted",
        "_class": "Dream.Property",
        "_default": 10
    },
    "seed": {
        "id": "seed",
        "name": "Seed for random number generator",
        "description": "When using the same seed, the random number generator"
        " produce the same sequence of numbers",
        "type": "string",
        "_class": "Dream.Property",
        "_default": "",
    },
    "ke_url": {
        "id": "ke_url",
        "name": "URL for Knowledge Extraction Spreadsheet",
        "description": "The URL for knowledge extraction to access its data"
        " for example "
        "http://git.erp5.org/gitweb/dream.git/blob_plain/HEAD:/dream/KnowledgeExtraction/Mockup_Processingtimes.xls",
        "type": "string",
        "_class": "Dream.Property",
        "_default":
        "http://git.erp5.org/gitweb/dream.git/blob_plain/HEAD:/dream/KnowledgeExtraction/Mockup_Processingtimes.xls",
    },
    "batchNumberOfUnits": {
        "id": "batchNumberOfUnits",
        "type": "number",
        "name": "Number of Units",
        "description": "Number of units of the created batch",
        "_class": "Dream.Property",
        "_default": 80
    },
    "numberOfSubBatches": {
        "id": "numberOfSubBatches",
        "type": "number",
        "name": "Number of sub batches",
        "description": "Number of sub batches that the batch is split to",
        "_class": "Dream.Property",
        "_default": 10
    },
    # the next entries parameterize the EventGenerator
    "method": {
        "id": "method",
        "type": "string",
        "_class": "Dream.Property",
        "_default": "Globals.countIntervalThroughput"
    },
    "start": {
        "id": "start",
        "type": "number",
        "_class": "Dream.Property",
        "_default": 1
    },
    "stop": {
        "id": "stop",
        "type": "number",
        "_class": "Dream.Property",
        "_default": -1
    },
    "interval": {
        "id": "interval",
        "type": "number",
        "_class": "Dream.Property",
        "_default": 10
    },
    "duration": {
        "id": "duration",
        "type": "number",
        "_class": "Dream.Property",
        "_default": 10
    },
    "argumentDict": {
        "id": "argumentDict",
        "type": "string",  # XXX json encoded ?
        "_class": "Dream.Property",
        "_default": "{}"
    },
    "currentDate": {
        "id": "currentDate",
        "type": "string",
        "name": "Simulation Start Time",
        "description": "The day the experiment starts, in YYYY/MM/DD format",
        "_class": "Dream.Property",
        # evaluated once, at module import time
        "_default": datetime.datetime.now().strftime('%Y/%m/%d')
    },
    "trace": {
        "id": "trace",
        "name": "Output Trace",
        "description": "Create an excel trace file (Yes or No)",
        "type": "string",
        "_class": "Dream.Property",
        "_default": "No"
    },
}
# helper function to overload a property
def overloaded_property(prop, overload):
    """Return a deep copy of *prop* with the keys of *overload* applied.

    The original property dict is left untouched.
    """
    merged = deepcopy(prop)
    merged.update(overload)
    return merged
# complex schemas (Dream.PropertyList): composite properties built from the
# simple entries above. The same schema sub-dicts are referenced (not
# copied) by several lists, except where overloaded_property makes a copy.
schema["processingTime"] = {
    "id": "processingTime",
    "name": "Processing Time",
    "property_list": [
        schema["distributionType"],
        # processing time uses a different default mean than the shared one
        overloaded_property(schema["mean"], {"_default": 0.75}),
        schema["stdev"],
        schema["min"],
        schema["max"]
    ],
    "_class": "Dream.PropertyList"
}
# interarrival time of a source: same distribution parameters, default mean
schema["interarrivalTime"] = {
    "id": "interarrivalTime",
    "name": "Interarrival Time",
    "property_list": [
        schema["distributionType"],
        schema["mean"],
        schema["stdev"],
        schema["min"],
        schema["max"],
    ],
    "_class": "Dream.PropertyList"
}
# failure model of a machine: distribution, MTTF / MTTR and repairman
schema["failures"] = {
    "id": "failures",
    "name": "Failures",
    "property_list": [
        schema["failureDistribution"],
        schema["MTTF"],
        schema["MTTR"],
        schema["repairman"]
    ],
    "_class": "Dream.PropertyList"
}
class Simulation(object):
    """Base simulation plugin.

    Exposes the palette of nodes usable in the graph editor together with
    the global configuration, and knows how to run one scenario through
    the simulation engine.
    """

    def __init__(self, logger=None):
        # optional logger provided by the hosting application
        self.logger = logger

    def getConfigurationDict(self):
        """Returns the possible nodes to use in the graph editor, and the global
        configuration.
        """
        conf = {}
        conf["Dream-Source"] = {
            "property_list": [
                schema["interarrivalTime"],
                schema["entity"]
            ],
            "short_id": "S",
            "_class": 'Dream.Source'
        }
        conf["Dream-Machine"] = {
            "property_list": [
                schema["processingTime"],
                schema["failures"]
            ],
            "short_id": "M",
            "_class": 'Dream.Machine'
        }
        conf["Dream-Queue"] = {
            "property_list": [
                schema["capacity"],
                # schema["isDummy"],
                schema["schedulingRule"]
            ],
            "short_id": "Q",
            "_class": 'Dream.Queue'
        }
        conf["Dream-Exit"] = {
            "short_id": "E",
            "_class": 'Dream.Exit'
        }
        conf["Dream-Repairman"] = {
            "short_id": "R",
            "property_list": [schema["capacity"]],
            "_class": 'Dream.Repairman'
        }
        conf["Dream-EventGenerator"] = {
            "name": "Event Generator",
            "short_id": "EG",
            "property_list": [schema['start'], schema['stop'], schema['duration'],
                              schema['interval'], schema['method'], schema['argumentDict']],
            "_class": "Dream.EventGenerator",
        }
        # global configuration
        conf["Dream-Configuration"] = {
            "property_list": [
                schema["numberOfReplications"],
                schema["maxSimTime"],
                schema["confidenceLevel"],
                schema["processTimeout"],
                schema["currentDate"],
                schema["timeUnitPerDay"],
                schema["trace"],
                schema["seed"],
                schema["ke_url"],
            ],
            "gui": {
                'debug_json': 1,
                'wip_spreadsheet': 0,
                'wip_part_spreadsheet': 0,
                'shift_spreadsheet': 0,
                'station_utilisation_graph': 1,
                'job_schedule_spreadsheet': 0,
                'download_excel_spreadsheet': 0,
                'job_gantt': 0,
                'exit_stat': 1,
                'queue_stat': 1,
            },
            "_class": 'Dream.Configuration'
        }
        return conf

    def runOneScenario(self, data):
        """Run one scenario.

        To be reused by subclasses.
        """
        serialized = json.dumps(data)
        return json.loads(simulate_line_json(input_data=serialized))

    def _preprocess(self, data):
        """Preprocess the data, for instance reading spreadsheet.
        """
        # when the application displays queue statistics, ask the engine to
        # gather wip statistics on every queue
        # if self.getConfigurationDict()["Dream-Configuration"]["gui"]["queue_stat"]:
        if data["application_configuration"]["output"]["view_queue_stats"]:
            for node in data["graph"]["node"].values():
                if node['_class'] in ('Dream.Queue', ):
                    node['gatherWipStat'] = 1
        return data

    def run(self, data):
        """Run simulation and return result to the GUI.
        """
        preprocessed_data = self._preprocess(data)
        return [{"key": "default",
                 "score": 0,
                 "result": self.runOneScenario(preprocessed_data),
                 "input": preprocessed_data}]
# ===========================================================================
# Copyright 2013 University of Limerick
#
# This file is part of DREAM.
#
# DREAM is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DREAM is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DREAM. If not, see <http://www.gnu.org/licenses/>.
# ===========================================================================
'''
Created on 17 Apr 2014
@author: Anna, George
'''
'''
test script to convert the static excels to JSON. It does not communicate with GUI yet
'''
import xlwt
import json
from dream.simulation.AllocationManagement import AllocationManagement
from dream.simulation.LineGenerationJSON import main as simulate_line_json
from dream.simulation.Globals import G
from dream.simulation.GUI.Default import Simulation as DefaultSimulation
from dream.simulation.GUI.Default import schema, overloaded_property
class IG:
    """Holder of the input data shared by the demand planner functions.

    All attributes are class level and are (re)filled by readGeneralInput.
    """
    TargetPPOS = 0          # 0-based id of the PPOS to disaggregate
    TargetPPOSqty = 0       # quantity of the target PPOS
    TargetPPOSweek = 0      # 0-based week when disaggregation is performed
    maxEarliness = 0        # max number of weeks for earliness
    maxLateness = 0         # max number of weeks for lateness
    minPackingSize = 0      # minimum allocable size
    CapacityDict = {}       # bottleneck name -> list of weekly capacity
    RouteDict = {}          # MA id -> {'PPOS', 'SP', 'route'}
def createGlobals():
    """(Re)initialise the demand planner state kept on the shared G object.

    Fresh containers are created on every call so successive runs do not
    share state.
    """
    for name, value in [
            ('ReplicationNo', 0),
            ('replication', 0),
            ('PPOSlist', {}),
            ('Capacity', []),
            ('route', {}),
            ('maxEarliness', 0),      # max number of weeks for earliness
            ('maxLateness', 0),       # max number of weeks for lateness
            ('planningHorizon', 0),   # for future demand purposes
            ('demandFile', None),
            ('currentCapacity', None),
            ('reCapacity', []),
            ('PPOSprofile', []),      # initial disaggregation for PPOS
            ('FutureProfile', []),    # initial disaggregation for future demand
            ('AllocationFuture', []),
            ('FutureLateness', []),
            ('FutureEarliness', []),
            ('AllocationPPOS', []),
            ('PPOSLateness', []),
            ('PPOSEarliness', []),
            ('minPackingSize', 0),
            ('Buffer', []),
            ('ExcessPPOSBuffer', []),
            ('ExcessPPOSminBuffer', []),
            ('ExcessFutureBuffer', []),
            ('ExcessFutureMinBuffer', []),
            ('DistributionType', None),
            ]:
        setattr(G, name, value)
#===================================
# import simulation input data
#===================================
def readGeneralInput(data):
    """Copy the general options and the two spreadsheets into IG and G."""
    general = data['general']
    # Info on PPOS to be disaggregated; ids and weeks are 1-based in the
    # GUI, 0-based internally
    IG.TargetPPOS = general['TargetPPOS'] - 1
    IG.TargetPPOSqty = general['TargetPPOSqty']
    # Time line: week when the disaggregation has to be performed and
    # planning horizon (consistent with capacity info)
    IG.TargetPPOSweek = general['TargetPPOSweek'] - 1
    G.planningHorizon = general['planningHorizon']
    # Info on Global Demand - normal distribution parameters
    # XXX those 3 cannot be configured
    # Info on scenario analysis: number of iterations
    G.ReplicationNo = general['numberOfReplications']
    # Info on time constraints for allocation
    IG.maxEarliness = general['maxEarliness']
    IG.maxLateness = general['maxLateness']
    # Info on minimum allocable size
    IG.minPackingSize = general['minPackingSize']
    # capacity spreadsheet: data rows start at index 2, first and last
    # columns are skipped
    capacity_rows = data['dp_capacity_spreadsheet']
    assert(len(capacity_rows[0]) == G.planningHorizon + 2)
    G.Capacity = [[int(cell) for cell in capacity_rows[row][1:-1]]
                  for row in range(2, len(capacity_rows) - 1)]
    # route spreadsheet: row index 2 holds the bottleneck names used as
    # keys, data rows start at index 4
    route_rows = data['dp_route_spreadsheet']
    for col in range(3, len(route_rows[0]) - 1):
        IG.CapacityDict[route_rows[2][col]] = G.Capacity[col - 3]
    for row in range(4, len(route_rows) - 1):
        ma_id = int(route_rows[row][2])
        entry = {'PPOS': int(route_rows[row][0]),
                 'SP': int(route_rows[row][1]),
                 'route': {}}
        for col in range(3, len(route_rows[row]) - 1):
            entry['route'][route_rows[2][col]] = float(route_rows[row][col])
        IG.RouteDict[ma_id] = entry
def writeOutput():
    """Export the allocation results stored on G to an xlwt workbook.

    One set of sheets (lateness, allocations, excess units, remaining
    capacity) is written per replication. Returns the workbook content as
    a byte string; a file is also saved locally for verification.
    """
    wbin = xlwt.Workbook()
    for k in range(G.ReplicationNo):
        #export info on lateness
        sheet1=wbin.add_sheet('Lateness'+str(k+1))
        sheet1.write(0,0,'replication')
        sheet1.write(0,1,k+1)
        sheet1.write(2,0,'PPOS Lateness')
        sheet1.write(2,1,G.PPOSLateness[k])
        sheet1.write(3,0,'PPOS Earliness')
        sheet1.write(3,1,G.PPOSEarliness[k])
        sheet1.write(1,3,'Unconstrained Excess Units')
        sheet1.write(1,4,'Min Excess Units')
        # excess buffers hold entities with a .qty attribute
        excessPPOS = sum([i.qty for i in G.ExcessPPOSBuffer[k]])
        minExcessPPOS = sum([i.qty for i in G.ExcessPPOSminBuffer[k]])
        sheet1.write(2,3,excessPPOS)
        sheet1.write(2,4, minExcessPPOS)
        excessFuture = sum([i.qty for i in G.ExcessFutureBuffer[k]])
        minExcessFuture = sum([i.qty for i in G.ExcessFutureMinBuffer[k]])
        sheet1.write(1,6,'% Unconstrained Excess')
        sheet1.write(1,7,'% Min Excess')
        sheet1.write(4,3,excessFuture)
        sheet1.write(4,4,minExcessFuture)
        sheet1.write(4,0,'Future Demand Lateness')
        sheet1.write(4,1,G.FutureLateness[k])
        sheet1.write(5,0,'Future Demand Earliness')
        sheet1.write(5,1,G.FutureEarliness[k])
        # Export PPOS/Future allocation Results
        # z == 0 -> PPOS results, z == 1 -> future demand results
        for z in range(2):
            if z==0:
                shName = 'PPOSAllocation'+str(k+1)
                itemName = 'Initial PPOS Demand Disaggregation'
                profile = G.PPOSprofile[k]
                alloc = G.AllocationPPOS[k]
            else:
                shName = 'FutureAllocation'+str(k+1)
                itemName = 'Initial Future Demand Disaggregation'
                profile = G.FutureProfile[k]
                alloc = G.AllocationFuture[k]
            sheet = wbin.add_sheet(shName)
            sheet.write_merge(0,0,0,4,itemName)
            sheet.write(1,0,'Order ID')
            sheet.write(1,1,'MA ID')
            sheet.write(1,2,'Total # Units')
            sheet.write(1,3,'Min # Units')
            sheet.write(1,4,'Planned Week')
            # one row per profile entry, columns follow the headers above
            for i in range(len(profile)):
                for j in range(len(profile[i])):
                    sheet.write(i+2,j,profile[i][j])
            # profile rows carry the total units at index 2
            totQty = sum([i[2] for i in profile])
            if z==0:
                #pposQty = totQty
                sheet1.write(2,6,excessPPOS*100.0/totQty)
                sheet1.write(2,7,minExcessPPOS*100.0/totQty)
            else:
                sheet1.write(4,6,excessFuture*100.0/totQty)
                sheet1.write(4,7,minExcessFuture*100.0/totQty)
            # next free column per profile row; attempts start at column 5
            counterCols = [5 for i in range(len(profile))]
            # TODO the below crashes, got to check
            # alloc rows seem to be [profile_row, MA id, units, week] —
            # TODO(review) confirm against AllocationManagement output
            for i in range(len(alloc)):
                for j in range(3):
                    sheet.write(alloc[i][0]+2,counterCols[alloc[i][0]]+j,alloc[i][j+1])
                counterCols[alloc[i][0]] += 3
            # each attempt occupies 3 columns (py2 integer division)
            attempts = (max(counterCols)-5)/3
            for i in range(attempts):
                sheet.write_merge(0,0,5+(i*3),5+(i*3)+2,'Allocation Attempt No.'+str(i+1))
                sheet.write(1,5+(i*3),'MA ID')
                sheet.write(1,5+(i*3)+1,'# Allocated Units')
                sheet.write(1,5+(i*3)+2,'Week')
        # Excess units
        # z selects PPOS/future, y selects unconstrained/min buffers
        for z in range(2):
            for y in range(2):
                if z==0:
                    if y == 0:
                        shName = 'PPOSExcess'+str(k+1)
                        buf = G.ExcessPPOSBuffer[k]
                    else:
                        shName = 'PPOSminExcess'+str(k+1)
                        buf = G.ExcessPPOSminBuffer[k]
                else:
                    if y == 0:
                        shName = 'FutureExcess'+str(k+1)
                        buf = G.ExcessFutureBuffer[k]
                    else:
                        shName = 'FutureMinExcess'+str(k+1)
                        buf = G.ExcessFutureMinBuffer[k]
                row = 1
                # only create the sheet when there is something to report
                if len(buf):
                    sheet = wbin.add_sheet(shName)
                    sheet.write(0,0,'Order ID')
                    sheet.write(0,1,'MA ID')
                    sheet.write(0,2,'excess Units')
                    for i in buf:
                        sheet.write(row,0,i.orderID+1)
                        sheet.write(row,1,i.MAid)
                        sheet.write(row,2,i.qty)
                        row +=1
        # remaining capacity
        sheet = wbin.add_sheet('Capacity'+str(k+1))
        sheet.write_merge(0,0,1,G.planningHorizon,'Weeks')
        for i in range(G.planningHorizon):
            sheet.write(1,i+1,i+1)
        sheet.write_merge(0,1,0,0,'Bottlenecks')
        i=2
        # G.CurrentCapacityDict is presumably filled by the simulation run
        # — TODO(review) confirm, it is not initialised in this file
        for record in G.CurrentCapacityDict:
            sheet.write(i,0,record)
            sheet.write(i,1,G.CurrentCapacityDict[record][0])
            sheet.write(i,2,G.CurrentCapacityDict[record][1])
            sheet.write(i,3,G.CurrentCapacityDict[record][2])
            i+=1
    wbin.save("demandPlannerOutput.xls") # temporary have a file for verification
    import StringIO
    out = StringIO.StringIO()
    wbin.save(out)
    return out.getvalue()
class Simulation(DefaultSimulation):
def getConfigurationDict(self):
conf = {'Dream-Configuration':
DefaultSimulation.getConfigurationDict(self)['Dream-Configuration']}
conf["Dream-Configuration"]["gui"]["exit_stat"] = 0
conf["Dream-Configuration"]["gui"]["debug_json"] = 0
conf["Dream-Configuration"]["gui"]["graph_editor"] = 0
conf["Dream-Configuration"]["gui"]["station_utilisation_graph"] = 0
conf["Dream-Configuration"]["gui"]["exit_stat"] = 0
conf["Dream-Configuration"]["gui"]["queue_stat"] = 0
conf["Dream-Configuration"]["gui"]["download_excel_spreadsheet"] = 1
conf["Dream-Configuration"]["gui"]["dp_capacity_spreadsheet"] = 1
conf["Dream-Configuration"]["gui"]["dp_route_spreadsheet"] = 1
prop_list = conf["Dream-Configuration"]["property_list"] = []
prop_list.append({
"id": "TargetPPOS",
"name": "PPOS ID",
"description": "Info on PPOS to be disaggregated",
"type": "number",
"_class": "Dream.Property",
"_default": 1
})
prop_list.append({
"id": "TargetPPOSqty",
"name": "PPOS Quantity",
"description": "Info on PPOS to be disaggregated",
"type": "number",
"_class": "Dream.Property",
"_default": 430
})
prop_list.append({
"id": "TargetPPOSweek",
"name": "PPOS Week",
"description": "Week when the disaggregation has to be performed",
"type": "number",
"_class": "Dream.Property",
"_default": 2
})
prop_list.append({
"id": "planningHorizon",
"name": "Planning horizon",
"description": "Planning horizon (consistent with capacity info)",
"type": "number",
"_class": "Dream.Property",
"_default": 3
})
prop_list.append(overloaded_property(schema['numberOfReplications'],
{'_default': 1}))
prop_list.append({
"id": "maxEarliness",
"name": "Max Earliness",
"description": "Info on Time Constraints for Allocation",
"type": "number",
"_class": "Dream.Property",
"_default": 1
})
prop_list.append({
"id": "maxLateness",
"name": "Max Lateness",
"description": "Info on Time Constraints for Allocation",
"type": "number",
"_class": "Dream.Property",
"_default": 1
})
prop_list.append({
"id": "minPackingSize",
"name": "Min Packing Size",
"description": "Info on minimum allocable size",
"type": "number",
"_class": "Dream.Property",
"_default": 10
})
prop_list.append(overloaded_property(schema['ke_url'],
{'_default':
'http://git.erp5.org/gitweb/dream.git/blob_plain/HEAD:/dream/simulation/Examples/DemandProfile.xlsx'}))
return conf
def run(self, data):
    """Build the allocation-management simulation input from the
    spreadsheet data, run the simulation and return its result list.
    """
    # Initialise module-level state, then load the inputs spreadsheet
    # into the IG/G globals that the simulation reads.
    createGlobals()
    readGeneralInput(data)

    # Arguments handed to the single AllocationManagement node.
    argument_dict = {
        'currentPPOS': {
            'id': IG.TargetPPOS,
            'quantity': IG.TargetPPOSqty,
            'targetWeek': IG.TargetPPOSweek,
        },
        'allocationData': {
            'maxEarliness': IG.maxEarliness,
            'maxLateness': IG.maxLateness,
            'minPackingSize': IG.minPackingSize,
        },
        'capacity': IG.CapacityDict,
        'MAList': IG.RouteDict,
    }
    input_dict = {
        '_class': 'Dream.Simulation',
        'general': {
            'maxSimTime': G.planningHorizon,
            'numberOfReplications': G.ReplicationNo,
            '_class': 'Dream.Simulation',
        },
        'edges': {},
        'nodes': {
            'AM': {
                '_class': 'Dream.AllocationManagement',
                'id': 'AM1',
                'name': 'AM1',
                'argumentDict': argument_dict,
            },
        },
    }

    G.argumentDictString = json.dumps(input_dict, indent=5)
    G.demandFile = data['general']['ke_url']
    out = json.loads(simulate_line_json(input_data=G.argumentDictString))
    # Attach the demand planner workbook, base64-encoded (Python 2 codec).
    out['demandPlannerOutput.xls'] = writeOutput().encode('base64')
    return [{'key': 'default', 'score': 0, 'result': out}]
from copy import copy
import json
import time
import random
import operator
from datetime import datetime
from dream.simulation.GUI import ACO
from dream.simulation.GUI.Default import schema
MACHINE_TYPE_SET = set(["Dream.MachineManagedJob", "Dream.MouldAssembly"])
class Simulation(ACO.Simulation):
    """Job-shop simulation plugin.

    Extends the ACO plugin with managed-job node types and reads the
    orders / components work-in-progress from a spreadsheet.
    """

    def getConfigurationDict(self):
        """Return the GUI configuration dict for the job-shop tool.

        Adds the managed-job node types, enables the job-shop specific
        result views and removes the production-line tools that do not
        make sense here.
        """
        conf = ACO.Simulation.getConfigurationDict(self)
        conf["Dream-MachineManagedJob"] = {
            "property_list": [
                schema["operationType"]
            ],
            "_class": 'Dream.MachineManagedJob',
            "name": 'Machine',
            "short_id": "M",
        }
        conf["Dream-MouldAssembly"] = {
            "property_list": [
                schema["operationType"]
            ],
            "_class": 'Dream.MouldAssembly',
            "name": 'Assembly',
            "short_id": "MA",
        }
        conf["Dream-QueueManagedJob"] = {
            "property_list": [
                schema["capacity"],
                schema["schedulingRule"]
            ],
            "_class": 'Dream.QueueManagedJob',
            "name": 'Queue',
            "short_id": "Q",
        }
        conf["Dream-ConditionalBuffer"] = {
            "property_list": [
                schema["capacity"],
                schema["schedulingRule"]
            ],
            "_class": 'Dream.ConditionalBuffer',
            "name": 'Cam Queue',
            "short_id": "B",
        }
        conf["Dream-MouldAssemblyBuffer"] = {
            "property_list": [
                schema["capacity"],
                schema["schedulingRule"]
            ],
            # BUGFIX: "_class" was missing here although every sibling
            # entry defines it and the GUI relies on it to tag nodes.
            "_class": 'Dream.MouldAssemblyBuffer',
            "name": 'Assembly Queue',
            "short_id": "MA",
        }
        conf["Dream-ExitJobShop"] = {
            "_class": 'Dream.ExitJobShop',
            "name": 'Exit',
            "short_id": "E",
        }
        conf["Dream-OperatorManagedJob"] = {
            "_class": 'Dream.OperatorManagedJob',
            "name": 'Operator',
            "short_id": "PM",
        }
        conf["Dream-OrderDecomposition"] = {
            "_class": 'Dream.OrderDecomposition',
            "name": 'Decomposition',
            "short_id": "D",
        }
        # Enable the job-shop oriented views, disable queue statistics.
        conf["Dream-Configuration"]["gui"]["wip_part_spreadsheet"] = 1
        conf["Dream-Configuration"]["gui"]["job_schedule_spreadsheet"] = 1
        conf["Dream-Configuration"]["gui"]["job_gantt"] = 1
        conf["Dream-Configuration"]["gui"]["queue_stat"] = 0
        conf["Dream-Configuration"]["gui"]["debug_json"] = 1
        # remove tools that do not make sense here
        conf.pop('Dream-Machine')
        conf.pop('Dream-Queue')
        conf.pop('Dream-Exit')
        conf.pop('Dream-Repairman')
        conf.pop('Dream-Source')
        conf.pop('Dream-EventGenerator')
        return conf

    def getMachineNameSet(self, step_name):
        """
        Give list of machines given a particular step name. For example
        if step_name is "CAM", it will return ["CAM1", "CAM2"]
        """
        # Nodes are matched purely by name prefix.
        return set(machine_name for machine_name in self.data["nodes"]
                   if machine_name.startswith(step_name))

    def getNotMachineNodePredecessorList(self, step_name):
        """
        Give the list of all predecessors that are not of type machine
        For example, for step_name "CAM", it may return "QCAM"
        """
        predecessor_list = []
        machine_name_set = self.getMachineNameSet(step_name)
        for edge in self.data["edges"].values():
            if edge[1] in machine_name_set:
                predecessor_step = edge[0]
                if predecessor_step in predecessor_list:
                    continue
                if not self.data["nodes"][predecessor_step]["_class"] in MACHINE_TYPE_SET:
                    predecessor_list = [predecessor_step] + predecessor_list
                # Recursively prepend the (not yet seen) predecessors of
                # this predecessor, so the route order is preserved.
                predecessor_list = [x for x in self.getNotMachineNodePredecessorList(predecessor_step)
                                    if x not in predecessor_list] + predecessor_list
        return predecessor_list

    def getRouteList(self, sequence_list, processing_time_list, prerequisite_list):
        """Build the route (list of station dicts) for one sequence.

        Non-machine predecessors (queues, decomposition) are inserted
        before each machine step.
        """
        # use to record which predecessor has been already done, used to avoid doing
        # two times Decomposition
        predecessor_set = set()
        route_list = []
        for j, sequence_step in enumerate(sequence_list):
            for predecessor_step in self.getNotMachineNodePredecessorList(sequence_step):
                # We avoid having two time Decomposition in the route. XXX Is this correct ?
                if predecessor_step == "Decomposition" and predecessor_step in predecessor_set:
                    continue
                predecessor_set.add(predecessor_step)
                route = {"stationIdsList": [predecessor_step],
                         }
                route_list.append(route)
            route = {"stationIdsList": list(self.getMachineNameSet(sequence_step)),
                     "processingTime": {"distributionType": "Fixed",
                                        "mean": float(processing_time_list[j])},
                     "setupTime": {"distributionType": "Fixed",
                                   "mean": .5},  # XXX hardcoded value
                     }
            if prerequisite_list:
                route["prerequisites"] = prerequisite_list
            route_list.append(route)
        return route_list

    def getListFromString(self, my_string):
        """Split a dash-separated string into a list; None or '' -> []."""
        if my_string in (None, ''):
            return []
        return my_string.split('-')

    def _preprocess(self, in_data):
        """ Set the WIP in queue from spreadsheet data.
        """
        data = copy(ACO.Simulation._preprocess(self, in_data))
        self.data = data
        now = datetime.now()
        if data['general']['currentDate']:
            now = datetime.strptime(data['general']['currentDate'], '%Y/%m/%d')
        if 'wip_part_spreadsheet' in data:
            wip_list = []
            i = 0
            wip_part_spreadsheet_length = len(data['wip_part_spreadsheet'])
            while i < wip_part_spreadsheet_length:
                value_list = data['wip_part_spreadsheet'][i]
                # Skip the header line and lines with no part defined.
                if value_list[0] == 'Order ID' or not value_list[4]:
                    i += 1
                    continue
                order_dict = {}
                wip_list.append(order_dict)
                order_id, due_date, priority, project_manager, part, part_type,\
                    sequence_list, processing_time_list, prerequisite_string = value_list
                # Due date expressed in hours from "now" (whole days only).
                due_date = (datetime.strptime(due_date, '%Y/%m/%d') - now).days * 24
                prerequisite_list = self.getListFromString(prerequisite_string)
                sequence_list = sequence_list.split('-')
                processing_time_list = processing_time_list.split('-')
                order_dict["_class"] = "Dream.Order"
                order_dict["id"] = "%i" % i  # XXX hack, we use it in UI to retrieve spreadsheet line
                order_dict["manager"] = project_manager
                order_dict["name"] = order_id
                order_dict["dueDate"] = due_date
                order_dict["priority"] = float(priority)
                order_dict["route"] = self.getRouteList(sequence_list, processing_time_list,
                                                       prerequisite_list)
                i += 1
                component_list = []
                # Consume the following component lines (empty first column).
                # BUGFIX: the index bound is now part of the loop condition;
                # previously a spreadsheet ending with component lines raised
                # IndexError after the final `i += 1`.
                while i < wip_part_spreadsheet_length and \
                        data['wip_part_spreadsheet'][i][0] in (None, ''):
                    value_list = data['wip_part_spreadsheet'][i]
                    if value_list[4] in (None, ''):
                        break
                    order_id, due_date, priority, project_manager, part, part_type,\
                        sequence_list, processing_time_list, prerequisite_string = value_list
                    sequence_list = sequence_list.split('-')
                    prerequisite_list = self.getListFromString(prerequisite_string)
                    processing_time_list = processing_time_list.split('-')
                    component_dict = {}
                    component_dict["_class"] = "Dream.OrderComponent"
                    if part_type == "Mould":
                        component_dict["_class"] = "Dream.Mould"
                    component_dict["componentType"] = part_type
                    component_dict["id"] = "%i" % i  # XXX hack, we use it in UI to retrieve spreadsheet line
                    component_dict["name"] = part
                    component_list.append(component_dict)
                    route_list = self.getRouteList(sequence_list, processing_time_list,
                                                   prerequisite_list)
                    # A mould skips the first (non-machine) route step.
                    if part_type == "Mould":
                        route_list = route_list[1:]
                    component_dict["route"] = route_list
                    i += 1
                order_dict["componentsList"] = component_list
            # NOTE(review): assumes a "QStart" node exists in the graph —
            # confirm against the models using this plugin.
            data["nodes"]["QStart"]["wip"] = wip_list
        return data
from copy import copy
import json
import time
import random
import operator
import datetime
from dream.simulation.GUI.Default import Simulation as DefaultSimulation
import logging
logger = logging.getLogger('dream.platform')
class Simulation(DefaultSimulation):
    """Simulation plugin that applies operator/station shift patterns
    read from a spreadsheet to the graph nodes."""

    def _preprocess(self, data):
        """Preprocess data, reading shift spreadsheet
        """
        data = DefaultSimulation._preprocess(self, data)
        strptime = datetime.datetime.strptime
        # BUGFIX: fall back to the current date when 'currentDate' is
        # empty, consistently with the other plugins; previously an
        # empty value made strptime raise ValueError.
        now = datetime.datetime.now()
        if data['general']['currentDate']:
            now = strptime(data['general']['currentDate'], '%Y/%m/%d')
        shift_by_station = {}
        for line in data['shift_spreadsheet'][1:]:
            if line[1]:
                # Get the dates, and convert them to simulation clock time units.
                # In this class, time unit is a minute (XXX it can be an option)
                start_date = strptime("%s %s" % (line[0], line[2]), '%Y/%m/%d %H:%M')
                start_time = (start_date - now).total_seconds() // 60
                stop_date = strptime("%s %s" % (line[0], line[3]), '%Y/%m/%d %H:%M')
                stop_time = (stop_date - now).total_seconds() // 60
                # One spreadsheet line can list several comma-separated stations.
                for station in line[1].split(','):
                    station = station.strip()
                    shift_by_station.setdefault(station, []).append(
                        (start_time, stop_time) )
        for node, node_data in data['nodes'].items():
            if node in shift_by_station:
                node_data['shift'] = {'shiftPattern': shift_by_station.pop(node),
                                      'endUnfinished': 0}  # XXX shall we make this
                                                           # configurable ?
        # Every station referenced in the shift spreadsheet must exist
        # as a node of the graph.
        assert not shift_by_station, \
            "Some stations only exist in shift but not in graph: %r"\
            % shift_by_station.keys()
        # from pprint import pformat
        # logger.info(pformat(data))
        return data
# ===========================================================================
# Copyright 2013 University of Limerick
#
# This file is part of DREAM.
#
# DREAM is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DREAM is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DREAM. If not, see <http://www.gnu.org/licenses/>.
# ===========================================================================
# See http://peak.telecommunity.com/DevCenter/setuptools#namespace-packages
# Declare this directory as a namespace package so that several
# distributions can contribute modules under the same package name.
try:
    __import__('pkg_resources').declare_namespace(__name__)
except ImportError:
    # setuptools is not available: fall back to the stdlib mechanism.
    from pkgutil import extend_path
    __path__ = extend_path(__path__, __name__)
\ No newline at end of file
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment