Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- from __future__ import division
- import math
- import time
- from multiprocessing import Pool
- import random
- import os
- from numba import jit, prange
- import numpy as np
- import matplotlib.pyplot as plt
- from decimal import Decimal
- from productionoptimization import *
- from shutil import copyfile
- import re
- import subprocess
- import re
- import numpy as np
- import matplotlib.pyplot as plt
- import seaborn as sns
- import pandas as pd
- from mpl_toolkits.mplot3d import Axes3D
- import math
- from copy import deepcopy
- import pickle
- from shutil import copyfile
- import pickle
- import threading
- import multiprocessing
- #.....................IMPORT MAIN Functions..................
def check_Verticallayers(horizons, wells):
    """Assign each well the 1-based indices of the horizons it crosses.

    A horizon counts as crossed when its depth (``prop``) sampled at the
    well head (x, y of the first trajectory node) lies strictly between the
    depths of the well's first and second trajectory nodes.  Mutates each
    well in place, giving it a fresh ``horizons`` list.
    """
    for well in wells:
        well.horizons = []
        head = well.trajectory_nodes[0]
        bottom_depth = well.trajectory_nodes[1].prop
        for index, horizon in enumerate(horizons, start=1):
            # Horizon depth sampled at the well-head location.
            horizon_z = horizon.get_grid_block(head.x, head.y).get_center().prop
            if head.prop < horizon_z < bottom_depth:
                well.horizons.append(index)
def get_well_welspecs(well, i, skeleton, well_type):
    """Build one WELSPECS record string for *well*.

    ``well_type`` 'p' yields a ``PRDnnn PRODUCER ... OIL`` record, anything
    else an ``INJnnn INJECTOR ... WATER`` record.  The grid location comes
    from the skeleton block under the well head; the j index is flipped
    (182 - j) to match the simulator's row ordering.
    """
    if well_type == 'p':
        name, role, end_string = 'PRD' + str(i).zfill(3), 'PRODUCER', 'OIL /\n'
    else:
        name, role, end_string = 'INJ' + str(i).zfill(3), 'INJECTOR', 'WATER /\n'
    head = well.trajectory_nodes[0]
    block = skeleton.get_grid_block(head.x, head.y)
    return (name + ' ' + role + ' '
            + str(block.i) + ', ' + str(182 - block.j) + ', 1* ' + end_string)
def get_well_compdat(well, i, skeleton, well_type, horizon=None):
    """Build the COMPDAT record strings for *well*.

    A well with exactly two trajectory nodes is treated as vertical and
    gets one Z-oriented completion per entry in ``well.horizons``;
    otherwise each distinct (i, j) grid block along the trajectory gets an
    X-oriented completion in layer *horizon*.

    ``i`` is the 1-based well number used in the PRD/INJ name; the j index
    is flipped (182 - j) to match the simulator's row ordering.
    Returns the list of record strings.
    """
    well_lines = []
    if well_type == 'p':
        start_string = 'PRD' + str(i).zfill(3) + ' '
    else:
        start_string = 'INJ' + str(i).zfill(3) + ' '
    if len(well.trajectory_nodes) == 2:  # vertical: one record per crossed horizon
        head = well.trajectory_nodes[0]
        node_skeleton = skeleton.get_grid_block(head.x, head.y)
        gi = node_skeleton.i
        gj = 182 - node_skeleton.j
        for h in well.horizons:
            string = start_string + str(gi) + ' ' + str(gj) + ' ' + str(h) + ' ' + str(h)
            string += ' OPEN 1* 1* 0.19050 1* 0.00 1* Z 1* /\n'
            well_lines.append(string)
    else:  # horizontal: one record per distinct grid block along the trajectory
        prev = (None, None)
        for node in well.trajectory_nodes:
            node_skeleton = skeleton.get_grid_block(node.x, node.y)
            gi = node_skeleton.i
            gj = 182 - node_skeleton.j
            # BUG FIX: the previous (i, j) pair was never updated in the
            # original loop, so consecutive nodes falling in the same grid
            # block produced duplicate COMPDAT records instead of being
            # skipped.
            if (gi, gj) == prev:
                continue
            prev = (gi, gj)
            # str(horizon) also accepts integer layer numbers, not just strings.
            string = start_string + str(gi) + ' ' + str(gj) + ' ' + str(horizon) + ' ' + str(horizon)
            string += ' OPEN 1* 1* 0.19050 1* 0.00 1* X 1* /\n'
            well_lines.append(string)
    return well_lines
def prepare_datafile(filename, particle):
    """Create a per-particle copy of the template .DATA deck.

    Copies *filename* to ``Run<particle>.DATA``, replacing the last line
    with an INCLUDE of the matching per-particle schedule file
    (``RUN_sch<particle>.INC``).  Returns the run name without the .DATA
    extension (used later to locate the .RSM results file).
    """
    run_name = 'Run' + str(particle)
    # Context managers close both handles promptly; the original leaked
    # the input handle and relied on refcounting for the output.
    with open(filename, 'r') as src:
        lines = src.readlines()
    lines[-1] = '\'RUN_sch' + str(particle) + '.INC\' /'
    with open(run_name + '.DATA', 'w') as out:
        out.writelines(lines)
    return run_name
def prepare_schedule_file(filename, particle, producers, injectors, skeleton):
    """Create the per-particle schedule include file ``RUN_sch<particle>.INC``.

    Walks the template *filename* line by line.  When a WELSPECS, COMPDAT,
    WCONPROD, WCONINJE or WECON keyword is found, copying is suspended and
    the template's records (everything up to the closing '/' line) are
    replaced with records generated for *producers* and *injectors*.
    WCONPROD, WCONINJE and WECON reuse the template's first record line,
    substituting the generated 6-character well name.
    """
    # File handles are now context-managed (the original never closed
    # either), and a leftover debug print of the copying flag was removed.
    with open(filename, 'r') as template:
        lines = template.readlines()
    new_lines = []
    copying = True  # False while skipping a replaced section of the template
    for li, line in enumerate(lines):
        if copying:
            new_lines.append(line)
        if re.search(r'WELSPECS', line):
            copying = False
            for i, well in enumerate(producers):
                new_lines.append(get_well_welspecs(well, i + 1, skeleton, 'p'))
            for i, well in enumerate(injectors):
                new_lines.append(get_well_welspecs(well, i + 1, skeleton, 'i'))
            new_lines.append(' /\n')
        if re.match(r'\s*/', line):
            # Closing slash of a skipped section: resume copying.
            copying = True
        if re.search(r'COMPDAT', line):
            copying = False
            for i, well in enumerate(producers):
                new_lines.extend(get_well_compdat(well, i + 1, skeleton, 'p'))
            for i, well in enumerate(injectors):
                new_lines.extend(get_well_compdat(well, i + 1, skeleton, 'i'))
            new_lines.append(' /\n')
        if re.search(r'WCONPROD', line):
            copying = False
            for i in range(len(producers)):
                # Reuse the template's record line, swapping in the well name.
                new_lines.append('PRD' + str(i + 1).zfill(3) + lines[li + 1][6:])
            new_lines.append(' /\n')
        if re.search(r'WCONINJE', line):
            copying = False
            for i in range(len(injectors)):
                new_lines.append('INJ' + str(i + 1).zfill(3) + lines[li + 1][6:])
            new_lines.append(' /\n')
        if re.search(r'WECON', line):
            copying = False
            for i in range(len(producers)):
                new_lines.append('PRD' + str(i + 1).zfill(3) + lines[li + 1][6:])
            new_lines.append(' /\n')
    with open('RUN_sch' + str(particle) + '.INC', 'w') as out:
        out.writelines(new_lines)
def run_simulation(particle):
    """Run the Eclipse simulator synchronously on the ``Run<particle>`` deck.

    Blocks until the simulation finishes.  NOTE(review): the executable
    path is Windows-specific and hard-coded.
    """
    run_name = 'Run' + str(particle)
    # BUG FIX: the executable path contained a trailing space
    # ('...eclipse '), which prevents the binary from being found.
    subprocess.call(['C:/ecl/2016.2/bin/pc_x86_64/eclipse', run_name])
def single_platform_optimiation(map, wells):
    """Pick the node of *map* that minimizes total estimated drilling cost.

    For each candidate node, every well is charged for the farther of the
    horizontal distances from the node to the well's first/last trajectory
    node (10000 per unit of distance) plus a depth term (5000 * that node's
    ``prop`` depth).  Returns ``[x, y, 0]`` of the cheapest node.

    Raises ValueError when the map has no nodes (the original crashed with
    an UnboundLocalError in that case).
    """
    best_cost = float('inf')  # replaces the magic 100000000000 sentinel
    best_node = None
    for node in map.nodes.values():
        total_cost = 0
        for well in wells:
            first = well.trajectory_nodes[0]
            last = well.trajectory_nodes[-1]
            a = math.sqrt((node.x - first.x) ** 2 + (node.y - first.y) ** 2)
            b = math.sqrt((node.x - last.x) ** 2 + (node.y - last.y) ** 2)
            if a > b:
                total_cost += a * 10000 + 5000 * first.prop
            else:
                total_cost += b * 10000 + 5000 * last.prop
        if total_cost < best_cost:
            best_cost = total_cost
            best_node = [node.x, node.y, 0]
    if best_node is None:
        raise ValueError('platform optimization requires a non-empty map')
    return best_node
def well_cost_discounted(wells, platform_location):
    """Estimate each well's drilling cost and drilling time from the platform.

    For every well the farther of its first/last trajectory node (horizontal
    distance from the platform) is used: cost is 10000 per unit of
    horizontal reach plus 5000 per unit of depth below the platform; time
    is 0.015 * depth + 0.02 * reach.  Returns (costs, times), two lists
    parallel to *wells*.
    """
    costs = []
    times = []
    px, py, pz = platform_location[0], platform_location[1], platform_location[2]
    for well in wells:
        head = well.trajectory_nodes[0]
        toe = well.trajectory_nodes[-1]
        dist_head = math.sqrt((px - head.x) ** 2 + (py - head.y) ** 2)
        dist_toe = math.sqrt((px - toe.x) ** 2 + (py - toe.y) ** 2)
        if dist_head > dist_toe:
            reach, depth = dist_head, head.prop - pz
        else:
            reach, depth = dist_toe, toe.prop - pz
        costs.append(reach * 10000 + 5000 * depth)
        times.append(0.015 * depth + 0.02 * reach)
    return costs, times
def read_rsm(file_name):
    """Parse an Eclipse ``<file_name>.RSM`` summary file.

    Line 7 (index 6) carries unit multipliers: a ``*10**3`` marker in fixed
    column ranges scales water injection / water production / oil
    production by 1000.  Data rows start at line 11 (index 10); the first
    column is time and the last three columns are water production, water
    injection and oil production totals.

    Returns (OilP, WaterP, WaterI, Time) as parallel lists of floats.

    NOTE(review): the multiplier checks use ``elif``, so at most one
    multiplier can be active per file — confirm against real RSM output
    before changing this to independent ``if`` checks.
    """
    oil_prod = []
    water_prod = []
    water_inj = []
    times = []
    wi_mult = 1
    wp_mult = 1
    op_mult = 1
    # Context manager closes the handle; the original never closed it.
    with open(file_name + '.RSM', 'r') as rsm:
        for i, line in enumerate(rsm):
            if i == 6:
                if line[92:98] == '*10**3':
                    wi_mult = 1000
                elif line[80:86] == '*10**3':
                    wp_mult = 1000
                elif line[106:112] == '*10**3':
                    op_mult = 1000
            if i >= 10:
                data = [float(num) for num in line.split()]
                wp, wi, oil = data[-3:]
                oil_prod.append(oil * op_mult)
                water_prod.append(wp * wp_mult)
                water_inj.append(wi * wi_mult)
                times.append(data[0])
    return oil_prod, water_prod, water_inj, times
def NPV_calculation(OilP, WaterP, WaterI, time, wellcost, welltime):
    """Net present value of the production profile minus discounted well costs.

    Incremental oil revenue (45/unit) less water production (6/unit) and
    water injection (2/unit) handling costs, each scaled by 6.28 and
    discounted at 8%/year at the step's time; the total revenue is further
    discounted by the cumulative drilling time, and each well's cost is
    discounted by the drilling time elapsed up to and including that well.
    """
    discount = 0.08
    oil_price = 45
    water_prod_cost = 6
    water_inj_cost = 2
    platform_cost = 0

    revenue = 0
    for idx in range(1, len(OilP)):
        factor = (1 + discount) ** (-time[idx] / 365)
        revenue = (revenue
                   + (OilP[idx] - OilP[idx - 1]) * oil_price * 6.28 * factor
                   - (WaterP[idx] - WaterP[idx - 1]) * water_prod_cost * 6.28 * factor
                   - (WaterI[idx] - WaterI[idx - 1]) * water_inj_cost * 6.28 * factor)

    discounted_cost = 0
    elapsed = 0
    for spend, duration in zip(wellcost, welltime):
        elapsed = elapsed + duration
        discounted_cost = discounted_cost + spend * (1 + discount) ** (-(elapsed / 365))

    revenue = revenue * (1 + discount) ** (-(sum(welltime) / 365))
    return revenue - discounted_cost - platform_cost
# --- COST FUNCTION ------------------------------------------------------------+
# y[0] well spacing factor, y[1] maximum number of wells, y[2] maximum number of producers, y[3] = position of the first well
# fitness function we are attempting to optimize (maximize NPV)
def func1(y, par, netmap, cutoff_map, horizons, skeleton):
    """PSO fitness function for vertical wells: NPV of one field layout.

    y[0] well-spacing factor, y[1] maximum number of wells, y[2] maximum
    number of producers, y[3] grid index of the first well.  *par* is the
    particle id used to name the per-particle Eclipse input/output files.
    Returns the layout's NPV.
    """
    y = np.array(y)
    y[1:] = y[1:].round()  # counts and the grid index must be integral
    drainage_area = (2500 * len(cutoff_map.grid))
    well_spacing = 2 * (math.sqrt(drainage_area / y[1] / math.pi) * y[0])
    first_well_location = list(cutoff_map.grid.values())[int(y[3])].get_center()
    first_well = Well()
    first_well.generate_well(first_well_location, 2085)
    # Exclude a disk around the first well before generating the pattern;
    # the half-spacing copy is used by the pattern generator.
    netmap_half_spacing = deepcopy(netmap)
    netmap.remove_disk(first_well_location.x, first_well_location.y, well_spacing)
    netmap_half_spacing.remove_disk(first_well_location.x, first_well_location.y, well_spacing / 2)
    bh_operator = BHPatternWaterInjectionVertical(
        optimization_map=netmap,
        optimization_map_half_well_spacing=netmap_half_spacing,
        z0=0,
        total_depth=2085,
        max_number_wells=y[1] - 1,
        max_number_producers=y[2] - 1,
        max_number_injectors=y[1] - y[2],
        well_spacing=well_spacing,
        radius_optimal_option=0,
        radius_optimal_value=340)
    producers, injectors, producers_map, injectors_map = bh_operator.generate_wells()
    producers.insert(0, first_well)
    wells = producers + injectors
    check_Verticallayers(horizons, wells)
    data_file = prepare_datafile("TEST_55t2.DATA", par)
    prepare_schedule_file("TEST_55_SCHt2.INC", par, producers, injectors, skeleton)
    run_simulation(par)
    # BUG FIX: a duplicate single_platform_optimiation(netmap, wells) call
    # whose result was discarded has been removed (dead O(nodes*wells) work).
    OIL, WATERP, WATERI, time = read_rsm(data_file)
    platform_location = single_platform_optimiation(horizons[0], wells)
    well_cost, well_time = well_cost_discounted(wells, platform_location)
    return NPV_calculation(OIL, WATERP, WATERI, time, well_cost, well_time)
def func2(y, par, netmap, cutoff_map, horizons, skeleton, horizon_index):
    """PSO fitness function for horizontal wells: NPV of one field layout.

    Like :func:`func1` but with a horizontal section of length y[4] drilled
    in ``horizons[horizon_index]``.  *par* is the particle id used to name
    the per-particle Eclipse input/output files.  Returns the layout's NPV.
    """
    # BUG FIX: the original did `y[:, 1:] = ...`, which assumes a 2-D array,
    # but the cost function receives one particle's 1-D position vector
    # (see func1 / Particle.evaluate).  Convert and round like func1 does.
    y = np.array(y)
    y[1:] = y[1:].round()
    drainage_area = (2500 * len(cutoff_map.grid))
    well_spacing = 2 * (math.sqrt(drainage_area / y[1] / math.pi) * y[0])
    first_well_location = list(netmap.grid.values())[int(y[3])].get_center()
    first_well = Well()
    first_well.generate_well(first_well_location, 2085)
    first_well.generate_horizontal_section(y[4], 20, 0)
    first_well.optimize_horizontal_section_trajectory_azimuth(netmap, 20)
    # Exclude a disk around the first well before generating the pattern;
    # the half-spacing copy is used by the pattern generator.
    netmap_half_spacing = deepcopy(netmap)
    netmap.remove_disk(first_well_location.x, first_well_location.y, well_spacing)
    netmap_half_spacing.remove_disk(first_well_location.x, first_well_location.y, well_spacing / 2)
    bh_operator = BHPatternWaterInjectionHorizontal(
        optimization_map=netmap,
        optimization_map_half_well_spacing=netmap_half_spacing,
        z0=0,
        max_number_wells=y[1] - 1,
        max_number_producers=y[2] - 1,
        max_number_injectors=y[1] - y[2],
        well_spacing=well_spacing,
        azimuth_increment=20,
        horizon=horizons[horizon_index],
        well_length=y[4],
        number_of_segments=20,
        radius_optimal_option=0,
        radius_optimal_value=340)
    producers, injectors, producers_map, injectors_map = bh_operator.generate_wells()
    producers.insert(0, first_well)
    wells = producers + injectors
    check_Verticallayers(horizons, wells)
    data_file = prepare_datafile("TEST_55t2.DATA", par)
    prepare_schedule_file("TEST_55_SCHt2.INC", par, producers, injectors, skeleton)
    run_simulation(par)
    OIL, WATERP, WATERI, time = read_rsm(data_file)
    # BUG FIX: the original referenced an undefined name `horizon` here
    # (NameError at runtime); use the horizon selected by horizon_index.
    platform_location = single_platform_optimiation(horizons[horizon_index], wells)
    well_cost, well_time = well_cost_discounted(wells, platform_location)
    return NPV_calculation(OIL, WATERP, WATERI, time, well_cost, well_time)
- # --- MAIN ---------------------------------------------------------------------+
class Particle:
    """One PSO particle: position, velocity and personal best.

    Relies on the module-level ``num_dimensions`` set by :class:`PSO`.
    Fitness is maximized (it is an NPV); the personal/global best start at
    0, which assumes attainable fitness values are non-negative.
    """

    def __init__(self, x0):
        self.position_i = []   # current position
        self.velocity_i = []   # current velocity
        self.pos_best_i = []   # personal best position
        self.err_best_i = 0    # personal best fitness
        self.err_i = 0         # current fitness
        for i in range(0, num_dimensions):
            # Start at rest; random initialization kept for reference:
            # self.velocity_i.append(random.uniform(-1, 1))
            self.velocity_i.append(0)
            self.position_i.append(x0[i])

    def evaluate(self, costFunc, i, netmap, cutoff_map, horizons, skeleton):
        """Evaluate fitness, update the personal best, and return a deep
        copy of self (the copy is what the process pool sends back)."""
        self.err_i = costFunc(self.position_i, i, netmap, cutoff_map, horizons, skeleton)
        if self.err_i > self.err_best_i:
            # BUG FIX: the original stored a reference to the live position
            # list, so later update_position() calls silently mutated the
            # recorded personal best.  Store a copy instead.
            self.pos_best_i = list(self.position_i)
            self.err_best_i = self.err_i
        new_particle = deepcopy(self)
        return new_particle

    def update_velocity(self, pos_best_g):
        """Standard PSO velocity update toward personal and global bests."""
        w = 0.7213   # inertia weight
        c1 = 1.193   # cognitive constant
        c2 = 1.193   # social constant
        for i in range(0, num_dimensions):
            r1 = random.random()
            r2 = random.random()
            vel_cognitive = c1 * r1 * (self.pos_best_i[i] - self.position_i[i])
            vel_social = c2 * r2 * (pos_best_g[i] - self.position_i[i])
            self.velocity_i[i] = w * self.velocity_i[i] + vel_cognitive + vel_social

    def update_position(self, bounds):
        """Move by the current velocity, clamping each dimension to bounds."""
        for i in range(0, num_dimensions):
            self.position_i[i] = self.position_i[i] + self.velocity_i[i]
            # Clamp to the upper bound...
            if self.position_i[i] > bounds[i][1]:
                self.position_i[i] = bounds[i][1]
            # ...and to the lower bound.
            if self.position_i[i] < bounds[i][0]:
                self.position_i[i] = bounds[i][0]
class PSO():
    """Particle-swarm optimizer that maximizes *costFunc* (an NPV).

    Each iteration evaluates the whole swarm in parallel with a process
    pool (one task per particle), updates the global best, moves the
    particles, and live-plots the best-so-far value scaled to millions.
    The optimization runs entirely inside __init__.
    """

    def __init__(self, costFunc, x0, bounds, num_particles, maxiter, netmap, cutoff_map, horizons, skeleton):
        # Stored on self so the pool workers (which pickle self via
        # parallel_func) can reach them.
        self.costFunc = costFunc
        self.x0 = x0
        self.bounds = bounds
        self.num_particles = num_particles
        self.maxiter = maxiter
        self.netmap = netmap
        self.cutoff_map = cutoff_map
        self.horizons = horizons
        self.skeleton = skeleton
        global num_dimensions
        num_dimensions = len(x0[0])
        err_best_g = 0    # best fitness for the group
        pos_best_g = []   # best position for the group
        # establish the swarm
        swarm = [Particle(x0[i]) for i in range(num_particles)]
        GBest = []           # best fitness per iteration
        GBest_position = []  # best position per iteration
        # begin optimization loop
        k = 0
        while k < maxiter:
            # Evaluate all particles in parallel (chunk size 1).
            processes = min(multiprocessing.cpu_count() - 1, num_particles)
            self.swarm = deepcopy(swarm)
            pool = multiprocessing.Pool(processes=processes)
            self.swarm = pool.map(self.parallel_func, range(num_particles), 1)
            pool.close()
            pool.join()
            swarm = deepcopy(self.swarm)
            # determine if any current particle is the best (globally)
            for j in range(0, num_particles):
                if swarm[j].err_i > err_best_g:
                    pos_best_g = list(swarm[j].position_i)
                    err_best_g = float(swarm[j].err_i)
            # cycle through swarm and update velocities and positions
            for j in range(0, num_particles):
                swarm[j].update_velocity(pos_best_g)
                swarm[j].update_position(bounds)
            print(Decimal(err_best_g))
            GBest.append(err_best_g)
            GBest_position.append(pos_best_g)
            # BUG FIX: the original overwrote err_best_g with a scaled value
            # (err_best_g * 10E-6) just for plotting, which corrupted the
            # global-best comparison on every later iteration.  Plot a
            # scaled copy instead and leave err_best_g intact.
            plt.scatter(k, err_best_g * 10E-6)
            plt.pause(0.0001)
            k += 1
        # print final results
        print(pos_best_g)

    def parallel_func(self, j):
        """Evaluate particle *j* of the snapshot swarm (runs in a worker)."""
        a = self.swarm[j].evaluate(self.costFunc, j, self.netmap, self.cutoff_map, self.horizons, self.skeleton)
        return a
class ThreadWithReturnValue(threading.Thread):
    """Thread whose join() returns the target's return value.

    The original used Python 2 private names (``_Thread__target`` etc.) and
    passed ``Verbose`` positionally to ``Thread.__init__`` — both break on
    Python 3, which this file targets (it uses ``print()``).  *Verbose* is
    still accepted but ignored, for backward compatibility.
    """

    def __init__(self, group=None, target=None, name=None,
                 args=(), kwargs=None, Verbose=None):
        # BUG FIX: kwargs={} was a shared mutable default argument.
        threading.Thread.__init__(self, group=group, target=target, name=name,
                                  args=args,
                                  kwargs=kwargs if kwargs is not None else {})
        self._return = None

    def run(self):
        # Python 3 attribute names: _target/_args/_kwargs.
        if self._target is not None:
            self._return = self._target(*self._args, **self._kwargs)

    def join(self, timeout=None):
        # BUG FIX: forward the standard timeout parameter instead of
        # silently dropping it.
        threading.Thread.join(self, timeout)
        return self._return
- # --- RUN ----------------------------------------------------------------------+
- #
def main():
    """Load the maps and horizons, build a random initial swarm, run the PSO."""
    netmap = DataReader.read_map_from_ascii('maps/U22')
    cutoff_map = deepcopy(netmap)
    cutoff_map.cut_map(1.5)
    skeleton = DataReader.read_map_from_ascii('maps/TOP')
    horizons = [DataReader.read_map_from_ascii('maps/Horizon ' + str(n))
                for n in range(1, 8)]
    # spacing factor, #wells, #producers, first-well position
    bounds = [(1, 2.5), (6, 6), (3, 3), (0, len(cutoff_map.grid))]
    number_particles = 10
    initial = [[np.random.uniform(low, high) for low, high in bounds]
               for _ in range(number_particles)]
    PSO(func1, initial, bounds, number_particles, 30, netmap, cutoff_map, horizons, skeleton)
    # num_cores = multiprocessing.cpu_count()
    # results = Parallel(n_jobs=num_cores)(delayed(processFile)(f) for f in listdir("C:\\Dropbox\\Data"))


if __name__ == "__main__":
    main()
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement