Python source code examples: copy.copy()
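copy.copy() performs a shallow copy: it builds a new top-level object but reuses references to everything nested inside it. As a minimal, self-contained illustration of that behavior (not taken from any of the projects below):

import copy

outer = {"nums": [1, 2, 3]}
shallow = copy.copy(outer)

assert shallow is not outer                # new top-level dict
assert shallow["nums"] is outer["nums"]    # nested list is shared

shallow["nums"].append(4)
print(outer["nums"])  # [1, 2, 3, 4] -- the mutation is visible through both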
Example 1
def __copy__(self):
"""
Shallow copy of a WrapLinker.
Returns
-------
object
    A copy of self, where each of the linkers in self.linkers
    has been shallow-copied.

    It is useful because in FunctionMaker, copy.copy is called on the
    Mode's linker, so that it is not modified in place when linker.accept()
    is called. In this case, we want the wrapped linkers to be copied too.
"""
other = self.__class__(
linkers=[copy(l) for l in self.linkers],
wrapper=self.wrapper)
return other
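copy.copy() dispatches to __copy__ when a class defines one, which is how the override above takes effect. A hypothetical reduced sketch of the same pattern (Wrapper and its constructor are illustrative, not the real WrapLinker API):

from copy import copy

class Wrapper:
    def __init__(self, linkers):
        self.linkers = linkers

    def __copy__(self):
        # copy.copy(wrapper) lands here, copying one level deeper
        return Wrapper([copy(l) for l in self.linkers])

w = Wrapper([[1], [2]])
w2 = copy(w)
assert w2.linkers is not w.linkers         # list rebuilt
assert w2.linkers[0] is not w.linkers[0]   # each linker shallow-copied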
Example 2
def clone(self, allow_gc=undef):
new = copy(self)
if allow_gc is not undef:
new.allow_gc = allow_gc
return new
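clone() pairs copy() with a module-level sentinel so "argument not passed" can be told apart from any real value, including None. A hedged sketch of the same idiom, with undef defined locally for illustration:

from copy import copy

undef = object()  # unique sentinel

class Mode:
    def __init__(self):
        self.allow_gc = True

    def clone(self, allow_gc=undef):
        new = copy(self)
        if allow_gc is not undef:
            new.allow_gc = allow_gc
        return new

m = Mode()
assert m.clone().allow_gc is True               # untouched
assert m.clone(allow_gc=None).allow_gc is None  # None is a legitimate value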
Example 3
def test_tracking_step(lattice, parametr, update_ref_values=False):
"""Tracking step function test
:parametr=0 - tracking with MethodTM() - params[Undulator] = UndulatorTestTM
:parametr=1 - tracking with default MethodTM()
"""
p = Particle(x=0.001, y=0.002)
p.E = 2.5
navi = Navigator(lattice)
dz = 0.01
P1 = []
for iii in range(int(lattice[parametr].totalLen/dz)):
tracking_step(lattice[parametr], [p], dz=dz, navi=navi)
P1.append(copy.copy(p))
tracking_step(lattice[parametr], p, dz=dz, navi=navi)
P1 = obj2dict(P1)
if update_ref_values:
return P1
p_ref = json_read(REF_RES_DIR + sys._getframe().f_code.co_name + str(parametr) +'.json')
#assert check_dict(P1, p_ref, TOL)
result = check_dict(P1, p_ref, TOL, assert_info=' P1 - ')
assert check_result(result)
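The test stores copy.copy(p) rather than p because tracking_step mutates the particle in place; without the copy, every element of P1 would alias the particle's final state. The effect in isolation (Particle is a stand-in class here):

import copy

class Particle:
    def __init__(self, x):
        self.x = x

p = Particle(0.0)
snapshots, aliases = [], []
for _ in range(3):
    p.x += 1.0                     # in-place update, like tracking_step
    snapshots.append(copy.copy(p))
    aliases.append(p)

print([s.x for s in snapshots])  # [1.0, 2.0, 3.0]
print([a.x for a in aliases])    # [3.0, 3.0, 3.0] -- one shared object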
Example 4
def construct( s ):
@update
def upblk():
        u = copy(42, 10)  # copy() takes a single argument; this call raises TypeError at run time
Example 5
def construct( s ):
import copy
s.out = OutPort( Bits32 )
@update
def upblk():
s.out @= copy.copy( 42 )
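For immutable atoms such as ints, copy.copy() simply returns its argument, so the call above yields 42 itself rather than a new object:

import copy

x = 42
assert copy.copy(x) is x   # ints are returned unchanged

t = (1, 2, 3)
assert copy.copy(t) is t   # tuples are treated as immutable too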
Example 6
def changesettings(self, case_id, *args, **kw):
    import copy
    newsettings = copy.copy(self.settings)
    for name, val in kw.items():  # dict.iteritems() is Python 2 only
newsettings.set( name, val )
storepath = self.db.get_case_storepath(case_id)
cfgpath = os.path.join( storepath, Includes.CASE_SETTINGSFILENAME )
newsettings.set_storefile(cfgpath)
newsettings.save()
self.settings = newsettings
return json.dumps({"success":1})
Example 7
def copy(self):
"""
Construct a new IntervalTree using shallow copies of the
intervals in the source tree.
Completes in O(n*log n) time.
:rtype: IntervalTree
"""
return IntervalTree(iv.copy() for iv in self)
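This copy() is one level deeper than copy.copy(tree): every Interval is itself copied, while the data payload of each Interval stays shared. The distinction, sketched with lists standing in for Intervals:

import copy

payload = {"label": "a"}
intervals = [[0, 5, payload]]                    # stand-in Interval objects

flat = copy.copy(intervals)                      # shares the Interval objects
per_item = [copy.copy(iv) for iv in intervals]   # copies each Interval

assert flat[0] is intervals[0]
assert per_item[0] is not intervals[0]
assert per_item[0][2] is payload                 # payload still shared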
Example 8
def iou3d(corners_3d_b1, corners_3d_b2, vol):
    # expects numpy as np, shapely.geometry.Polygon and the copy module in scope;
    # vol is presumably the summed volume of both boxes, making vol - intersect_3d the union
    corners_3d_b1 = copy.copy(corners_3d_b1)  # copy so the in-place edits below don't touch the caller's arrays
    corners_3d_b2 = copy.copy(corners_3d_b2)
corners_3d_b1 = corners_3d_b1.T
corners_3d_b2 = corners_3d_b2.T
y_min_b1 = np.min(corners_3d_b1[:, 1])
y_max_b1 = np.max(corners_3d_b1[:, 1])
y_min_b2 = np.min(corners_3d_b2[:, 1])
y_max_b2 = np.max(corners_3d_b2[:, 1])
y_intersect = np.max([0, np.min([y_max_b1, y_max_b2]) - np.max([y_min_b1, y_min_b2])])
# set Z as Y
corners_3d_b1[:, 1] = corners_3d_b1[:, 2]
corners_3d_b2[:, 1] = corners_3d_b2[:, 2]
polygon_order = [7, 2, 3, 6, 7]
box_b1_bev = Polygon([list(corners_3d_b1[i][0:2]) for i in polygon_order])
box_b2_bev = Polygon([list(corners_3d_b2[i][0:2]) for i in polygon_order])
intersect_bev = box_b2_bev.intersection(box_b1_bev).area
intersect_3d = y_intersect * intersect_bev
iou_bev = intersect_bev / (box_b2_bev.area + box_b1_bev.area - intersect_bev)
iou_3d = intersect_3d / (vol - intersect_3d)
return iou_bev, iou_3d
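copy.copy() on a NumPy array goes through ndarray.__copy__, which allocates a fresh buffer, so the in-place column assignments above never reach the caller's corner arrays. A quick check of that assumption:

import copy
import numpy as np

a = np.arange(6.0).reshape(3, 2)
b = copy.copy(a)
b[:, 1] = -1.0                      # mutate only the copy

assert not np.shares_memory(a, b)   # separate buffers
assert a[0, 1] == 1.0               # original is untouched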
Example 9
def load_weights(model, path, remove_module=False):
    """
    Loads a PyTorch model's weights from the given path.
    """
    dst_weights = model.state_dict()
    src_weights = torch.load(path)
    dst_keys = list(dst_weights.keys())
    src_keys = list(src_weights.keys())
    if remove_module:
        # strip the 'module.' prefix that DataParallel adds to every key
        # (assumes all keys carry the prefix; a key without it would be
        # rebound to itself and then deleted by the two lines below)
        for key in src_keys:
            src_weights[key.replace('module.', '')] = src_weights[key]
            del src_weights[key]
src_keys = list(src_weights.keys())
# remove keys not in dst
for key in src_keys:
if key not in dst_keys: del src_weights[key]
else:
# remove keys not in dst
for key in src_keys:
if key not in dst_keys: del src_weights[key]
# add keys not in src
for key in dst_keys:
if key not in src_keys: src_weights[key] = dst_weights[key]
model.load_state_dict(src_weights)
Example 10
def create_surface(self, vertices, simplices, fn, alpha=.8):
"""
Method to create the polydata that define the surfaces
Args:
vertices (numpy.array): 2D array (XYZ) with the coordinates of the points
simplices (numpy.array): 2D array with the value of the vertices that form every single triangle
fn (int): id
alpha (float): Opacity
Returns:
vtk.vtkActor, vtk.vtkPolyDataMapper, vtk.vtkPolyData
"""
vertices_c = copy.deepcopy(vertices)
simplices_c = copy.deepcopy(simplices)
surf_polydata = vtk.vtkPolyData()
surf_polydata.SetPoints(self.create_surface_points(vertices_c))
surf_polydata.SetPolys(self.create_surface_triangles(simplices_c))
surf_polydata.Modified()
surf_mapper = vtk.vtkPolyDataMapper()
surf_mapper.SetInputData(surf_polydata)
surf_mapper.Update()
surf_actor = vtk.vtkActor()
surf_actor.SetMapper(surf_mapper)
    surf_actor.GetProperty().SetColor(
        mcolors.hex2color(self.geo_model._surfaces.df.set_index('id')['color'][fn]))
surf_actor.GetProperty().SetOpacity(alpha)
return surf_actor, surf_mapper, surf_polydata
Example 11
def cut_finite_fault_surfaces(geo_model, ver: dict, sim: dict):
"""Cut vertices and simplices for finite fault surfaces to finite fault ellipse
Args:
geo_model (gempy.core.model.Project): gempy geo_model object
ver (dict): Dictionary with surfaces as keys and vertices ndarray as values.
sim (dict): Dictionary with surfaces as keys and simplices ndarray as values.
Returns:
ver, sim (dict, dict): Updated vertices and simplices with finite fault
surfaces cut to ellipses.
"""
from scipy.spatial import Delaunay
from copy import copy
finite_ver = copy(ver)
finite_sim = copy(sim)
finite_fault_series = list(geo_model._faults.df[geo_model._faults.df["isFinite"] == True].index)
finite_fault_surfaces = list(
geo_model._surfaces.df[geo_model._surfaces.df._stack == finite_fault_series].surface.unique())
for fault in finite_fault_surfaces:
        U, fpoints_rot, fctr_rot, a, b = get_fault_rotation_objects(geo_model, "Fault 1")  # note: surface name is hardcoded
rpoints = np.dot(ver[fault], U[0])
# rpoints = np.dot(rpoints, U[-1])
r = (rpoints[:, 0] - fctr_rot[0]) ** 2 / a ** 2 + (rpoints[:, 1] - fctr_rot[1]) ** 2 / b ** 2
finite_ver[fault] = finite_ver[fault][r < 1]
delaunay = Delaunay(finite_ver[fault])
finite_sim[fault] = delaunay.simplices
# finite_sim[fault] = finite_sim[fault][np.isin(sim[fault], np.argwhere(r<0.33))]
return finite_ver, finite_sim
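copy(ver) duplicates only the dict itself; the ndarray values stay shared until a key is rebound, which is exactly what the function does with finite_ver[fault] = .... The same behavior in miniature:

from copy import copy

ver = {"fault": [1, 2, 3], "layer": [4, 5]}
finite_ver = copy(ver)

finite_ver["fault"] = [1, 2]                 # rebinding leaves the original alone
assert ver["fault"] == [1, 2, 3]
assert finite_ver["layer"] is ver["layer"]   # untouched values remain shared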
Example 12
def make_thunk(self, **kwargs):
    # izip below is the Python 2 iterator zip (itertools.izip / six.moves.zip);
    # on Python 3 the built-in zip is equivalent
    no_recycling = self.no_recycling
make_all = [self.linkers[0].make_all(**kwargs)]
kwargs.pop('input_storage', None)
make_all += [l.make_all(**kwargs) for l in self.linkers[1:]]
    fns, input_lists, output_lists, thunk_lists, order_lists = zip(*make_all)
order_list0 = order_lists[0]
for order_list in order_lists[1:]:
if not order_list0 == order_list:
raise Exception(
"All linkers to WrapLinker should execute operations in the same order.")
inputs0 = input_lists[0]
outputs0 = output_lists[0]
thunk_groups = list(zip(*thunk_lists))
order = [x[0] for x in zip(*order_lists)]
to_reset = []
for thunks, node in izip(thunk_groups, order):
for j, output in enumerate(node.outputs):
if output in no_recycling:
for thunk in thunks:
to_reset.append(thunk.outputs[j])
wrapper = self.wrapper
pre = self.pre
def f():
for inputs in input_lists[1:]:
for input1, input2 in izip(inputs0, inputs):
input2.storage[0] = copy(input1.storage[0])
for x in to_reset:
x[0] = None
pre(self, [input.data for input in input_lists[0]],
order, thunk_groups)
for i, (thunks, node) in enumerate(izip(thunk_groups, order)):
try:
wrapper(i, node, *thunks)
except Exception:
raise_with_op(node, *thunks)
f.thunk_groups = thunk_groups
return f, inputs0, outputs0
Example 13
def onDepth(self, ws_data):
"""
5档深度行情数据回报
:param ws_data:
:return:
"""
channel = ws_data.get('channel')
data = ws_data.get('data', {})
    # check channel/data
if channel is None and len(data) == 0:
return
symbol = self.channelSymbolMap.get(channel)
    if symbol is None:
return
    # update the tick
if symbol not in self.tickDict:
tick = VtTickData()
tick.symbol = symbol
tick.vtSymbol = symbol
tick.gatewayName = self.gatewayName
self.tickDict[symbol] = tick
else:
tick = self.tickDict[symbol]
tick.bidPrice1, tick.bidVolume1 = data['bids'][0]
tick.bidPrice2, tick.bidVolume2 = data['bids'][1]
tick.bidPrice3, tick.bidVolume3 = data['bids'][2]
tick.bidPrice4, tick.bidVolume4 = data['bids'][3]
tick.bidPrice5, tick.bidVolume5 = data['bids'][4]
tick.askPrice1, tick.askVolume1 = data['asks'][-1]
tick.askPrice2, tick.askVolume2 = data['asks'][-2]
tick.askPrice3, tick.askVolume3 = data['asks'][-3]
tick.askPrice4, tick.askVolume4 = data['asks'][-4]
tick.askPrice5, tick.askVolume5 = data['asks'][-5]
tick.bidPrice1, tick.bidVolume1 = float(tick.bidPrice1), float(tick.bidVolume1)
tick.bidPrice2, tick.bidVolume2 = float(tick.bidPrice2), float(tick.bidVolume2)
tick.bidPrice3, tick.bidVolume3 = float(tick.bidPrice3), float(tick.bidVolume3)
tick.bidPrice4, tick.bidVolume4 = float(tick.bidPrice4), float(tick.bidVolume4)
tick.bidPrice5, tick.bidVolume5 = float(tick.bidPrice5), float(tick.bidVolume5)
tick.askPrice1, tick.askVolume1 = float(tick.askPrice1), float(tick.askVolume1)
tick.askPrice2, tick.askVolume2 = float(tick.askPrice2), float(tick.askVolume2)
tick.askPrice3, tick.askVolume3 = float(tick.askPrice3), float(tick.askVolume3)
tick.askPrice4, tick.askVolume4 = float(tick.askPrice4), float(tick.askVolume4)
tick.askPrice5, tick.askVolume5 = float(tick.askPrice5), float(tick.askVolume5)
tick.date, tick.time,tick.datetime = self.generateDateTime(data['timestamp'])
# print "Depth", tick.date, tick.time
    # push the tick event
newtick = copy(tick)
self.gateway.onTick(newtick)
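The handler keeps one mutable tick object per symbol and publishes copy(tick) instead of the cached object; otherwise every consumer would hold a reference that the next depth update silently overwrites. The pattern in isolation (Tick is a stand-in for VtTickData):

from copy import copy

class Tick:
    def __init__(self):
        self.bidPrice1 = 0.0

cached = Tick()
published = []
for price in (10.0, 11.0):
    cached.bidPrice1 = price        # in-place update of the cached tick
    published.append(copy(cached))  # snapshot before pushing downstream

print([t.bidPrice1 for t in published])  # [10.0, 11.0], not [11.0, 11.0]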
Example 14
def merge_equals(self, data_reducer=None, data_initializer=None):
"""
Finds all intervals with equal ranges and merges them
into a single interval. If provided, uses data_reducer and
data_initializer with similar semantics to Python's built-in
reduce(reducer_func[, initializer]), as follows:
If data_reducer is set to a function, combines the data
fields of the Intervals with
current_reduced_data = data_reducer(current_reduced_data, new_data)
If data_reducer is None, the merged Interval's data
field will be set to None, ignoring all the data fields
of the merged Intervals.
On encountering the first Interval to merge, if
data_initializer is None (default), uses the first
Interval's data field as the first value for
current_reduced_data. If data_initializer is not None,
current_reduced_data is set to a shallow copy of
data_initiazer created with
copy.copy(data_initializer).
Completes in O(n*logn).
"""
if not self:
return
sorted_intervals = sorted(self.all_intervals) # get sorted intervals
merged = []
# use mutable object to allow new_series() to modify it
current_reduced = [None]
higher = None # iterating variable, which new_series() needs access to
def new_series():
if data_initializer is None:
current_reduced[0] = higher.data
merged.append(higher)
return
else: # data_initializer is not None
current_reduced[0] = copy(data_initializer)
current_reduced[0] = data_reducer(current_reduced[0], higher.data)
merged.append(Interval(higher.begin, higher.end, current_reduced[0]))
for higher in sorted_intervals:
if merged: # series already begun
lower = merged[-1]
if higher.range_matches(lower): # should merge
upper_bound = max(lower.end, higher.end)
if data_reducer is not None:
current_reduced[0] = data_reducer(current_reduced[0], higher.data)
else: # annihilate the data, since we don't know how to merge it
current_reduced[0] = None
merged[-1] = Interval(lower.begin, upper_bound, current_reduced[0])
else:
new_series()
else: # not merged; is first of Intervals to merge
new_series()
self.__init__(merged)
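copy(data_initializer) gives each run of equal intervals its own starting accumulator, so a reducer that mutates its accumulator cannot corrupt the caller's seed. A reduced sketch of that contract (reduce_series is illustrative, not the library API):

from copy import copy

def reduce_series(series, data_reducer, data_initializer):
    acc = copy(data_initializer)  # fresh shallow copy per series
    for item in series:
        acc = data_reducer(acc, item)
    return acc

def append_reducer(acc, item):
    acc.append(item)              # mutates the accumulator in place
    return acc

seed = []
assert reduce_series([1, 2], append_reducer, seed) == [1, 2]
assert reduce_series([3], append_reducer, seed) == [3]
assert seed == []                 # the shared seed was never touched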
Example 15
def init_training_model(conf, cache_folder, conf_name):
"""
This function is meant to load the training model and optimizer, which expects
./model/<conf.model>.py to be the pytorch model file.
The function copies the model file into the cache BEFORE loading, for easy reproducibility.
"""
src_path = os.path.join('.', 'models', conf.model + '.py')
dst_path = os.path.join(cache_folder, conf.model + '.py')
# (re-) copy the pytorch model file
if os.path.exists(dst_path): os.remove(dst_path)
shutil.copyfile(src_path, dst_path)
shutil.copyfile(os.path.join('.', 'scripts', 'config', conf_name + '.py'), os.path.join(cache_folder, conf_name + '.py'))
# load and build
network = absolute_import(dst_path)
network = network.build(conf, 'train')
# multi-gpu
network = torch.nn.DataParallel(network)
# load SGD
if conf.solver_type.lower() == 'sgd':
lr = conf.lr
mo = conf.momentum
wd = conf.weight_decay
optimizer = torch.optim.SGD(network.parameters(), lr=lr, momentum=mo, weight_decay=wd)
# load adam
elif conf.solver_type.lower() == 'adam':
lr = conf.lr
wd = conf.weight_decay
optimizer = torch.optim.Adam(network.parameters(), lr=lr, weight_decay=wd)
    # load adamax
    elif conf.solver_type.lower() == 'adamax':
        lr = conf.lr
        wd = conf.weight_decay
        optimizer = torch.optim.Adamax(network.parameters(), lr=lr, weight_decay=wd)
    else:
        raise ValueError('unsupported solver_type: ' + conf.solver_type)
scheduler = None
if 'cosine' in conf.lr_policy:
        scheduler = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(optimizer, conf.restart_iters, T_mult=1)  # restart every conf.restart_iters iterations
elif 'onecycle' in conf.lr_policy:
scheduler = OneCycleLR(optimizer, max_lr=lr, total_steps=conf.max_iter)
return network, optimizer, scheduler
Example 16
def init_training_model(conf, cache_folder):
"""
This function is meant to load the training model and optimizer, which expects
./model/<conf.model>.py to be the pytorch model file.
The function copies the model file into the cache BEFORE loading, for easy reproducibility.
"""
src_path = os.path.join('.', 'models', conf.model + '.py')
dst_path = os.path.join(cache_folder, conf.model + '.py')
# (re-) copy the pytorch model file
if os.path.exists(dst_path): os.remove(dst_path)
shutil.copyfile(src_path, dst_path)
# load and build
network = absolute_import(dst_path)
network = network.build(conf, 'train')
# multi-gpu
network = torch.nn.DataParallel(network)
# load SGD
if conf.solver_type.lower() == 'sgd':
lr = conf.lr
mo = conf.momentum
wd = conf.weight_decay
optimizer = torch.optim.SGD(network.parameters(), lr=lr, momentum=mo, weight_decay=wd)
# load adam
elif conf.solver_type.lower() == 'adam':
lr = conf.lr
wd = conf.weight_decay
optimizer = torch.optim.Adam(network.parameters(), lr=lr, weight_decay=wd)
    # load adamax
    elif conf.solver_type.lower() == 'adamax':
        lr = conf.lr
        wd = conf.weight_decay
        optimizer = torch.optim.Adamax(network.parameters(), lr=lr, weight_decay=wd)
    else:
        raise ValueError('unsupported solver_type: ' + conf.solver_type)
return network, optimizer
Example 17
def set_topography(self):
    # copy the grid's topography points so the vertical-exaggeration edit
    # below does not modify the values stored on the grid
    vertices = copy.copy(self.geo_model._grid.topography.values)
points = vtk.vtkPoints()
    if self.ve != 1:
        vertices[:, 2] = vertices[:, 2] * self.ve
points.SetData(numpy_to_vtk(vertices))
# Add the grid points to a polydata object
polydata = vtk.vtkPolyData()
polydata.SetPoints(points)
# Triangulate the grid points
delaunay = vtk.vtkDelaunay2D()
delaunay.SetInputData(polydata)
delaunay.Update()
# Create a mapper and actor
triangulatedMapper = vtk.vtkPolyDataMapper()
triangulatedMapper.SetInputConnection(delaunay.GetOutputPort())
triangulatedActor = vtk.vtkActor()
triangulatedActor.SetMapper(triangulatedMapper)
self.topography_surface = triangulatedActor
self._topography_polydata = polydata
self._topography_delauny = delaunay
self.ren_list[0].AddActor(triangulatedActor)
self.ren_list[1].AddActor(triangulatedActor)
self.ren_list[2].AddActor(triangulatedActor)
self.ren_list[3].AddActor(triangulatedActor)
try:
if self.geo_model.solutions.geological_map is not None:
self.set_geological_map()
except AttributeError as ae:
warnings.warn(str(ae))