# fea_core.py
from abaqus import *
from abaqusConstants import *
import mesh, job, step, assembly, part
from shutil import copyfile, rmtree
import copy, json, warnings, sys, os, time
import numpy as np
from scipy.spatial import KDTree
from functools import partial
from scipy.interpolate import CubicSpline
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from collections import OrderedDict
import displayGroupOdbToolset as dgo
import pickle, random, string
def job_submit(
jobName,
    SDVINI=True, # whether the SDVINI user subroutine is used
    nodalOutputPrecision=SINGLE, # or FULL
    subroutineFile='', # if a user subroutine is used, give its file name, e.g., "subroutine.for".
numCpus=1, # avoid multiprocessing when using common blocks in Fortran.
numGPUs=0,
intruptWithError=False,
ensureOdbIsClosed=True,
):
    # Returns the job status, e.g., 'ERROR', 'ABORTED', 'TERMINATED', or 'COMPLETED' (see the usage sketch after this function).
odbName = jobName + '.odb'
staName = jobName + '.sta'
lckName = jobName + '.lck'
if os.path.exists(lckName):
raise Exception('Error: an lck file is detected!')
    if ensureOdbIsClosed and os.path.exists(odbName):
        if odbName in session.odbs:
            close_odb(odbName)
if os.path.exists(staName):
try:
os.remove(staName)
except OSError:
            print 'Warning: failed to delete the sta file (%s)! Trying once more after 30 sec.' % staName
time.sleep(30)
os.remove(staName)
if SDVINI:
modelName = mdb.jobs[jobName].model
keywordBlock = mdb.models[modelName].keywordBlock
keywordBlock.setValues(edited = 0)
keywordBlock.synchVersions(storeNodesAndElements=False)
def _give_idx(searchCriteria):
return [line.startswith(searchCriteria) for line in keywordBlock.sieBlocks].index(True)
try:
idx = _give_idx('** ----------------------------------------'\
'------------------------\n** \n** STEP:')
keywordBlock.insert(idx-1, '\n*INITIAL CONDITIONS, TYPE=SOLUTION, USER')
        except Exception:
            raise Exception(
                'Error: "job_submit" could not insert the relevant SDVINI keyword. '
                'Either fix the function or, if possible, pass SDVINI=False.'
            )
job = mdb.jobs[jobName]
job.setValues(
nodalOutputPrecision=nodalOutputPrecision,
userSubroutine=os.path.join(os.getcwd(), subroutineFile),
numCpus=numCpus,
numGPUs=numGPUs
)
try:
job.submit(consistencyChecking=ON)
        # job.waitForCompletion() should not be placed right after
        # job.submit(), as that can occasionally cause strange errors.
        # Instead, the code constantly polls the lck file.
        # job.waitForCompletion() is still necessary, otherwise
        # the job status in CAE may sometimes not be updated.
print '%s is just submitted!'%jobName
while not os.path.exists(lckName):
time.sleep(2)
        # print the sta file contents while the job runs, i.e., while waiting for the lck file to be deleted.
pos = 0L
while os.path.exists(lckName):
time.sleep(5)
if os.path.exists(staName):
try:
with open(staName, "r") as f:
f.seek(pos)
for line in f.readlines():
print line.strip()
pos = f.tell()
except OSError as ex:
                    print 'Warning: an OS error was caught (%s)!' % ex
job.waitForCompletion()
status = str(job.status)
except AbaqusException as ex:
job.waitForCompletion()
if intruptWithError == True:
raise ex
else:
            print 'Warning: an error was suppressed during job submission (i.e., %s)' % ex
while os.path.exists(lckName):
print 'Waiting for the lck file to be deleted!'
time.sleep(10)
status = 'ERROR'
time.sleep(5)
return status
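# A minimal usage sketch for job_submit (hedged: the job name 'Job-1' and the
# subroutine file name are hypothetical; the job must already exist in the MDB):
#
#   status = job_submit('Job-1', SDVINI=True, subroutineFile='subroutines.for')
#   if status != 'COMPLETED':
#       print 'Warning: the job finished with status %s' % status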
def copy_model(NameOfNewModel, nameOfCopiedModel):
mdb.Model(name = NameOfNewModel, objectToCopy = mdb.models[nameOfCopiedModel])
def copy_job(NameOfNewJob, nameOfCopiedJob):
mdb.Job(name = NameOfNewJob, objectToCopy = mdb.jobs[nameOfCopiedJob])
def open_odb(odbName, readOnly=True):
_open_odb_fn = lambda: session.openOdb(name=odbName, readOnly=readOnly)
try:
return _open_odb_fn()
    except Exception as ex1: # if it does not work, wait 5 sec and try again.
        time.sleep(5)
        try:
return _open_odb_fn()
except Exception as ex2:
raise Exception('Error: open_odb() did not work. \n%s\n%s'%(ex1, ex2))
def close_odb(odbName, saveOdb = False):
try:
odbObj = session.odbs[odbName]
if saveOdb == True:
odbObj.save()
odbObj.close()
except Exception as ex:
raise Exception('Error: close_odb() did not work.\n%s'%(ex))
def close_all_odbs():
for odbObj in session.odbs.values():
odbObj.close()
def unit_vector(vector):
norm = np.linalg.norm(vector)
if (norm==0):
vector += 1e-6
norm = np.linalg.norm(vector)
return vector / norm
class nonlipls_tools():
"""This class contains all the pre-stressing and relevant intialization code,
where the inital state is claculate and later used for calculation correct pre-stress
state. The way a high-fidelity cartilage model can then be run."""
def __init__(
self,
jobName,
modelName,
materialName='CAR_UMAT',
sectionName='CAR',
stepName='EQ',
txtFolderName='txt',
subroutineFile='subroutines.for',
numSdv=200,
):
        # ensure the step is not suppressed.
modelObj = mdb.models[modelName]
stepObj = modelObj.steps[stepName]
if stepObj.suppressed:
stepObj.resume()
        # fix the output variables of the fieldOutputRequest.
variables = modelObj.steps[stepName].fieldOutputRequestStates[stepName].variables
variables = set(variables) | {'LE', 'U', 'UR', 'SDV', 'COORD'}
modelObj.fieldOutputRequests[stepName].setValues(variables=tuple(variables))
copy_model(modelName + '-Backup', modelName)
self.odbNameWithoutOptimizaion = jobName +'.odb'
self.jobNameWithoutOptimizaion = jobName
self.modelNameWithoutOptimizaion = modelName
self.modelName = modelName + '-withEQ'
self.odbName = jobName + '-withEQ.odb'
self.jobName = jobName + '-withEQ'
self.txtFolderName = txtFolderName
self.modelNameInverse = modelName + '-Inverse'
self.odbNameInverse = jobName + '-Inverse.odb'
self.jobNameInverse = jobName + '-Inverse'
self.stepName = stepName
self.materialName = materialName
self.numSdv = numSdv
self.sectionName = sectionName
self.job_submit = partial(
job_submit, subroutineFile=subroutineFile, intruptWithError=True
)
def _set_umat_key(self, modelObj, umat_key):
# ensure correct material and section definitions
modelObj.fieldOutputRequests[self.stepName].setValues(position=INTEGRATION_POINTS)
modelObj.sections[self.sectionName].setValues(material=self.materialName, thickness=None)
materialObj = modelObj.materials[self.materialName]
materialObj.Depvar(n = self.numSdv)
userMaterialObj = materialObj.userMaterial
props = copy.deepcopy(userMaterialObj.mechanicalConstants)
props[2] = umat_key
userMaterialObj.setValues(mechanicalConstants=props)
def _focus_on_first_step(self, modelObj):
for key in modelObj.steps.keys():
stepObj = modelObj.steps[key]
if key == 'Initial':
continue
if key == self.stepName:
stepObj.resume()
else:
stepObj.suppress()
def _resume_all_steps(self, modelObj):
for key in modelObj.steps.keys():
stepObj = modelObj.steps[key]
if key != 'Initial':
stepObj.resume()
def initialize_params(self, *keys):
        # find the normalized depth and local plane, and initialize the parameters.
if os.path.exists(self.txtFolderName):
rmtree(self.txtFolderName)
os.makedirs(self.txtFolderName)
with open(os.path.join(self.txtFolderName, 'DATA.txt'), "w") as f:
f.write('0\n')
modelObj = mdb.models[self.modelNameWithoutOptimizaion]
self._set_umat_key(modelObj, 0.0)
self._focus_on_first_step(modelObj)
self.job_submit(jobName=self.jobNameWithoutOptimizaion)
odb = open_odb(self.odbNameWithoutOptimizaion)
assemblyObj = odb.rootAssembly
frameObj0 = odb.steps[self.stepName].frames[0]
        # get the instance name; Abaqus merges the whole model into a single instance in the ODB
instanceOdbName = odb.rootAssembly.instances.keys()[0]
# get all the nodeset region object
regionNodeSets = odb.rootAssembly.instances[instanceOdbName].nodeSets
regionElementSets = odb.rootAssembly.instances[instanceOdbName].elementSets
def _get_coords_of_a_nodeset(nodeSetName):
valueObj = frameObj0.fieldOutputs['COORD'].getSubset(region=regionNodeSets[nodeSetName],
position=NODAL).values
return np.array([i.data for i in valueObj], dtype=np.float32)
def _point_data_of_a_substructure(cartilageKey = 'LAT_CARTILAGE'):
topCoords = _get_coords_of_a_nodeset('TOP_'+cartilageKey)
bottomCoords = _get_coords_of_a_nodeset('BOTTOM_'+cartilageKey)
bottomTree = KDTree(bottomCoords)
topTree = KDTree(topCoords)
centralPoint = np.mean(np.concatenate((topCoords, bottomCoords)), axis = 0, dtype=np.float32)
            centralPoint += 1e-8 # just to avoid division by zero
####### extracting integration point data #######
pointSetCoords = []
for fieldName in ['SDV91', 'SDV92', 'SDV93']:
valueObj = frameObj0.fieldOutputs[fieldName].getSubset(region=regionElementSets[cartilageKey],
position=INTEGRATION_POINT).values
if fieldName == 'SDV91': # only once in the loop:
coordsFromSdv = np.array([[i.elementLabel, i.integrationPoint, i.data] for i in valueObj],
dtype=np.float32)
else:
coordsFromSdv = np.array([[i.data] for i in valueObj], dtype=np.float32)
pointSetCoords.append(coordsFromSdv)
temp1, temp2, temp3 = np.hsplit(pointSetCoords[0], 3)
pointSetCoords = np.array([temp1, temp2, temp3, pointSetCoords[1], pointSetCoords[2]],
dtype=np.float32)
pointSetCoords = np.squeeze(pointSetCoords).T # deleting redundant axis and transposing
def _get_one_unit(point, numPoints = 1):
'''A helper function:
"point" is the array of points,
"numPoints" is the number of nodal points averaged'''
topDistance , topId = topTree.query(point, numPoints)
bottomDistance , bottomId = bottomTree.query(point, numPoints)
topDistance = np.mean(topDistance)
bottomDistance = np.mean(bottomDistance)
totalDistance = topDistance + bottomDistance
depth = topDistance / totalDistance
topPoint = topCoords[topId]
bottomPoint = bottomCoords[bottomId]
                unit1 = unit_vector(bottomPoint - topPoint) # depth unit vector (first important unit)
                vec3 = np.cross(unit1, topPoint - centralPoint) # normal to the plane spanned by the depth vector and the direction to the central point
                unit3 = unit_vector(vec3)
                vec2 = np.cross(unit3, unit1) # lies in that plane but is perpendicular to the depth vector
                unit2 = unit_vector(vec2) # second important unit
return np.hstack([depth, unit1, unit2, unit3])
            # Output is element label, integration point label, depth, and units for each point
return np.array([np.concatenate([pointData[0:2], _get_one_unit(pointData[2:])])
for pointData in pointSetCoords], dtype=np.float32)
def _point_sdv_data(point_data):
'''It takes point_data, i.e., [element, integration point, depth, and units]
and returns [element, integration point, and relevant sdvs]'''
element = point_data[0]
integrationPoint = point_data[1]
depth = point_data[2]
unit1 = point_data[3:6]
unit2 = point_data[6:9]
unit3 = point_data[9:12]
sdv2=1.4 * (depth ** 2) - 1.1 * depth + 0.59 # FIBER CONSTANT
sdv3=0.1 + 0.2 * depth # SOLID MATERIAL CONSTANT
            # GAG constant per depth layer; the deepest points (index 9) still belong to
            # the layer above them (index 8), so the last two entries are equal.
            alpha1 = [0.005, 0.01, 0.025, 0.035, 0.042, 0.048, 0.053, 0.058, 0.06, 0.06]
            sdv1 = alpha1[int(depth*9)] # GAG CONSTANT
# for primary fibril vectors 1 and 2
pFibrilVec1 = depth*unit1 + (1.0-depth)*unit2
pFibrilVec2 = depth*unit1 - (1.0-depth)*unit2
pFibrilVec1 = unit_vector(pFibrilVec1)
pFibrilVec2 = unit_vector(pFibrilVec2)
sFibrilVec1 = unit1
sFibrilVec2 = unit2
sFibrilVec3 = unit3
sFibrilVec4 = unit_vector( unit1 + unit2 + unit3)
sFibrilVec5 = unit_vector(-unit1 + unit2 + unit3)
sFibrilVec6 = unit_vector( unit1 - unit2 + unit3)
sFibrilVec7 = unit_vector( unit1 + unit2 - unit3)
return np.hstack([
element, integrationPoint, sdv1, sdv2, sdv3, pFibrilVec1,
pFibrilVec2, sFibrilVec1, sFibrilVec2, sFibrilVec3, sFibrilVec4,
sFibrilVec5, sFibrilVec6, sFibrilVec7
])
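        # A worked example of the mapping above (hedged, illustrative numbers only):
        # at depth = 0.5, sdv2 = 1.4*0.25 - 1.1*0.5 + 0.59 = 0.39, sdv3 = 0.1 + 0.2*0.5 = 0.2,
        # and sdv1 = alpha1[int(0.5*9)] = alpha1[4] = 0.042; the primary fibril vectors
        # blend unit1 and unit2 in equal parts at mid depth.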
data = []
for key in keys:
subData = _point_data_of_a_substructure(key)
# Depth is normalized within [0,1] (as it is already around 0.1 to 0.9)
subData[:,2] = (subData[:,2] - np.min(subData[:,2]))/np.ptp(subData[:,2])
data.append(
np.array([_point_sdv_data(point_data) for point_data in subData], dtype=np.float32)
)
data = np.concatenate(data, axis = 0)
elementArray = np.unique(data[:,0])
for element in elementArray:
np.savetxt(os.path.join(self.txtFolderName, '%i.txt'%(element)),
data[data[:,0] == element][:,1:],
delimiter=',',
fmt='%10.7f')
with open(os.path.join(self.txtFolderName, 'DATA.txt'), "w") as f:
f.write('1\n')
self._resume_all_steps(modelObj)
self._set_umat_key(modelObj, 1.0)
print 'INITIALIZATION IS COMPLETED! \n'
def run_prestress_optimizer(
self,
key,
sdvList=['SDV%s'%(i) for i in (range(1,4) + range(16,43))],
zeta=1.0,
breakPoint=0,
errorLimit=1e-3,
maxiteration=50,
eta=4.0,
):
# main pre-stress function
zeta = float(zeta) # avoiding problem with integer division
modelWithoutOptimizaionObj = mdb.models[self.modelNameWithoutOptimizaion]
self._set_umat_key(modelWithoutOptimizaionObj, 1.0)
self._focus_on_first_step(modelWithoutOptimizaionObj)
# ensure the correct naming of all cartilage sets.
nodeSet = key + '_NODES'
elementSet = key + '_ELEMENTS'
# creating helper models, jobs, sets, and BCs
rootAssemblyObj = modelWithoutOptimizaionObj.rootAssembly
nodeObj = rootAssemblyObj.sets[nodeSet].nodes
nodeNameList = ['TEMP-%s'%(i) for i in xrange(1, len(nodeObj)+1)]
for i in xrange(len(nodeNameList)):
rootAssemblyObj.Set(nodes=(nodeObj[i:i+1],), name=nodeNameList[i])
for name in [self.modelName, self.modelNameInverse]:
copy_model(name, self.modelNameWithoutOptimizaion)
for name in [self.jobName, self.jobNameInverse]:
copy_job(name, self.jobNameWithoutOptimizaion)
mdb.jobs[self.jobNameInverse].setValues(model=self.modelNameInverse)
mdb.jobs[self.jobName].setValues(model=self.modelName)
steps = mdb.models[self.modelName].steps
stepsWithoutEQ = modelWithoutOptimizaionObj.steps
BCobj = mdb.models[self.modelNameInverse].boundaryConditions
BCkeys = BCobj.keys()
for i in BCkeys:
BCobj[i].suppress()
modelObjTemp = mdb.models[self.modelNameInverse]
for i in xrange(len(nodeNameList)):
modelObjTemp.VelocityBC(name=nodeNameList[i], # bc name and node names are the same.
createStepName='Initial',
region=modelObjTemp.rootAssembly.sets[nodeNameList[i]],
v1=SET,
v2=SET,
v3=SET,
amplitude=UNSET,
localCsys=None,
distributionType=UNIFORM,
fieldName='')
# some helper functions
def _integration_points_values(odb, parameters=['SDV3'], frameNum=-1):
instanceOdbName = odb.rootAssembly.instances.keys()[0] # refers to all
regionElementSets = odb.rootAssembly.instances[instanceOdbName].elementSets
frameObj = odb.steps[self.stepName].frames[frameNum]
return [np.ravel([[item.data, item.elementLabel, item.integrationPoint]
for item in frameObj.fieldOutputs[sdvName].getSubset(region=
regionElementSets[elementSet], position=
INTEGRATION_POINT).values])
for sdvName in parameters]
def _extract_coords_values(odb, frameNum = -1):
instanceOdbName = odb.rootAssembly.instances.keys()[0] # refers to all
regionNodeSets = odb.rootAssembly.instances[instanceOdbName].nodeSets
frameObj = odb.steps[self.stepName].frames[frameNum]
# a list of [[node label], [node coord 1, node coord 2, ...], ...],
# then flatten the list.
return [nodeDataElement
for nodeName in nodeNameList
for nodeData in ([nodeName],
frameObj.fieldOutputs['COORD'].getSubset(region
=regionNodeSets[nodeName], position=NODAL
).values[0].data.tolist()
)
for nodeDataElement in nodeData]
def _edit_node_by_offset(displacementFromInitial, modelName):
rootAssemblyObj = mdb.models[modelName].rootAssembly
num = 0
for i in displacementFromInitial:
num += 1
if num % 4 == 1:
nodeLabel = i
elif num % 4 == 2:
u1 = -i
elif num % 4 == 3:
u2 = -i
elif num % 4 == 0:
u3 = -i
rootAssemblyObj.editNode(nodes=rootAssemblyObj.sets[nodeLabel].nodes[0],
offset1=u1,
offset2=u2,
offset3=u3,
projectToGeometry=OFF)
def _inverse_run(displacementFromInitial):
ModelObjTemp = mdb.models[self.modelNameInverse]
bcStateObj = ModelObjTemp.steps[self.stepName].boundaryConditionStates
num = 0
for i in displacementFromInitial:
num += 1
if num % 4 == 1:
bcLabel = i
elif num % 4 == 2:
# v1 = bcStateObj[bcLabel].v1-i
v1 = -i
elif num % 4 == 3:
# v2 = bcStateObj[bcLabel].v2-i
v2 = -i
elif num % 4 == 0:
# v3 = bcStateObj[bcLabel].v3-i
v3 = -i
ModelObjTemp.boundaryConditions[bcLabel].setValuesInStep(stepName=self.stepName,
v1=v1,
v2=v2,
v3=v3)
with open(os.path.join(self.txtFolderName, 'DATA.txt'), "w") as f:
f.write('-1\n')
self.job_submit(self.jobNameInverse)
with open(os.path.join(self.txtFolderName, 'DATA.txt'), "w") as f:
f.write('1\n')
def _new_SDV_in_fortran(newSdvData):
IntegrationPointArray = np.unique(newSdvData[0][2::3]) # [1.0, 2.0, 3.0, 4.0, ...]
            IntegrationCount = IntegrationPointArray[-1].max() # number of integration points per element
            _, ElementsIdx = np.unique(newSdvData[0][1::3], return_index=True) # e.g., [0L, 27L, 54L, ...]
            elementCount = ElementsIdx[1] # all elements have the same number of integration points
for ElementsIdxItem in ElementsIdx:
elementIdxArray = ElementsIdxItem*3 + 1 + np.arange(0, 3*IntegrationCount, 3, dtype=int)
ValueIdxArray = elementIdxArray - 1
IntegrationPointIdxArray = elementIdxArray + 1
elementItem = newSdvData[0][elementIdxArray[0]]
sdvDataList = np.concatenate((IntegrationPointArray.reshape(1,-1),
np.take(newSdvData, ValueIdxArray, axis = -1)))
np.savetxt(os.path.join(self.txtFolderName, '%i.txt'%(elementItem)),
sdvDataList.T,
delimiter=',',
fmt='%10.7f')
if self.job_submit(self.jobNameWithoutOptimizaion) == 'ABORTED':
raise Exception('ERROR! TOTALLY UNSTABLE MODEL')
odbObjWithoutOptimization = open_odb(self.odbNameWithoutOptimizaion)
initialNodalCoords = _extract_coords_values(odbObjWithoutOptimization, 0)
copyfile(self.odbNameWithoutOptimizaion, self.odbName)
def _calculate_r_u(zeta):
odb = open_odb(self.odbName)
newNodalCoords = _extract_coords_values(odb, -1)
close_odb(self.odbName)
displacementFromInitial = [(newNodalCoords[i]-initialNodalCoords[i])*zeta
if i % 4 != 0
else newNodalCoords[i] # just the label
for i in xrange(len(newNodalCoords))]
newError = np.linalg.norm(np.array(
[displacementFromInitial[i] for i in xrange(len(displacementFromInitial)) if i % 4 != 0]
)) / zeta
return newError, displacementFromInitial
newError, displacementFromInitial = _calculate_r_u(zeta)
self.optimizerStatus = {'step': [1], 'error': [newError], 'zeta': [zeta]}
failed=False
iterationNumber = 1
newSdvDataBackup = _integration_points_values(odbObjWithoutOptimization, sdvList, 0)
close_odb(self.odbNameWithoutOptimizaion)
previousError = newError
while True:
if iterationNumber == maxiteration:
failed = True
break
else:
iterationNumber += 1
copy_model(self.modelName + '-Backup', self.modelName)
_edit_node_by_offset(displacementFromInitial, self.modelName)
copy_model(self.modelNameInverse + '-Backup', self.modelNameInverse)
_inverse_run(displacementFromInitial)
_edit_node_by_offset(displacementFromInitial, self.modelNameInverse)
odbInverse = open_odb(self.odbNameInverse)
newSdvData = _integration_points_values(odbInverse, sdvList, -1)
close_odb(self.odbNameInverse)
_new_SDV_in_fortran(newSdvData)
if self.job_submit(self.jobName) != 'ABORTED':
newError, displacementFromInitial = _calculate_r_u(zeta)
if previousError < newError:
successfulStep = False
else:
successfulStep = True
else:
successfulStep = False
print '\n** #STEP: %s | ERROR: %s | ZETA: %s **\n' % (iterationNumber, newError, zeta)
self.optimizerStatus['step'].append(iterationNumber)
self.optimizerStatus['error'].append(newError)
self.optimizerStatus['zeta'].append(zeta)
if errorLimit > newError:
failed = False
break
if successfulStep == True:
newSdvDataBackup = copy.deepcopy(newSdvData)
previousError = newError
else:
_new_SDV_in_fortran(newSdvDataBackup)
zeta = zeta/eta
if zeta < 0.0001:
failed = True
break
copy_model(self.modelName, self.modelName + '-Backup')
copy_model(self.modelNameInverse, self.modelNameInverse + '-Backup')
# finish_optimization
modelsObj = mdb.models
self._resume_all_steps(modelsObj[self.modelName])
self._resume_all_steps(modelsObj[self.modelNameWithoutOptimizaion])
del modelsObj[self.modelName+'-Backup']
del modelsObj[self.modelNameInverse]
del modelsObj[self.modelNameInverse+'-Backup']
del mdb.jobs[self.jobNameInverse]
for tempSetName in nodeNameList:
for modelName in [self.modelName, self.modelNameWithoutOptimizaion]:
del modelsObj[modelName].rootAssembly.sets[tempSetName]
        if failed == True:
            print 'PRE-STRESSING HAS NOT FULLY CONVERGED! \n'
            return 'ABORTED'
        else:
            print 'PRE-STRESSING HAS BEEN COMPLETED! \n'
            return 'COMPLETED'
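# A minimal usage sketch of the pre-stressing workflow above (hedged: the job,
# model, and set names are hypothetical; 'LAT_CARTILAGE' assumes TOP_*/BOTTOM_*
# node sets and *_NODES/*_ELEMENTS assembly sets exist in the model):
#
#   tools = nonlipls_tools(jobName='Job-1', modelName='Model-1')
#   tools.initialize_params('LAT_CARTILAGE')               # writes txt/<element>.txt files
#   result = tools.run_prestress_optimizer('LAT_CARTILAGE')
#   print result, tools.optimizerStatus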
class file_io():
def __init__(self, mainDir, subDir):
writePath = os.path.join(mainDir, subDir)
        if os.path.exists(writePath):
            rmtree(writePath)
        os.makedirs(writePath)
self.new_address = lambda filePath: os.path.join(writePath, filePath)
self.reset()
def reset(self):
self._sample_num = 1
self.mataOfArrays, self.metaOfDicts, self._seen_keys = {}, {}, {}
def record_csv(self, data_dict):
for key in data_dict:
with open(self.new_address(key) + ".csv", "w") as f:
np.savetxt(f, data_dict[key], delimiter=",")
def _new_sample(self):
self._sample_num += 1
self._seen_keys = {key: False for key in self._seen_keys}
def store_data(self, key, array):
        # The input can be a single array or a dictionary of arrays, each with shape
        # [num nodes or edges or 1, num frames or 1, ...], where only num frames may vary
        # between calls (see the usage sketch after this class).
if self._sample_num == 1:
# if a key is repeated make new sample otherwise add that key to self._seen_keys.
if key in self._seen_keys:
self._new_sample()
else:
self._seen_keys[key] = True
else:
if key in self._seen_keys:
# in a step, if a key was seen, make a new sample
# but before that verify the other keys were already seen.
if self._seen_keys[key] == False:
self._seen_keys[key] = True
else:
if False in self._seen_keys.values():
raise Exception('Error: file_io got an already seen key (' + key +
') before seeing the others in a step num > 1.')
else:
self._new_sample()
else:
raise Exception('Error: file_io got a new key ('+ key +
'), which was not seen before in a step num > 1.')
def _store_single_array(key, array):
            # save the array and update mataOfArrays.
            assert array.ndim > 1, Exception('array.ndim in store_data() should be at least 2.')
dtype = array.dtype
if dtype == np.float64:
array = array.astype(np.float32)
elif dtype == np.int64:
array = array.astype(np.int32)
dtypeName = array.dtype.name
flat_array = array.reshape(-1)
if key not in self.mataOfArrays:
shape = array.shape
if shape[1] == 1:
numFrames = 1L
else:
numFrames = -1L
shape = (shape[0], numFrames, ) + shape[2:]
# update meta
self.mataOfArrays[key] = {"shape": shape, "ndim": array.ndim, "dtype": dtypeName}
openMode = 'wb'
else:
openMode = 'ab'
# self.record({key: array})
with open(self.new_address(key)+'.npy', openMode) as f:
np.save(f, flat_array)
if type(array) == dict:
# store keys as long integers that can be dumped as JSON.
self.metaOfDicts[key] = [long(k) for k in array.keys()]
for k, val in array.items():
_store_single_array(key+"_"+str(k), val)
else:
_store_single_array(key, array)
def store_meta(self, **additionalMetaData):
        # add possible additional metadata and store everything
meta = {
"arrays": self.mataOfArrays,
"dicts": self.metaOfDicts,
"array_names": self.mataOfArrays.keys(),
"total_num_samples": self._sample_num
}
meta.update(additionalMetaData)
with open(self.new_address("meta.json"), "w") as f:
json.dump(meta, f, indent=2, separators=(", ", ": "))
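# A minimal usage sketch of file_io (hedged: the directory names and array shapes
# are hypothetical; arrays follow the [nodes/edges/1, frames/1, ...] convention above):
#
#   io = file_io('results', 'run_01')
#   io.store_data('coords', np.zeros((100, 5, 3), dtype=np.float32))   # sample 1
#   io.store_data('coords', np.zeros((100, 7, 3), dtype=np.float32))   # repeated key -> sample 2
#   io.store_meta(description='example run')                           # writes meta.json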
class timer_tools():
    # set up named timers; each can be started, paused, and stopped repeatedly (see the usage sketch after this class).
def __init__(self, *args):
assert len(args) != 0, Exception(
            'Error: Feed an arbitrary input string to the timer constructor, e.g., "fea".'
)
self.times = {key: [] for key in args}
self.keys = args
# for the first start
self.isStarted = False
self.delta = 0
def start(self, key):
        assert not self.isStarted, Exception('Error: The timer has already been started.')
self.t0 = time.time()
self.isStarted = True
def pause(self, key):
        assert self.isStarted, Exception('Error: The timer should be started to be paused.')
self.delta += time.time() - self.t0
self.isStarted = False
def stop(self, key):
        assert self.isStarted, Exception('Error: The timer should be started to be stopped.')
self.pause(key)
self.times[key].append(self.delta)
self.delta = 0 # for the next possible start
    def ignore(self, key):
        # discard the current measurement without recording it
        self.isStarted = False
        self.delta = 0
def get(self, with_pre=True):
        # use this method at the end to collect all the recorded times.
if with_pre == True:
time_pre = 'time_'
else:
time_pre = ''
return {time_pre+key: np.array(self.times[key], dtype="float32") for key in self.keys}
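# A minimal usage sketch of timer_tools (hedged: the timer name 'fea' is arbitrary):
#
#   timers = timer_tools('fea')
#   timers.start('fea')
#   # ... run a simulation step ...
#   timers.stop('fea')      # records one measurement under 'fea'
#   print timers.get()      # {'time_fea': array([...], dtype=float32)}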
class field_tools():
    # tools to build new calculated fields from the obtained results (see the usage sketch after extract_data below)
def __init__(
self,
odb,
instance,
nodeSetKey,
fileIo,
excludingSteps=set(),
position=NODAL,
jobStatus='COMPLETED',
mainName=''
):
        assert type(excludingSteps) == set, Exception('Error: the excludingSteps argument must be a set!')
stepsObj = odb.steps
stepskeys = stepsObj.keys()
        # some of the excludingSteps might not exist; those are dropped here
excludingSteps = excludingSteps.intersection(set(stepskeys))
baseTime = max([0]+[stepsObj[k].totalTime+stepsObj[k].timePeriod for k in excludingSteps])
frames, times, relativeTimesDict = [], [], OrderedDict()
for step in stepsObj.values():
stepName = step.name
if stepName not in excludingSteps:
initialTime = step.totalTime - baseTime
                # step.frames is an ODB sequence that does not support slicing, so convert it to a list.
stepFrames = [frame for frame in step.frames]
if initialTime != 0.0:
stepFrames = stepFrames[1:]
relativeTimes = np.array([frame.frameValue for frame in stepFrames])
times.append(initialTime + relativeTimes)
frames.extend(stepFrames)
if len(relativeTimes) > 1:
relativeTimesDict[stepName] = np.array(relativeTimes).reshape(-1, 1)
times = np.concatenate(times, axis=0)
        # if the job was aborted, the last frame did not converge and should be deleted
if jobStatus == 'ABORTED':
lastStepName = stepskeys[-1]
frames = frames[:-1]
times = times[:-1]
if lastStepName in relativeTimesDict:
relativeTimesDict[lastStepName] = relativeTimesDict[lastStepName][:-1]
self.frames = frames
self.times = times
self.relativeTimes = relativeTimesDict
self.region = instance.nodeSets[nodeSetKey]
self.instance = instance
self.position = position
self.odb = odb
if mainName != '':
self.mainName = mainName + '_'
else:
self.mainName = mainName
self.store_data = fileIo.store_data
def record_trajectory_length(self):
        # record the total number of frames in the trajectory
self.store_data('trajectory_length', np.array(len(self.frames)).reshape([1, 1, 1]))
def record_time(self):
# record total simulation time of each frame
self.store_data(self.mainName+'time', self.times.reshape([1, -1, 1]))
def get_values(self, field, frame):
return frame.fieldOutputs[field].getSubset(region=self.region, position=self.position).values
def extract_data(self, field, component='data', frame_first=False):
data = np.array([
[getattr(value, component) for value in self.get_values(field, frame)] for frame in self.frames
])
if data.ndim == 2:
data = np.expand_dims(data, axis = -1)
if frame_first==False:
data = data.transpose([1, 0, 2])
return data
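    # A minimal usage sketch of field_tools (hedged: the odb object, instance, and
    # the 'ALL_NODES' node-set name are hypothetical; fileIo is a file_io instance):
    #
    #   ft = field_tools(odb, odb.rootAssembly.instances.values()[0], 'ALL_NODES', fileIo)
    #   u = ft.extract_data('U')                    # shape: [num nodes, num frames, 3]
    #   ft.record_field('U', 'displacement', invariantsList=['magnitude'])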
def get_sample_value_obj(self, field, frameNum = -1):
return self.get_values(field, self.frames[frameNum])
def show_components(self, field, frameNum = -1):
components = dir(self.get_sample_value_obj(field)[0])
return [component for component in components if component[:2] != '__']
def make_tensor_from_sdvs(self, sdvNumsList, name):
numComponents = len(sdvNumsList)
if numComponents == 6:
validInvariants = (MISES, TRESCA, PRESS, INV3, MAX_PRINCIPAL, MID_PRINCIPAL, MIN_PRINCIPAL,)
tensor_type=TENSOR_3D_FULL
elif numComponents in [3 , 2]:
validInvariants = (MAGNITUDE, )
tensor_type=VECTOR
else:
raise Exception('The "make_tensor_from_sdvs" function does not support this tensor type')
data = [self.extract_data('SDV'+str(sdvNum), frame_first=True) for sdvNum in sdvNumsList]
data = np.concatenate(data, axis = -1)
nodeLabels = [value.nodeLabel for value in self.get_sample_value_obj('SDV'+str(sdvNumsList[0]))]
        # frame.frameId might not be sequential, so avoid it; use a running counter instead:
num = 0
for frame in self.frames:
custom_field = frame.FieldOutput(
name=name, description=name, type=tensor_type,
)
custom_field.addData(
position=NODAL, instance=self.instance, labels=nodeLabels, data=data[num]
)
num += 1
def record_field(
self, field, name, invariantsList=None, relative=False, dtype='float32', return_data=False
):
data = self.extract_data(field)
if relative == True:
data -= data[:,0:1]
name = self.mainName + name
self.store_data(name, data.astype(dtype))
if return_data==True:
data_dict = {name: data}
if invariantsList != None:
name += "_"
for invariant in invariantsList:
data = self.extract_data(field, invariant)
if relative == True:
data -= data[:,0:1]
invariantName = name + invariant
self.store_data(invariantName, data.astype(dtype))
if return_data==True:
data_dict[invariantName] = data