# Dashed_single_stroke_opt.py
import pydiffvg
import torch
import skimage
import numpy as np
# Use GPU if available
pydiffvg.set_use_gpu(torch.cuda.is_available())
canvas_width, canvas_height = 256, 256
num_control_points = torch.tensor([2])
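# In pydiffvg, num_control_points gives the number of interior control points per segment,
# so [2] declares a single cubic Bézier segment between the two base points below.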
points = torch.tensor([[120.0, 30.0], # base
[150.0, 60.0], # control point
[ 90.0, 198.0], # control point
[ 60.0, 218.0]]) # base
path = pydiffvg.Path(num_control_points = num_control_points,
points = points,
is_closed = False,
stroke_width = torch.tensor(5.0))
shapes = [path]
path_group = pydiffvg.ShapeGroup(shape_ids = torch.tensor([0]),
fill_color = torch.tensor([0.0, 0.0, 0.0, 0.0]),
stroke_color = torch.tensor([0.6, 0.3, 0.6, 0.8]))
shape_groups = [path_group]
scene_args = pydiffvg.RenderFunction.serialize_scene(\
canvas_width, canvas_height, shapes, shape_groups)
# Visibility function: the stroke is kept where sin(10*t) is positive
def visibility_function(t):
    sin_t = np.sin(10 * t)
    sin_t = round(sin_t, 2)
    return sin_t
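# For example, visibility_function(0.05) = round(sin(0.5), 2) = 0.48 (positive: that part of
# the curve is drawn), while visibility_function(0.40) = round(sin(4.0), 2) = -0.76
# (negative: that part is skipped).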
# Find the zeros of the visibility function
def find_zeros():
    t_samples = np.arange(0, 1.01, 0.01)
    num_samples = len(t_samples)
    a_values = np.array([visibility_function(t) for t in t_samples])
    zeros = []
    for i in range(num_samples - 1):
        if abs(a_values[i]) < 1e-10:
            zeros.append(t_samples[i])
        elif abs(a_values[i + 1]) < 1e-10:
            zeros.append(t_samples[i + 1])
        elif a_values[i] * a_values[i + 1] < 0:
            # Sign change detected: locate the zero by linear interpolation
            t1, t2 = t_samples[i], t_samples[i + 1]
            a1, a2 = a_values[i], a_values[i + 1]
            zero = t1 - a1 * (t2 - t1) / (a2 - a1)
            # zero = np.interp(0, [a1, a2], [t1, t2])
            zeros.append(zero)
    return zeros
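# On [0, 1], sin(10*t) vanishes near t = 0, pi/10, 2*pi/10 and 3*pi/10, so find_zeros()
# is expected to return values close to [0.0, 0.314, 0.628, 0.942].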
# Split a Bézier curve at the zeros of the visibility function
def split_bezier_at_T(control_points, t):
    """
    split_bezier_at_T splits a Bézier curve into two segments at parameter t
    using De Casteljau's algorithm.
    Inputs:
    - control_points: An n x 2 matrix where each row represents a control point.
    - t: A scalar parameter value at which to split the curve.
    Outputs:
    - left: Control points of the left segment.
    - right: Control points of the right segment.
    """
    n = control_points.shape[0]  # Number of control points
    left = torch.zeros(n, 2)     # Control points of the left segment
    right = torch.zeros(n, 2)    # Control points of the right segment
    points = control_points.clone()
    for r in range(n):
        left[r, :] = points[0, :]
        for i in range(n - r - 1):
            points[i, :] = (1 - t) * points[i, :] + t * points[i + 1, :]  # De Casteljau interpolation
        right[r, :] = points[n - r - 1, :]  # Last point of the current reduction
    right = torch.flipud(right)
    return [left, right]
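# A quick sanity check (sketch): De Casteljau splitting keeps the original endpoints and
# makes the two halves meet on the curve at the split point, e.g.
#   left, right = split_bezier_at_T(points, 0.5)
#   assert torch.allclose(left[0], points[0]) and torch.allclose(right[-1], points[-1])
#   assert torch.allclose(left[-1], right[0])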
def split_bezier(control_points, t_values):
    """
    split_bezier splits a Bézier curve at multiple parameter values.
    Inputs:
    - control_points: An n x 2 matrix where each row represents a control point.
    - t_values: A vector of parameter values (in the original parameterization) at which to split the curve.
    Outputs:
    - segments: A list containing the control points of the resulting curve segments.
    """
    # Sort the parameter values to ensure correct sequential splitting
    t_values = np.sort(t_values)
    # List holding the control points of the split segments
    segments = []
    remaining_points = torch.clone(control_points)
    t_prev = 0.0
    for t in t_values:
        # The remaining curve covers [t_prev, 1] of the original parameter range,
        # so rescale t to the local parameter of the remaining curve before splitting
        t_local = (t - t_prev) / (1 - t_prev)
        left, right = split_bezier_at_T(remaining_points, t_local)
        # Store the left segment and keep splitting the right one
        segments.append(left)
        remaining_points = right
        t_prev = t
    segments.append(remaining_points)
    return segments
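# For instance, split_bezier(points, [0.25, 0.5]) should return three sets of control
# points covering t in [0, 0.25], [0.25, 0.5] and [0.5, 1] of the original curve.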
# Selecting the visible segments and building the shapes to render.
# The stroke width and color are passed in so that every dash uses the optimized values.
def split_separate_path(control_points,
                        stroke_width=torch.tensor(5.0),
                        stroke_color=torch.tensor([0.6, 0.3, 0.6, 0.8])):
    # Find the zeros of the visibility function
    t_values = find_zeros()
    print('visibility zeros:', t_values)
    # Splitting the curve
    segments = split_bezier(control_points, t_values)
    # Keep the even-numbered segments (the visible dashes)
    even_segments = [segment for i, segment in enumerate(segments, start=1) if i % 2 == 0]
    # Updated segments for rendering
    shapes = []
    shape_groups = []
    for i, segment in enumerate(even_segments):
        path = pydiffvg.Path(num_control_points = num_control_points,
                             points = segment,
                             is_closed = False,
                             stroke_width = stroke_width)
        shapes.append(path)
        path_group = pydiffvg.ShapeGroup(shape_ids = torch.tensor([i]),
                                         fill_color = torch.tensor([0.0, 0.0, 0.0, 0.0]),
                                         stroke_color = stroke_color)
        shape_groups.append(path_group)
    return shapes, shape_groups
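# With visibility_function(t) = round(sin(10*t), 2), the zeros are roughly
# [0.0, 0.314, 0.628, 0.942], so the curve is cut into five segments and the
# even-numbered ones (segments 2 and 4, where sin(10*t) > 0) are kept,
# giving two visible dashes.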
shapes, shape_groups = split_separate_path(points)
scene_args = pydiffvg.RenderFunction.serialize_scene(\
canvas_width, canvas_height, shapes, shape_groups)
render = pydiffvg.RenderFunction.apply
img = render(256, # width
256, # height
2, # num_samples_x
2, # num_samples_y
0, # seed
None, # background_image
*scene_args)
# The output image is in linear RGB space. Do Gamma correction before saving the image.
pydiffvg.imwrite(img.cpu(), 'results/vis_single_stroke/target.png', gamma=2.2)
target = img.clone()
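# This dashed render is the ground-truth image that the optimization below tries to match.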
# Move the path to produce initial guess
# Normalize points for easier learning rate
points_n = torch.tensor([[100.0/256.0, 40.0/256.0], # base
[155.0/256.0, 65.0/256.0], # control point
[100.0/256.0, 180.0/256.0], # control point
[ 65.0/256.0, 238.0/256.0]], # base
requires_grad = True)
stroke_color = torch.tensor([0.4, 0.7, 0.5, 0.5], requires_grad=True)
stroke_width_n = torch.tensor(10.0 / 100.0, requires_grad=True)
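# The control points are divided by the canvas size (256) and the stroke width by 100 so
# that a single Adam learning rate works reasonably for all parameters; they are scaled
# back up before rendering.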
path.points = points_n * 256
path.stroke_width = stroke_width_n * 100
path_group.stroke_color = stroke_color
shapes, shape_groups = split_separate_path(path.points,
                                           path.stroke_width,
                                           path_group.stroke_color)
scene_args = pydiffvg.RenderFunction.serialize_scene(\
canvas_width, canvas_height, shapes, shape_groups)
img = render(256, # width
256, # height
2, # num_samples_x
2, # num_samples_y
1, # seed
None, # background_image
*scene_args)
pydiffvg.imwrite(img.cpu(), 'results/vis_single_stroke/init.png', gamma=2.2)
# Optimize
optimizer = torch.optim.Adam([points_n, stroke_color, stroke_width_n], lr=1e-2)
# Run 200 Adam iterations.
for t in range(200):
    print('iteration:', t)
    optimizer.zero_grad()
    # Forward pass: rebuild the visible segments and render the image.
    path.points = points_n * 256
    path.stroke_width = stroke_width_n * 100
    path_group.stroke_color = stroke_color
    shapes, shape_groups = split_separate_path(path.points,
                                               path.stroke_width,
                                               path_group.stroke_color)
    scene_args = pydiffvg.RenderFunction.serialize_scene(\
        canvas_width, canvas_height, shapes, shape_groups)
    img = render(256,   # width
                 256,   # height
                 2,     # num_samples_x
                 2,     # num_samples_y
                 t+1,   # seed
                 None,  # background_image
                 *scene_args)
    # Save the intermediate render.
    pydiffvg.imwrite(img.cpu(), 'results/vis_single_stroke/iter_{}.png'.format(t), gamma=2.2)
    # Compute the loss function. Here it is L2.
    loss = (img - target).pow(2).sum()
    print('loss:', loss.item())
    # Backpropagate the gradients.
    loss.backward()
    # Print the gradients.
    print('points_n.grad:', points_n.grad)
    print('stroke_color.grad:', stroke_color.grad)
    print('stroke_width.grad:', stroke_width_n.grad)
    # Take a gradient descent step.
    optimizer.step()
    # Print the current params.
    print('points:', path.points)
    print('stroke_color:', path_group.stroke_color)
    print('stroke_width:', path.stroke_width)
# Render the final result.
path.points = points_n * 256
path.stroke_width = stroke_width_n * 100
path_group.stroke_color = stroke_color
shapes, shape_groups = split_separate_path(path.points,
                                           path.stroke_width,
                                           path_group.stroke_color)
scene_args = pydiffvg.RenderFunction.serialize_scene(\
canvas_width, canvas_height, shapes, shape_groups)
img = render(256, # width
256, # height
2, # num_samples_x
2, # num_samples_y
202, # seed
None, # background_image
*scene_args)
# Save the final image.
pydiffvg.imwrite(img.cpu(), 'results/vis_single_stroke/final.png')
# Convert the intermediate renderings to a video.
from subprocess import call
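# Note: the conversion below assumes the ffmpeg binary is available on the PATH.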
call(["ffmpeg", "-framerate", "24", "-i",
"results/vis_single_stroke/iter_%d.png", "-vb", "20M",
"results/vis_single_stroke/out.mp4"])