import multiprocessing as mp
import time
def iso_task(k):
    print(f"task {k} @{time.time()} sec: sleep for 1 second")
    time.sleep(1)
    print(f"task {k} @{time.time()} sec: finish.")
n = mp.cpu_count()
print(f"Total CPUs = {n}")
tasks = []
start_time = time.time()
for i in range(n): # prepare all tasks to run
    task = mp.Process(target=iso_task, args=(i,))
    tasks.append(task)
for i in range(n): # fire all tasks at the same time
    tasks[i].start()
for i in range(n): # wait for all tasks to finish
    tasks[i].join()
dt = time.time() - start_time
print(f"{round(dt, 3)} sec elapsed")
Thursday, April 3, 2025
Simple multi-CPU parallel processing with Python
Wednesday, April 2, 2025
Implementing Chebyshev polynomials and Chebyshev interpolation with Python
No long preamble; the code is as follows:
import numpy as np
def Cp(x, n): # fast iteration for the 1st-kind Chebyshev polynomial Tₙ(x)
    if (n == 0):
        return 1
    if (n == 1):
        return x
    pk_1 = 1
    pn_1 = x
    k = 1
    while k < n:
        pk = pn_1 * x * 2 - pk_1 # Tₖ₊₁(x) = 2x·Tₖ(x) - Tₖ₋₁(x)
        pk_1 = pn_1
        pn_1 = pk
        k += 1
    return pn_1
test_f = lambda x: np.exp(-x*x) # test function
Ln = 20
px = [0.0] * Ln
py = [0.0] * Ln
theta = [0.0] * Ln
for k in range(Ln):
    theta[k] = np.pi * (k + 0.5) / Ln # θₖ = π·(k + 0.5) / Ln
    px[k] = np.cos(theta[k])          # pxₖ = cos(θₖ), the Chebyshev nodes
    py[k] = test_f(px[k])             # pyₖ = f(pxₖ), used in the Chebyshev interpolation
def Ci(i): # coefficient Cᵢ, bound to pyₖ, θₖ, Ln
    total = 0
    for k in range(Ln):
        total += py[k] * np.cos(i * theta[k])
    return total * 2 / Ln # Cᵢ = (2/n)·Σₖ f(pxₖ)·Tᵢ(pxₖ), k = 0, 1, ..., n-1
def Chebyshev_interpolation(t): # Chebyshev interpolation polynomial
    total = Ci(0) / 2 # Σₖ Cₖ·Tₖ(x) - C₀/2, k = 0, 1, ..., n-1, since T₀(x) = 1
    k = 1
    while k < Ln:
        total += Ci(k) * Cp(t, k)
        k += 1
    return total
for k in range(Ln):
    xk = px[k] + 0.1
    c = test_f(xk)                  # actual value
    h = Chebyshev_interpolation(xk) # Chebyshev-interpolated value
    print(f"x={xk}, interpolated={h}, actual={c}, error = {h-c}")
Thursday, March 20, 2025
Implementing the definite integral ∫ₐᵇ f(t)dt in Python
After reading a few articles, I wrote some simple programs by hand to implement the definite integral in several different ways:
import numpy as np
def Lp(x, n): # evaluate the degree-n Legendre polynomial Pₙ at x by fast iteration
    if (n == 0):
        return 1.0
    if (n == 1):
        return x
    pn_1, pk_1 = x, 1.0 # iteration initial values; since n > 1 we iterate at least once
    k = 1               # start from k = 1 and go up to n - 1
    while k < n:
        pn_1, pk_1 = (pn_1*x*(2 * k + 1) - pk_1*k) / (k + 1), pn_1
        k += 1          # recurrence: Pₖ₊₁(x) = ((2k+1)·x·Pₖ(x) - k·Pₖ₋₁(x)) / (k+1)
    return pn_1
def d_Lp(x, n): # derivative of the degree-n Legendre polynomial at x
    if (n == 0):
        return 0.0
    if (n == 1):
        return 1.0
    return (Lp(x, n - 1) - Lp(x, n) * x) * n / (1.0 - x*x) # P′ₙ(x) = n·(Pₙ₋₁(x) - x·Pₙ(x)) / (1 - x²)
# Multiple root finder algorithm for Legendre polynomials: https://publikacio.uni-eszterhazy.hu/3009/1/AMI_33_from3to13.pdf
def by_newton(f, a=1.0, b=2.0, n=10):
    def newton_raphson(n, eps=1e-16): # xₖ = xₖ - Pₙ(xₖ) / [P′ₙ(xₖ) - Pₙ(xₖ)·Σᵢ 1/(xₖ - xᵢ)]
        e = np.cos(np.array([np.pi*(k + 0.5)/n for k in range(n)])) # initial guesses (Chebyshev nodes)
        for k in range(n): # find all roots by the Newton-Raphson method
            xk = e[k]
            iteration = 0
            while (iteration < 1000):
                iteration += 1
                fx = Lp(xk, n)
                if abs(fx) < eps: # converged
                    break
                sum_r = 0.0
                for j in range(k): # sum_r = Σᵢ 1/(xₖ - xᵢ) to deflate the previously found roots
                    delta = xk - e[j]
                    if abs(delta) < eps:
                        continue # skip a singular value!
                    sum_r += 1.0 / delta
                dx = fx / (d_Lp(xk, n) - fx * sum_r)
                if abs(dx) < eps: # converged
                    break
                xk -= dx # xₖ = xₖ - Pₙ(xₖ) / [P′ₙ(xₖ) - Pₙ(xₖ)·Σᵢ 1/(xₖ - xᵢ)]
            e[k] = xk # final root update
        xi = [e]
        derivative = d_Lp(xi[0], n)
        xi.append(2 / (derivative*derivative*(1 - xi[0]*xi[0]))) # Gauss-Legendre weights
        return np.array(xi)
    xw = newton_raphson(n)
    scale = (b - a) / 2.0 # linear transform tx: bias + scale * 1 = b => scale = (b - a) / 2
    bias = (b + a) / 2.0  # linear transform tx: bias + scale * (-1) = a => bias = (b + a) / 2
    tx = xw[0] * scale + bias # x -> tx
    total = f(tx).dot(xw[1])
    return total * scale
def by_jacobi(f, a=1.0, b=2.0, n=10):
    def Jacobi_method(n): # Golub-Welsch: the nodes are the eigenvalues of the symmetric tridiagonal Jacobi matrix
        J = np.zeros((n, n))
        d = len(J) - 1 # fill the tridiagonal Jacobi matrix; its trace is 0 and it is symmetric
        for k in range(d): # fill
            m = k + 1
            J[k][m] = m / np.sqrt(4.0 * m * m - 1.0)
            J[m][k] = J[k][m]
        e = np.linalg.eigvals(J) # n x n Jacobi matrix, solve for the eigenvalues
        xi = [np.sort(e)] # the eigenvalues are exactly the roots of the degree-n Legendre polynomial
        derivative = d_Lp(xi[0], n)
        xi.append(2 / (derivative*derivative*(1 - xi[0]*xi[0]))) # weight for each eigenvalue
        return np.array(xi)
    xw = Jacobi_method(n)
    scale = (b - a) / 2.0 # linear transform tx: bias + scale * 1 = b => scale = (b - a) / 2
    bias = (b + a) / 2.0  # linear transform tx: bias + scale * (-1) = a => bias = (b + a) / 2
    tx = xw[0] * scale + bias # x -> tx
    total = f(tx).dot(xw[1])
    return total * scale
def by_trapezoid(f, a=1.0, b=2.0, n=50): # integrate with the trapezoid rule: strip area = (top + bottom) * height / 2
    x = np.linspace(a, b, n) # n points in total, including a and b
    y = f(x)
    n -= 1 # split into n - 1 intervals
    delta_x = (b - a) / n
    total = y[1:n].sum() + (y[0] + y[n]) / 2.0
    return total * delta_x
def by_simpson(f, a=1.0, b=2.0, n=50): # (h/3)·[f(a) + 4f(a+h) + 2f(a+2h) + 4f(a+3h) + ... + f(b)], h = (b-a)/n
    if n % 2 == 1:
        n += 1
    dx = (b - a) / n
    ddx = dx + dx # double dx
    x4 = a + dx   # points weighted by 4
    x2 = a + ddx  # points weighted by 2
    total = f(a) + f(b) # head + tail
    for i in range(1, n - 2, 2): # exclude head and tail
        total += f(x4) * 4 + f(x2) * 2
        x4 += ddx # next
        x2 += ddx # next
    total += f(x4) * 4 # last weight-4 point
    return total * dx / 3
f = lambda x: np.exp(-x**2)
a = 0.0
b = 10.0
n = 15
answer = np.sqrt(np.pi) / 2
jacobi = by_jacobi(f, a, b, n)
newton = by_newton(f, a, b, n)
trapezoid = by_trapezoid(f, a, b, n)
simpson = by_simpson(f, a, b, n)
print(f"∫ :\tjacobi={jacobi} \t, newton={newton} \t, trapezoid={trapezoid}\t, simpson={simpson}\t, compare to {answer}")
print(f"Δ :\t {jacobi - answer} \t, {newton - answer}\t, {trapezoid - answer} \t, {simpson - answer}")
Sunday, March 16, 2025
On interpolation polynomials
In the x-y plane, 2 distinct points (xₖ, yₖ), k = 0, 1 determine a straight line (which can be seen as a first-degree polynomial y = a₀ + a₁x); 3 distinct points not on the same line determine a parabola (a second-degree polynomial y = a₀ + a₁x + a₂x²); 4 distinct points not on the same parabola determine a cubic curve (a third-degree polynomial y = a₀ + a₁x + a₂x² + a₃x³); and so on: n distinct points determine a curve of degree n-1, that is, a polynomial y = Σₙ aₖxᵏ, k = 0, 1, 2, ..., n-1. Mathematics has the well-known Lagrange Interpolation Polynomial, which uses the coordinates of the n points to reconstruct that degree n-1 polynomial and interpolate the function value at any point. The synthesized interpolating polynomial is exactly the original polynomial: not shifted, not approximated, identical (mathematically it is exact), only written in a different form from f(t) = Σₙ aₖtᵏ. Here is that alternative expression. Suppose the n known coordinate points are {(x₀, y₀), (x₁, y₁), (x₂, y₂), ..., (xₙ₋₁, yₙ₋₁)}; then:
f(t) = Σₙⱼ [yⱼ * Πₙₖ (t - xₖ)/(xⱼ - xₖ)], where k ≠ j, and j, k = 0, 1, 2, ..., n-1
In the expression above, Σₙ is a sum of n terms and Πₙ is a product of n terms. Substituting t = xₖ gives f(t) = f(xₖ) = yₖ, which shows it really is the original polynomial. The point of this form is that the polynomial can be evaluated at any point without solving a matrix equation for the coefficients aₖ; in fact, expanding the whole Lagrange interpolation polynomial shows that the aₖ are functions of the known constants {(x₀, y₀), (x₁, y₁), ..., (xₙ₋₁, yₙ₋₁)}. See also:
https://math.libretexts.org/Courses/Angelo_State_University/Mathematical_Computing_with_Python/3%3A_Interpolation_and_Curve_Fitting/3.2%3A_Polynomial_Interpolation
Below is a quick check of the result in C++:
#include<stdio.h>
double Lip(double *x, double *y, int n, double t) {// Lagrange Interpolation Polynomial
    auto L = [x, n](int j, double t){
        double pi = 1.0;
        for (int k = 0; k < n; k ++) {// exclude the (x[j] - x[k]) term
            if (k == j) continue;
            pi *= (t - x[k]) / (x[j] - x[k]);
        }
        return pi;
    };
    double f = 0;
    for(int j = 0; j < n; j ++) { // Lip(t) = Σₙ (yⱼ * Lⱼ(t)), j = 0, 1, 2, ..., n-1
        f += y[j] * L(j, t);
    }
    return f;
} // f(t) = Σₙ [yⱼ * Πₙ(t - xₖ)/(xⱼ - xₖ)], k = 0, 1, 2, ..., n-1
double *polynomials(double *x, int n) { // sample a polynomial at the n given x values
    double *f = new double[n]();
    for (int i = 0; i < n; i ++) {// f(x) = 2x (uncomment the extra term for a quadratic)
        f[i] = 2 * x[i];// + x[i] * x[i];
    }
    return f;
}
int main() {
    double x[3] = {1, 2, 3};
    int n = sizeof(x)/sizeof(double);
    double *y = polynomials(x, n);
    printf("ans = %f\n", Lip(x, y, n, 1.2)); // interpolation at x = 1.2
    delete [] y;
    return 0;
}
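The same check takes only a few lines of Python; a sketch using the same sample points (scipy.interpolate.lagrange would work as well):
def Lip(x, y, t): # f(t) = Σⱼ yⱼ · Πₖ (t - xₖ)/(xⱼ - xₖ), k ≠ j
    total = 0.0
    for j in range(len(x)):
        term = y[j]
        for k in range(len(x)):
            if k != j:
                term *= (t - x[k]) / (x[j] - x[k])
        total += term
    return total
xs = [1.0, 2.0, 3.0]
ys = [2*v for v in xs]  # same sample values as the C++ test, f(x) = 2x
print(Lip(xs, ys, 1.2)) # should print 2.4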
Saturday, March 15, 2025
A simple bisection method in C++ to find the real roots of an equation
// quad.cpp
#include <stdio.h>
#include <math.h>
double Lp(double x, int n) {// fast iteration for the degree-n Legendre polynomial
    if (n < 0) return 0; // invalid order
    if (n == 0) return 1; // p0 = 1
    if (n == 1) return x; // p1 = x
    double pk_1 = 1;// previous value, Lp(x, k - 1)
    double pn_1 = x;// current value, Lp(x, k)
    int k = 1; // begin to evaluate the degree-n Legendre polynomial
    do { // recurrence: Pₖ₊₁(x) = ((2k+1)·x·Pₖ(x) - k·Pₖ₋₁(x)) / (k+1)
        double pk = (pn_1 * x * (2 * k + 1) - pk_1 * k) / (k + 1);// next value
        pk_1 = pn_1;// shift for the next step
        pn_1 = pk; // shift for the next step
    } while (++ k < n); // up to k = n
    return pn_1;// final iteration: the degree-n Legendre polynomial
}
double d_Lp(double x, int n) { // derivative of the degree-n Legendre polynomial
    if (n <= 0) return 0;
    if (n == 1) return 1;// P′ₙ(x) = (Pₙ₋₁(x) - x·Pₙ(x)) * n / (1 - x*x)
    return (Lp(x, n - 1) - Lp(x, n) * x) * n / (1.0 - x*x);
}
double Lp_find_root(double left, double right, int nth, int iteration=1000, double eps=1e-16) {
    auto LPn = [nth](double x){ return Lp(x, nth); }; // bind Lp(., nth)
    if (LPn(left) == 0) return left ;// test the left boundary first: a zero value is certainly a root
    if (LPn(right) == 0) return right;// then the right boundary: a zero value is certainly a root
    auto zero_cross = [LPn](double l, double r) { return LPn(l) * LPn(r); };// bind the LPn(.) function above
    double product = zero_cross(left , right);
    if (product > 0) return 2; // same sign at both boundaries: no root in between (2 is outside [-1, 1])
    double root = 0; // the roots of Lp lie between -1 and 1
    int step = 0; // start iterating, to find a root of the Legendre polynomial between left and right
    double pre_root = 0;
    while (step ++ < iteration) {// a zero crossing (one side positive, one side negative) guarantees a root in between
        root = (left + right) / 2; // bisect: take the midpoint as the candidate root
        double f = LPn(root);
        if (f < 0) f = -f; // absolute value of the function
        if (f < eps) break;// close enough: root found
        double temp = pre_root - root;// difference between successive candidates
        pre_root = root;
        if (temp < 0) temp = -temp; // absolute value
        if (temp < eps) {// todo: acceptable for now
            break;
        }
        product = zero_cross(left, root);// zero-crossing test between the candidate root and the left boundary
        if (product > 0) left = root; // same sign as the left boundary: the root is to the right, move the left boundary and try again
        else if (product < 0) right = root; // sign differs from the left boundary: the root is to the left, move the right boundary and try again
    }
    return root;// the root between left and right
}
double *Lp_solver(int nth = 10) {
    static double xi[1000 * 2];// todo: validate n
    for (int i = 0; i <= 2 * nth; i++) xi[i] = 0; // clear all data first
    int k = 0;
    double step = 1e-3;
    for (double x = -1.0; x <= 1.0; x += step) { // scan [-1, 1] step by step
        double root = Lp_find_root(x, x + step, nth); // find a root between x and x + step
        if (root > 1) continue; // 2 means no root in this interval
        double derivative = d_Lp(root, nth);
        xi[k] = root;// node position x
        xi[nth + k ++] = 2 / (derivative*derivative*(1 - root*root));// quadrature weight w
    }
    return (k > 0) ? xi : nullptr;
}
double integral_quad(double f(double), double a, double b, int nth = 10) {
    static double *xi;
    static int Ln;
    if (xi == nullptr || Ln != nth) {
        Ln = nth; // reset Ln
        xi = Lp_solver(Ln);// solve once and cache
    }
    double *wi = xi + Ln;
    double scale = (b - a) / 2; // linear transform xd: bias + scale * 1 = b, scale = (b - a) / 2
    double bias = (b + a) / 2; // linear transform xd: bias + scale * (-1) = a, bias = (b + a) / 2
    double sum = 0;
    for(int i = 0; i < Ln; i ++) {
        sum += f(xi[i] * scale + bias) * wi[i];
    }
    double area = sum * scale;
    return area;
}
int main() {
    double area = integral_quad([](double x) { // f(x) = (2*x + 3/x)²
        double y = 2*x + 3/x;// avoid x = 0
        return y * y;
    },
    1, 100
    );
    printf("Area = %f\n", area);
}
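As a rough sanity check of the printed Area (a sketch in Python rather than C++): the integrand expands to 4x² + 12 + 9/x², whose antiderivative is 4x³/3 + 12x - 9/x.
F = lambda x: 4*x**3/3 + 12*x - 9/x # antiderivative of (2x + 3/x)²
print(F(100) - F(1))                # ≈ 1334528.91, the value Area should be close to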
# Cross-check of the Legendre roots in Python: eigenvalues of the Golub-Welsch Jacobi matrix
import numpy as np
n = 5
J = np.zeros((n, n))
d = len(J) - 1
for k in range(d): # fill the symmetric tridiagonal Jacobi matrix
    m = k + 1
    J[k][m] = m / np.sqrt(4.0 * m * m - 1.0)
    J[m][k] = J[k][m]
e, v = np.linalg.eig(J) # solve for the eigenvalues and eigenvectors of the matrix
print(np.sort(e)) # list only the eigenvalues, i.e. the roots
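For comparison (a sketch, assuming numpy's leggauss routine may be used), the built-in Gauss-Legendre helper returns the same roots together with the quadrature weights:
nodes, weights = np.polynomial.legendre.leggauss(n) # roots of P₅ and the corresponding weights
print(nodes)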
Tuesday, March 11, 2025
Defining a custom differentiable function with Python 3 autograd and numpy
from autograd import elementwise_grad as grad
from autograd.extend import defvjp, primitive
import numpy as np
# Define the function prototype; the input must be an np.array(.) so autograd can operate on it
@primitive
def polynomial_f(x): # primitive requires def, not lambda; this defines the forward function
    return x**3 # x³
# defvjp(polynomial_f, lambda o, x : (lambda g: 3*x**2))
# Define the backward function; strictly the vjp should multiply the upstream gradient by 3x²,
# but elementwise_grad feeds a constant vector of ones here, so the upstream gradient is dropped
def grad_f(o, x): # may also return a lambda directly; o is the output of f(x), x is the input of f(x)
    def derivative(_): # bound to x
        return 3*x**2
        # return np.full(x.shape, _) * 3*x**2 # bound to x and _
    return derivative
defvjp(polynomial_f, grad_f) # bind the prototype to its derivative
# Use autograd's grad function to define the order-n derivative: f⁽ⁿ⁾(x) = dⁿf(x)/dxⁿ
df_n = lambda f, n = 1: df_n(grad(f), n - 1) if n > 1 else grad(f)
x = np.array([1.0, 2.0, 3.0, 4.0, 5.0]) # test x coordinates treated as a vector (floats, so autograd can differentiate)
print(f"f(x) = x³ : {polynomial_f(x)},\tx = {x}") # f(x) = x³
print(f"f'(x) = 3x² : {df_n(polynomial_f)(x)},\tx = {x}") # 1st derivative f'(x) = 3x²
print(f"f''(x) = 6x : {df_n(polynomial_f, 2)(x)},\tx = {x}") # 2nd derivative f''(x) = 6x
print(f"f'''(x) = 6 : {df_n(polynomial_f, 3)(x)},\tx = {x}") # 3rd derivative f'''(x) = 6
print(f"f''''(x) = 0 : {df_n(polynomial_f, 4)(x)},\tx = {x}") # 4th derivative f''''(x) = 0
Monday, March 10, 2025
Verifying the Gradient Descent algorithm with Python autograd
from autograd import elementwise_grad as grad
import autograd.numpy as auto_np
import matplotlib.pyplot as plt
import numpy as np
import tqdm
ln_ = lambda v: auto_np.log(v) # ln(v): natural log function
sum_ = lambda v: auto_np.sum(v) # Σ(v): summation function
sigmoid_ = lambda x: 1/(1 + auto_np.exp(-x))# sigmoid function = 1/(1 + exp(-x))
predict_ = lambda x, w: auto_np.dot(x, w) # forward x into the neural network w to get output
probability = lambda x, w: sigmoid_(predict_(x, w))
Y1 = np.array([1, 0, 0, 0]) # desired outputs (probabilities), one-hot encoded
X1 = np.array([[0.52, 1.12, 0.77],
               [0.88, -1.08, 0.15],
               [0.52, 0.06, -1.30],
               [0.74, -2.49, 1.39]]) # X1 has 4 training samples (= 4 rows); each sample is a row vector of 3 features
W1 = np.array([0.0, 0.0, 0.0]) # trainable weights, expected to reproduce the output Y1 = [1 0 0 0]
def logistic_cross_entropy(W, X, P): # W, X, P are matrices; LCE = -Σ P*ln(Q), P: one-hot encoded, elements ∈ {0, 1}
    length = X.shape[0]
    if length == P.shape[0] and W.shape[0] == X.shape[1]:
        Z = predict_(X, W) # predicted output
        lce = P*ln_(1 + auto_np.exp(-Z)) + (1 - P) * ln_(1 + auto_np.exp(Z))
        return sum_(lce) / length
gradient_Loss = grad(logistic_cross_entropy) # ∇L(w, x, y) = ∂L(w, x, y)/∂w => the loss L(w) is differentiated with respect to w
logger = []
print(f"lce loss before training: {logistic_cross_entropy(W1, X1, Y1)}, output probabilities: {probability(X1, W1)}")
for iteration in tqdm.tqdm(range(1000)):
    W1 -= 0.01 * gradient_Loss(W1, X1, Y1) # learning rate = 0.01, 1 batch (4 samples), plain GD optimizer
    logger.append([iteration, logistic_cross_entropy(W1, X1, Y1)]) # evaluate the LCE with the updated W1
print(f"lce loss after training: {logistic_cross_entropy(W1, X1, Y1)}, output probabilities: {probability(X1, W1)}")
if len(logger) > 0: # plot the data collected in logger
    logger = np.array(logger).T
    plt.plot(logger[0], logger[1], color="r", label="Logistic Cross Entropy")
    plt.xlabel("epochs")
    plt.ylabel("LCE")
    plt.title("Training")
    plt.legend() # to show the label
    plt.show()
Notes:
sigmoid = 1/(1 + exp(-z))
logistic cross entropy LCE = -Σ P*ln(Q)
z = x.dot(w)
Q = sigmoid(z) = 1/(1 + exp(-z))
elements of P ∈ {0, 1}
mean loss = sum_(- P*ln_(Q) - (1 - P)*ln_(1.0 - Q)) / length
LCE = - P*ln_(Q) - (1 - P)*ln_(1.0 - Q)
    = - P*ln_(1/(1 + exp(-z))) - (1 - P)*ln_(1.0 - 1/(1 + exp(-z)))
    = P*ln_(1 + exp(-z)) + (1 - P)*ln_((exp(-z) + 1)/exp(-z))
    = P*ln_(1 + exp(-z)) + (1 - P)*ln_(1 + 1/exp(-z))
    = P*ln_(1 + exp(-z)) + (1 - P)*ln_(1 + exp(z))
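Optional cross-check (a sketch): for this loss the gradient has the closed form ∇L = Xᵀ(sigmoid(X·W) - P) / N, which should match what autograd returns.
manual_grad = X1.T.dot(sigmoid_(predict_(X1, W1)) - Y1) / X1.shape[0] # Xᵀ(Q - P) / N
print(manual_grad)
print(gradient_Loss(W1, X1, Y1))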
Sunday, March 9, 2025
A simple test of Python asyncio usage
import asyncio
wait_task_finish = True
async def task_run(_future_obj_, k):
    print(f"task {k} running ...")
    _future_obj_.set_result(k + 100)
    await asyncio.sleep(3)
    print(f"task {k} finish.")
    return k
async def async_tasks(k):
    obj_list = []
    if k > 0:
        print(f"begin to create {k} tasks ...")
        task_list = []
        for i in range(k):
            obj_list.append(asyncio.Future())
            task_list.append(asyncio.create_task(task_run(obj_list[i], i)))
        if wait_task_finish:
            print("Wait for all tasks to finish...")
            for _task_ in task_list:
                result = await _task_
                print(f"task {_task_} return {result}")
        else:
            await asyncio.sleep(1) # if the sleep time is not long enough, task_run may not have finished
    print(f"async_tasks finish.")
    return obj_list
print(f"asyncio.run ...")
result = asyncio.run(async_tasks(4))
print(f"print finish result ...")
for _obj_ in result:
    if _obj_.done():
        print(_obj_.result())
    else:
        print(f"{_obj_} is not set_result!")
You can change wait_task_finish to True or False and compare the results when running: python3 test_async.py
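A more compact variant (a sketch reusing task_run above): asyncio.gather awaits all the coroutines at once and returns their results in order.
async def gather_tasks(k):
    futures = [asyncio.Future() for _ in range(k)]
    results = await asyncio.gather(*(task_run(futures[i], i) for i in range(k)))
    print(f"gather returned {results}")
    return futures
# asyncio.run(gather_tasks(4))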
Saturday, March 8, 2025
Approximating a cos function with a Taylor series
import autograd as auto_grad
import autograd.numpy as auto_np
import matplotlib.pyplot as plt
def taylor_bind(f, nth = 15, a = 0.0):
    def g(x): # order-nth Taylor series: f(x) ≈ Σₙ f⁽ⁿ⁾(a)·(x - a)ⁿ / n!
        if nth < 0:# todo
            return 0
        elif nth == 0:# todo
            return 1
        elif nth == 1:
            return f(a)
        taylor_sum = f(a) # initial value when nth > 1
        d_f = auto_grad.elementwise_grad(f) # initial derivative term
        xpow = x - a # initial power term
        n_ = 1 # initial factorial term
        k = 1 # iterate from 1 up to nth
        while True: # iteration begins
            taylor_sum += d_f(a) * xpow / n_
            k += 1 # next term
            if k == nth: # no need to go beyond nth
                break
            d_f = auto_grad.elementwise_grad(d_f) # next derivative of f
            xpow *= (x - a) # next power of (x - a)
            n_ *= k # next factorial
        return taylor_sum
    return g # a function that can be evaluated later
x0 = auto_np.linspace(-7, 7, 500)
y1 = auto_np.cos
y2 = taylor_bind(y1)
plt.plot(x0, y2(x0), color="g", label="taylor_bind")
plt.plot(x0, y1(x0), color="r", label="cos")
plt.xlabel("x")
plt.title("Taylor series fit")
plt.legend() # to show the labels
plt.show()
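A quick numeric check (sketch): close to the expansion point the truncated series should agree with cos to many digits, and the error should grow toward the ends of the plotted range.
for x_ in (0.5, 2.0, 6.0):
    print(x_, y2(x_), auto_np.cos(x_), y2(x_) - auto_np.cos(x_))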
Friday, March 7, 2025
Generating n-th order derivatives with Python 3's autograd package and plotting them
1. First install python3-venv and create a virtual environment in a directory:
sudo apt install python3-venv
cd ~
python3 -m venv venv
2. Enter the Python 3 virtual environment and install autograd:
cd ~
source venv/bin/activate
pip install autograd
3. Edit the following test file: xed ~/test_grad.py
from autograd import elementwise_grad as egrad
import autograd.numpy as auto_np
import matplotlib.pyplot as plt
tanh = lambda t: (1.0 - auto_np.exp((-2 * t))) / (1.0 + auto_np.exp(-(2 * t)))
derivative_ = lambda n, f: derivative_(n - 1, derivative_(1, f)) if n > 1 else egrad(f) # dⁿf(x)/dxⁿ
#
x = auto_np.linspace(-7, 7, 500)
y0 = tanh(x)
y1 = derivative_(1, tanh)(x)
y2 = derivative_(2, tanh)(x)
y3 = derivative_(3, tanh)(x)
y4 = derivative_(4, tanh)(x)
#
plt.plot(x, y0, x, y1, x, y2, x, y3, x, y4)
plt.show()
4. Activate the virtual environment and run it:
cd ~
source venv/bin/activate
python3 test_grad.py
P.S. If you do not want to install autograd, you can download the whole repository from the official site: https://github.com/HIPS/autograd
Tuesday, March 4, 2025
Learning about neural networks
Reference videos:
1. https://www.youtube.com/watch?v=ErnWZxJovaM&list=PLtBw6njQRU-rwp5__7C0oIVt26ZgjG9NI
2. https://www.youtube.com/watch?v=BHgssEwMxsY
A neural network usually has one input layer and one output layer, with one or more hidden layers in between; when there is more than one hidden layer it is called a deep neural network (DNN). Layers are connected through neurons: the input-layer neurons receive the input data and propagate it inward through weights, the hidden-layer neurons sum the incoming data, pass the sum through an activation function, and spread the result further inward through weights, and the output layer sums the data from the last hidden layer and passes it through an activation function to produce the output values. The model parameters are then trained with the backpropagation algorithm. Deriving the partial derivatives of the loss function by hand is quite involved, however; another way is to compute the partial derivatives through an auto-gradient computational graph, which keeps the training procedure tidy. See the micrograd site https://github.com/karpathy/micrograd , the teenygrad site https://github.com/tinygrad/teenygrad/activity , and the tinygrad site https://github.com/tinygrad/tinygrad , which fully implement the autograd approach. The micrograd engine is under 100 lines of source code, simple and to the point; teenygrad is under 1000 lines and also implements Tensor and Optimizer operations; tinygrad can additionally use a GPU to accelerate the computation. With these, simple neural-network computations can be written in Python without installing pytorch.
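Below is a minimal sketch in the spirit of micrograd (not the actual micrograd code): a scalar Value class that records the computation graph and back-propagates gradients for + and *.
class Value: # a scalar that remembers how it was computed
    def __init__(self, data, parents=(), op=""):
        self.data, self.grad = data, 0.0
        self._parents, self._op = parents, op
        self._backward = lambda: None
    def __add__(self, other):
        out = Value(self.data + other.data, (self, other), "+")
        def _backward(): # d(a+b)/da = 1, d(a+b)/db = 1
            self.grad += out.grad
            other.grad += out.grad
        out._backward = _backward
        return out
    def __mul__(self, other):
        out = Value(self.data * other.data, (self, other), "*")
        def _backward(): # d(a*b)/da = b, d(a*b)/db = a
            self.grad += other.data * out.grad
            other.grad += self.data * out.grad
        out._backward = _backward
        return out
    def backward(self): # build a reverse topological order, then apply the chain rule
        topo, seen = [], set()
        def visit(v):
            if v not in seen:
                seen.add(v)
                for p in v._parents:
                    visit(p)
                topo.append(v)
        visit(self)
        self.grad = 1.0
        for v in reversed(topo):
            v._backward()
a, b = Value(2.0), Value(3.0)
y = a * b + a  # y = a*b + a, so dy/da = b + 1 = 4, dy/db = a = 2
y.backward()
print(a.grad, b.grad) # 4.0 2.0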