The problem was solved using the Lagrange interpolation principle. The values of x0, x1, x2 and the corresponding y0, y1, y2 are changed for points a), b) and c). The resulting plots are shown below the code for each of a), b) and c).
import matplotlib.pyplot as plt
import numpy as np

# interpolation nodes (x, ln(x))
_x0 = 3
_y0 = 1.0986123
_x1 = 4
_y1 = 1.3862944
_x2 = 5
_y2 = 1.6094379
_x = np.arange(-500, 500, 0.1)

def L_function(x, x1, x2, x0, y0, y1, y2):
    # Lagrange basis polynomials for the three nodes
    L20 = ((x - x1) * (x - x2)) / ((x0 - x1) * (x0 - x2))
    L21 = ((x - x0) * (x - x2)) / ((x1 - x0) * (x1 - x2))
    L22 = ((x - x0) * (x - x1)) / ((x2 - x0) * (x2 - x1))
    L = y0 * L20 + y1 * L21 + y2 * L22
    return L

_y = L_function(_x, _x1, _x2, _x0, _y0, _y1, _y2)
print(_y)
plt.plot(_x, _y)
plt.show()
Using Lagrange interpolation we were able to simulate the graph of the function. Even though we obtained some results, Lagrange interpolation is essentially never a good choice for practical interpolation, because the obtained graph can be far from the graph of the real function. It is, however, a good way to introduce the ideas of interpolation and sometimes to prove simple results; for precision you need something more.
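This limitation can be made precise with the standard interpolation error term: for a sufficiently smooth f and n+1 nodes x_0, ..., x_n there is a point ξ in the interval spanned by the nodes and x such that
$$f(x) - P_n(x) = \frac{f^{(n+1)}(\xi)}{(n+1)!}\prod_{i=0}^{n}(x - x_i),$$
and the product factor grows rapidly once x moves away from the nodes, which is exactly why the interpolant can drift far from the true function.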
The same principle as in exercise Nr. 1 is applied, this time with four interpolation nodes.
import matplotlib.pyplot as plt
import numpy as np

# interpolation nodes (x, ln(x))
_x0 = 1
_y0 = 0
_x1 = 3
_y1 = 1.0986123
_x2 = 4
_y2 = 1.3862944
_x3 = 6
_y3 = 1.7917595
_x = np.arange(-50, 50, 0.1)

def L_function(x, x1, x2, x0, x3, y0, y1, y2, y3):
    # Lagrange basis polynomials for the four nodes
    L30 = ((x - x1) * (x - x2) * (x - x3)) / ((x0 - x1) * (x0 - x2) * (x0 - x3))
    L31 = ((x - x0) * (x - x2) * (x - x3)) / ((x1 - x0) * (x1 - x2) * (x1 - x3))
    L32 = ((x - x0) * (x - x1) * (x - x3)) / ((x2 - x0) * (x2 - x1) * (x2 - x3))
    L33 = ((x - x0) * (x - x1) * (x - x2)) / ((x3 - x0) * (x3 - x1) * (x3 - x2))
    L = y0 * L30 + y1 * L31 + y2 * L32 + y3 * L33
    return L

_y = L_function(_x, _x1, _x2, _x0, _x3, _y0, _y1, _y2, _y3)
print(_y)
plt.plot(_x, _y)
plt.show()
First of all, I used a 4x4 linear system solver to obtain the coefficients x1, x2, x3 and x4 of the cubic. Using these coefficients I evaluated the polynomial and obtained the value of the function at the point x = 27. The code for this last step is shown below.
x1 = 14.263
x2 = -0.351354166667
x3 = 0.0054140625
x4 = -0.0000384114583333
x = 27
f27 = x1 + x2 * x + x3 * (x ** 2) + x4 * (x ** 3)
print(f27)
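The 4x4 solver used to obtain the coefficients is not listed above; a minimal sketch of that step with numpy.linalg.solve, using hypothetical sample points (the actual points of the exercise are not reproduced here), could look like this:

import numpy as np
# hypothetical data points -- placeholders, not the ones used for the coefficients above
xs = np.array([10.0, 20.0, 30.0, 40.0])
ys = np.array([10.1, 12.8, 16.3, 19.1])
# Vandermonde matrix with columns 1, x, x^2, x^3
V = np.vander(xs, N=4, increasing=True)
# solve V @ coeffs = ys for the cubic coefficients x1..x4
coeffs = np.linalg.solve(V, ys)
print(coeffs)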
from math import *
from pylab import *

def f(x):
    return 0.2 + 25 * x - 200 * (x ** 2) + 675 * (x ** 3) - 900 * (x ** 4) + 400 * (x ** 5)

# composite Simpson rule with n subintervals (n must be even)
def simpson(f, a, b, n):
    h = (b - a) / n
    k = 0.0
    x = a + h
    for i in range(1, n // 2 + 1):
        k += 4 * f(x)
        x += 2 * h
    x = a + 2 * h
    for i in range(1, n // 2):
        k += 2 * f(x)
        x += 2 * h
    return (h / 3) * (f(a) + f(b) + k)

# composite trapezoidal rule with n subintervals
def trapezoidal(f, a, b, n):
    h = float(b - a) / n
    s = 0.0
    s += f(a) / 2.0
    for i in range(1, n):
        s += f(a + i * h)
    s += f(b) / 2.0
    return s * h

# print the approximation on [0, 0.8] for n, 2n, 4n, ... (10 doublings)
def printFunction(method, n):
    print(" n | Value")
    for i in range(10):
        print("n = ", n, " ", method(f, 0.0, 0.8, n))
        n = n * 2

n = 2
print(" Simpson method: ")
printFunction(simpson, n)
print(" Trapezoidal method: ")
printFunction(trapezoidal, n)
The trapezoidal rule is not as accurate as Simpson's rule when the underlying function is smooth, because Simpson's rule uses quadratic approximations instead of linear ones. The Simpson formula is usually given for an odd number of equally spaced points, i.e. an even number of subintervals.
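For reference, with step h = (b - a)/n and nodes x_i = a + i h, the two composite rules implemented above are
$$T_n = \frac{h}{2}\Big(f(a) + 2\sum_{i=1}^{n-1} f(x_i) + f(b)\Big), \qquad S_n = \frac{h}{3}\Big(f(a) + 4\sum_{i\ \mathrm{odd}} f(x_i) + 2\sum_{i\ \mathrm{even},\,0<i<n} f(x_i) + f(b)\Big),$$
where Simpson's rule requires n to be even.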
#include <iostream>
using namespace std;

int main(void) {
    float a[10][10], b[10], x[10], y[10];
    int n = 3;
    int iterations = 20;
    // initial guess
    x[0] = x[1] = x[2] = 0;
    // coefficients of the matrix a
    // (only a 2x2 system is filled in; with n = 3 the third row stays uninitialized, so x3 is meaningless)
    a[0][0] = 1;
    a[0][1] = -2;
    a[1][0] = 2;
    a[1][1] = 1;
    // right-hand side values
    b[0] = -1;
    b[1] = 3;
    while (iterations > 0) {
        for (int i = 0; i < n; i++) {
            y[i] = (b[i] / a[i][i]);
            for (int j = 0; j < n; j++) {
                if (j == i)
                    continue;
                y[i] = y[i] - ((a[i][j] / a[i][i]) * x[j]);
                x[i] = y[i];
            }
            cout << "x" << i + 1 << " = " << y[i] << "\t\t\t";
        }
        cout << "\n";
        iterations--;
    }
    return 0;
}
x1 | x2 | x3 |
---|---|---|
-1 | 5 | -7.26705e-40 |
9 | -15 | .90044e-39 |
-31 | 65 | -1.16082e-38 |
129 | -255 | 4.64262e-38 |
-511 | 1025 | -1.85711e-37 |
2049 | -4095 | 7.42839e-37 |
-8191 | 16385 | -2.97136e-36 |
32769 | -65535 | 1.18854e-35 |
-131071 | 262145 | -4.75418e-35 |
524289 | -1.04858e+06 | 1.90167e-34 |
-2.09715e+06 | 4.1943e+06 | -7.60668e-34 |
8.38861e+06 | -1.67772e+07 | 3.04267e-33 |
-3.35544e+07 | 6.71089e+07 | -1.21707e-32 |
1.34218e+08 | -2.68435e+08 | 4.86828e-32 |
-5.36871e+08 | 1.07374e+09 | -1.94731e-31 |
2.14748e+09 | -4.29497e+09 | 7.78924e-31 |
-8.58993e+09 | 1.71799e+10 | -3.1157e-30 |
3.43597e+10 | -6.87195e+10 | 1.24628e-2 |
-1.37439e+11 | 2.74878e+11 | -4.98512e-29 |
5.49756e+11 | -1.09951e+12 | 1.99405e-28 |
Analyzing the results, we observe that the method diverged. These results are due to the fact that the matrix does not satisfy the convergence conditions of the method.
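The condition in question is strict diagonal dominance of the coefficient matrix,
$$|a_{ii}| > \sum_{j \neq i} |a_{ij}|, \qquad i = 1, \dots, n,$$
which guarantees convergence of the iteration; neither row of the matrix used here (first row 1, -2; second row 2, 1) satisfies it.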
#include <iostream>
using namespace std;

int main(void) {
    float a[10][10], b[10], x[10], y[10];
    int n = 3;
    int iterations = 9;
    // initial guess
    x[0] = x[1] = x[2] = 0;
    // coefficients of the matrix a
    a[0][0] = 3;
    a[0][1] = -0.1;
    a[0][2] = -0.2;
    a[1][0] = 0.1;
    a[1][1] = 7;
    a[1][2] = -0.3;
    a[2][0] = 0.3;
    a[2][1] = -0.2;
    a[2][2] = 10;
    // right-hand side values
    b[0] = 7.85;
    b[1] = -19.3;
    b[2] = 71.4;
    while (iterations > 0) {
        for (int i = 0; i < n; i++) {
            y[i] = (b[i] / a[i][i]);
            for (int j = 0; j < n; j++) {
                if (j == i)
                    continue;
                y[i] = y[i] - ((a[i][j] / a[i][i]) * x[j]);
                x[i] = y[i];
            }
            cout << "x" << i + 1 << " = " << y[i] << "\t\t\t";
        }
        cout << "\n";
        iterations--;
    }
    return 0;
}
x1 = 0.0666667 x2 = 1.12048 x3 = -1.90959
x1 = -0.0899568 x2 = -4.1423 x3 = -2.01015
x1 = -0.272087 x2 = -4.41695 x3 = -2.01018
x1 = -0.281243 x2 = -4.4169 x3 = -2.0099
x1 = -0.281223 x2 = -4.41614 x3 = -2.00989
x1 = -0.281197 x2 = -4.4161 x3 = -2.00989
x1 = -0.281196 x2 = -4.4161 x3 = -2.00989
x1 = -0.281196 x2 = -4.4161 x3 = -2.00989
x1 = -0.281196 x2 = -4.4161 x3 = -2.00989
x1 = -0.281196 x2 = -4.4161 x3 = -2.00989
x1 | x2 | x3 |
---|---|---|
2.61667 | -2.79452 | 7.00561 |
2.99056 | -2.49962 | 7.00029 |
3.00003 | -2.49999 | 7 |
3 | -2.5 | 7 |
3 | -2.5 | 7 |
3 | -2.5 | 7 |
3 | -2.5 | 7 |
3 | -2.5 | 7 |
3 | -2.5 | 7 |
The second system converges, and it does so rapidly, because the matrix satisfies the requirements. As we can see, a few iterations were sufficient to find the solution. Moreover, by analyzing information sources we confirmed the method's characteristics: the Gauss-Seidel method is one of the common methods employed for solving power flow equations.
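For reference, the update computed inside the loops above is the standard Gauss-Seidel sweep,
$$x_i^{(k+1)} = \frac{1}{a_{ii}}\Big(b_i - \sum_{j<i} a_{ij}\,x_j^{(k+1)} - \sum_{j>i} a_{ij}\,x_j^{(k)}\Big),$$
i.e. each component is recomputed using the values already updated in the current sweep.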
# first problem
from math import *

def f(x):
    return sqrt(x) - exp(-x)

def derivative(x):
    # f'(x) for f(x) = sqrt(x) - exp(-x)
    return 1.0 / (2 * sqrt(x)) + exp(-x)

# Newton's method
def newtonMethod(f, derivative, x0, tolerance=0.0001, maxIter=100):
    n = 1
    print("\n")
    while n <= maxIter:
        x1 = x0 - f(x0) / derivative(x0)
        print("n = %d, Xn = %.8f, Xn+1 - Xn = %.8f, f(x+1) = %.8f" % (n, x0, x1 - x0, f(x1)))
        if abs(x1 - x0) < tolerance:
            print("Number of iterations: ", n)
            return x1
        else:
            n = n + 1
            x0 = x1
    return False
n = 1, Xn = 0.60000000, Xn+1 - Xn = 0.26269128, f(x+1) = 0.50678699
n = 2, Xn = 0.86269128, Xn+1 - Xn = 0.74180283, f(x+1) = 1.06569506
n = 3, Xn = 1.60449411, Xn+1 - Xn = 1.22189676, f(x+1) = 1.62196112
n = 4, Xn = 2.82639087, Xn+1 - Xn = 1.10411217, f(x+1) = 1.96291584
n = 5, Xn = 3.93050304, Xn+1 - Xn = 0.65733542, f(x+1) = 2.13174918
n = 6, Xn = 4.58783846, Xn+1 - Xn = 0.32482185, f(x+1) = 2.20909929
n = 7, Xn = 4.91266031, Xn+1 - Xn = 0.14904564, f(x+1) = 2.24348879
n = 8, Xn = 5.06170595, Xn+1 - Xn = 0.06633308, f(x+1) = 2.25858922
n = 9, Xn = 5.12803903, Xn+1 - Xn = 0.02914138, f(x+1) = 2.26518471
n = 10, Xn = 5.15718041, Xn+1 - Xn = 0.01273111, f(x+1) = 2.26805887
n = 11, Xn = 5.16991152, Xn+1 - Xn = 0.00554847, f(x+1) = 2.26931012
n = 12, Xn = 5.17545999, Xn+1 - Xn = 0.00241559, f(x+1) = 2.26985460
n = 13, Xn = 5.17787558, Xn+1 - Xn = 0.00105118, f(x+1) = 2.27009150
n = 14, Xn = 5.17892676, Xn+1 - Xn = 0.00045734, f(x+1) = 2.27019455
n = 15, Xn = 5.17938411, Xn+1 - Xn = 0.00019896, f(x+1) = 2.27023939
n = 16, Xn = 5.17958307, Xn+1 - Xn = 0.00008655, f(x+1) = 2.27025889
Number of iterations: 16
# Newton's simplified method (derivative-free)
def newtonSimplifMethod(f, x0, tolerance=0.0001, maxIter=100):
    n = 1
    print("\n")
    while n <= maxIter:
        # slope estimated as (f(x0 + f(x0)) - f(x0)) / f(x0) instead of f'(x0)
        x1 = x0 - f(x0) / ((f(x0 + f(x0)) - f(x0)) / f(x0))
        print("n = %d, Xn = %.8f, Xn+1 - Xn = %.8f, f(x+1) = %.8f" % (n, x0, x1 - x0, f(x1)))
        if abs(x1 - x0) < tolerance:
            print("Number of iterations: ", n)
            return x1
        else:
            n = n + 1
            x0 = x1
    return False
n = 1, Xn = 0.60000000, Xn+1 - Xn = -0.20803375, f(x+1) = -0.04965483
n = 2, Xn = 0.39196625, Xn+1 - Xn = 0.03270102, f(x+1) = -0.00232234
n = 3, Xn = 0.42466728, Xn+1 - Xn = 0.00163193, f(x+1) = -0.00000503
n = 4, Xn = 0.42629921, Xn+1 - Xn = 0.00000354, f(x+1) = -0.00000000
Number of iterations: 4
# Secant method
def secant(f, x0, x1, tol=0.0001, maxIt=100):
    n = 1
    print("\n")
    while n <= maxIt:
        x2 = x1 - f(x1) * ((x1 - x0) / (f(x1) - f(x0)))
        print("n = %d, Xn = %.8f, Xn+1 = %.8f, root = %.8f, f(root) = %.8f" % (n - 1, x0, x1, x2, f(x2)))
        if abs(x2 - x1) < tol:
            print("\nNumber of iterations =", n)
            return x2
        else:
            n = n + 1
            x0 = x1
            x1 = x2
    return False
n = 1, Xn = 0.60000000, Xn+1 - Xn = -0.20803375, f(x+1) = -0.04965483
Number of iterations: 1
By implementing each root-finding method we can analyze its behaviour and the way it converges to the real root, and compare the rates of convergence of Newton's method, the simplified Newton method and the secant method. The values of f(x) at the estimated root are listed above for each run; the iteration whose final f(x) is closest to 0.0 gives the best approximation of the real root, which in the runs shown above was the simplified Newton iteration.
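For reference, the three updates implemented above are
$$x_{n+1} = x_n - \frac{f(x_n)}{f'(x_n)}, \qquad x_{n+1} = x_n - \frac{f(x_n)^2}{f(x_n + f(x_n)) - f(x_n)}, \qquad x_{n+1} = x_n - f(x_n)\,\frac{x_n - x_{n-1}}{f(x_n) - f(x_{n-1})},$$
for Newton's method, the simplified (derivative-free) variant and the secant method respectively.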
import math

# the three iteration functions g(x)
def func1(x):
    return 2 * math.exp(-x)

def func2(x):
    return 0.9 / (1 + x ** 4)

def func3(x):
    return 6.28 + math.sin(x)

# fixed-point iteration x_{n+1} = f(x_n)
def fixed(f, x0, tol=0.000001, max_it=1000):
    n = 1
    while n <= max_it:
        x1 = f(x0)
        if abs(x1 - x0) < tol:
            print("Root is =", x1)
            print("Number of iterations =", n)
            return
        else:
            x0 = x1
            n = n + 1
Fixed:
First function:
Root is = 0.8526051018601054
Number of iterations = 74
Second function:
Root is = 0.7141895972112392
Number of iterations = 59
Third function:
Root is = 6.015529419935787
Number of iterations = 388
As we have seen, the greatest advantage of this method is that it converges rapidly and can be easily implemented. However, when applying it one should be aware that it diverges if the absolute value of the derivative of g(x) with respect to x is greater than 1 for all x in the interval containing the root. Also, if the equation has more than one root and f(x) is continuous, this method may miss one or more of them.
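The condition mentioned above can be stated as follows: if $x^* = g(x^*)$ and
$$|g'(x)| \le q < 1 \quad \text{for all } x \text{ in an interval around } x^*,$$
then the iteration $x_{n+1} = g(x_n)$ converges to $x^*$ from any starting point in that interval, with the error shrinking roughly like $q^n$.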
Find the root of the function $f(x) = \sqrt{x} - e^{-x}$ on the interval $[0, 1.2]$ with an error tolerance of $10^{-4}$. a) Apply the bisection method starting with a = 0 and b = 1.2. b) Apply the false position method starting with a = 0 and b = 1.2. For each method also compute the absolute errors $|x - x^*|$ for each iterate.
import math

# given function
def func(x):
    return math.sqrt(x) - math.exp(-x)

# bisection method
def bisection(f, a, b, tol=0.0001, max_it=100):
    n = 1
    while n <= max_it:
        c = (a + b) / 2.0
        print("n=%d a=%.8f b=%.8f mid=%.8f f(mid)=%.8f" % (n, a, b, c, f(c)))
        if f(c) == 0 or (b - a) / 2 < tol:
            print("\nnumber of iterations = %d" % n)
            return c
        else:
            n = n + 1
            if f(c) * f(a) > 0:
                a = c
            else:
                b = c
    return False
n=1 a=0.00000000 b=1.20000000 mid=0.60000000 f(mid)=0.22578503
n=2 a=0.00000000 b=0.60000000 mid=0.30000000 f(mid)=-0.19309566
n=3 a=0.30000000 b=0.60000000 mid=0.45000000 f(mid)=0.03319224
n=4 a=0.30000000 b=0.45000000 mid=0.37500000 f(mid)=-0.07491684
n=5 a=0.37500000 b=0.45000000 mid=0.41250000 f(mid)=-0.01973157
n=6 a=0.41250000 b=0.45000000 mid=0.43125000 f(mid)=0.00699981
n=7 a=0.41250000 b=0.43125000 mid=0.42187500 f(mid)=-0.00629696
n=8 a=0.42187500 b=0.43125000 mid=0.42656250 f(mid)=0.00036846
n=9 a=0.42187500 b=0.42656250 mid=0.42421875 f(mid)=-0.00295997
n=10 a=0.42421875 b=0.42656250 mid=0.42539062 f(mid)=-0.00129469
n=11 a=0.42539062 b=0.42656250 mid=0.42597656 f(mid)=-0.00046285
n=12 a=0.42597656 b=0.42656250 mid=0.42626953 f(mid)=-0.00004713
n=13 a=0.42626953 b=0.42656250 mid=0.42641602 f(mid)=0.00016068
n=14 a=0.42626953 b=0.42641602 mid=0.42634277 f(mid)=0.00005678
import math

def func(x):
    return math.sqrt(x) - math.exp(-x)

# false position (regula falsi) method
def falsePosMethod(f, x0, x1, tol=0.0001, max_it=100):
    n = 1
    while n <= max_it:
        a = (f(x0) * x1 - f(x1) * x0) / (f(x0) - f(x1))
        if abs(f(a)) < tol:
            return a
        # keep the subinterval that still brackets the root
        if f(a) * f(x0) < 0:
            x1 = a
        else:
            x0 = a
        n = n + 1
    return a
0.426243709669
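The call that produced the value above is not included in the listing; it was presumably something along these lines, with the interval taken from the problem statement:

# assumed invocation on the interval [0, 1.2] from the problem statement
print(falsePosMethod(func, 0.0, 1.2))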
After implementing the root-finding methods we can analyze their behaviour and the way they converge to the real root. As we observe, the bisection method converges rather slowly. It never diverges from the root, but the convergence process may take many iterations and a long time. The false position method seems more appropriate, as it does not need nearly as many iterations to reach the same result. In conclusion, the false position method is the better choice for this problem.
Big Bank in Ionland is launching a new promo campaign for its new "Unbelievable offer" on mortgages. You are an ordinary citizen of Ionland with a good job that pays a good salary. You are young, beautiful and healthy, you are considering starting a family, and since every family should have its own nest you are interested in the offer. The only thing missing from the picture is how much you should actually pay each month. A typical mortgage loan consists of an amount a that is borrowed at an annual interest rate of r\% for n years. Show that if the borrower makes a monthly payment of p Ionian dollars, then the total amount left to pay after n years is $$a(1+r)^{n} - \sum\limits_{i=0}^{12n-1} p\left(1+\frac{r}{12}\right)^{i} = a(1+r)^{n} - p\,\frac{(1+r/12)^{12n}-1}{r/12}.$$
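The closed form on the right-hand side follows from the geometric series with ratio $q = 1 + r/12$:
$$\sum_{i=0}^{12n-1} p\,q^{\,i} = p\,\frac{q^{12n} - 1}{q - 1} = p\,\frac{(1 + r/12)^{12n} - 1}{r/12}.$$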
loan = 50000
monthly_pay = 700
rate = 0.08
# monthly interest as computed here: loan * rate / 100, i.e. rate percent of the current balance
percentage = (loan * rate) / 100
rem = loan
months = 0
while rem > 0:
    rem = loan - monthly_pay + percentage
    loan = rem
    percentage = (loan * rate) / 100
    months = months + 1
    print(("After %d months, remains to pay %.2f dollars.") % (months, loan))
print(months)
After 1 months, remains to pay 49340.00 dollars.
After 2 months, remains to pay 48679.47 dollars.
After 3 months, remains to pay 48018.42 dollars.
After 4 months, remains to pay 47356.83 dollars.
After 5 months, remains to pay 46694.72 dollars.
After 6 months, remains to pay 46032.07 dollars.
After 7 months, remains to pay 45368.90 dollars.
After 8 months, remains to pay 44705.19 dollars.
After 9 months, remains to pay 44040.96 dollars.
After 10 months, remains to pay 43376.19 dollars.
After 11 months, remains to pay 42710.89 dollars.
After 12 months, remains to pay 42045.06 dollars.
After 13 months, remains to pay 41378.69 dollars.
After 14 months, remains to pay 40711.80 dollars.
After 15 months, remains to pay 40044.37 dollars.
After 16 months, remains to pay 39376.40 dollars.
After 17 months, remains to pay 38707.90 dollars.
After 18 months, remains to pay 38038.87 dollars.
After 19 months, remains to pay 37369.30 dollars.
After 20 months, remains to pay 36699.20 dollars.
After 21 months, remains to pay 36028.56 dollars.
After 22 months, remains to pay 35357.38 dollars.
After 23 months, remains to pay 34685.66 dollars.
After 24 months, remains to pay 34013.41 dollars.
After 25 months, remains to pay 33340.62 dollars.
After 26 months, remains to pay 32667.30 dollars.
After 27 months, remains to pay 31993.43 dollars.
After 28 months, remains to pay 31319.03 dollars.
After 29 months, remains to pay 30644.08 dollars.
After 30 months, remains to pay 29968.60 dollars.
After 31 months, remains to pay 29292.57 dollars.
After 32 months, remains to pay 28616.00 dollars.
After 33 months, remains to pay 27938.90 dollars.
After 34 months, remains to pay 27261.25 dollars.
After 35 months, remains to pay 26583.06 dollars.
After 36 months, remains to pay 25904.32 dollars.
After 37 months, remains to pay 25225.05 dollars.
After 38 months, remains to pay 24545.23 dollars.
After 39 months, remains to pay 23864.86 dollars.
After 40 months, remains to pay 23183.96 dollars.
After 41 months, remains to pay 22502.50 dollars.
After 42 months, remains to pay 21820.50 dollars.
After 43 months, remains to pay 21137.96 dollars.
After 44 months, remains to pay 20454.87 dollars.
After 45 months, remains to pay 19771.24 dollars.
After 46 months, remains to pay 19087.05 dollars.
After 47 months, remains to pay 18402.32 dollars.
After 48 months, remains to pay 17717.04 dollars.
After 49 months, remains to pay 17031.22 dollars.
After 50 months, remains to pay 16344.84 dollars.
After 51 months, remains to pay 15657.92 dollars.
After 52 months, remains to pay 14970.44 dollars.
After 53 months, remains to pay 14282.42 dollars.
After 54 months, remains to pay 13593.85 dollars.
After 55 months, remains to pay 12904.72 dollars.
After 56 months, remains to pay 12215.05 dollars.
After 57 months, remains to pay 11524.82 dollars.
After 58 months, remains to pay 10834.04 dollars.
After 59 months, remains to pay 10142.71 dollars.
After 60 months, remains to pay 9450.82 dollars.
After 61 months, remains to pay 8758.38 dollars.
After 62 months, remains to pay 8065.39 dollars.
After 63 months, remains to pay 7371.84 dollars.
After 64 months, remains to pay 6677.74 dollars.
After 65 months, remains to pay 5983.08 dollars.
After 66 months, remains to pay 5287.86 dollars.
After 67 months, remains to pay 4592.10 dollars.
After 68 months, remains to pay 3895.77 dollars.
After 69 months, remains to pay 3198.89 dollars.
After 70 months, remains to pay 2501.44 dollars.
After 71 months, remains to pay 1803.45 dollars.
After 72 months, remains to pay 1104.89 dollars.
After 73 months, remains to pay 405.77 dollars.
After 74 months, remains to pay -293.90 dollars.
74
# remaining balance after 20 years at 8% as a function of the yearly payment x
def func(x):
    return 100000 * (1 + 0.08) ** 20 - x * ((1 + 0.08) ** 20 - 1) / 0.08

# secant method
def secant(f, x0, x1, tol=0.0001, max_it=100):
    n = 1
    while n <= max_it:
        x2 = x1 - f(x1) * ((x1 - x0) / (f(x1) - f(x0)))
        if abs(x2 - x1) < tol:
            return x2
        else:
            n = n + 1
            x0 = x1
            x1 = x2
    return False
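The invocation is not shown above; since func is linear in x, the secant method converges essentially in one step from any two distinct guesses, so a call such as the following sketch (the starting guesses are arbitrary assumptions) reproduces the payment quoted below:

# assumed starting guesses; func is linear, so any two distinct values work
print(secant(func, 5000.0, 15000.0))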
According to the secant method we have learnt at Mr Boston's lectures, the yearly payment for the loan to be paid off in 20 years at 8\% interest is 10185.2208823\$.
# remaining balance after 20 years with a yearly payment of 10000, as a function of the interest rate x
def func1(x):
    return 100000 * (1 + x) ** 20 - 10000 * ((1 + x) ** 20 - 1) / x

# secant method
def secant(f, x0, x1, tol=0.0001, max_it=100):
    n = 1
    while n <= max_it:
        x2 = x1 - f(x1) * ((x1 - x0) / (f(x1) - f(x0)))
        if abs(x2 - x1) < tol:
            return x2
        else:
            n = n + 1
            x0 = x1
            x1 = x2
    return False
The interest rate required for the loan to be paid off in 20 years is 0.077546898357042, i.e. approximately 7.75\%.
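In other words, the rate found above is the root of the balance equation taken from func1, with a = 100000, p = 10000 and n = 20 (annual compounding, as written in the code):
$$100000\,(1+x)^{20} - 10000\,\frac{(1+x)^{20} - 1}{x} = 0.$$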
from math import *
from decimal import *

# here is the code for problem 1
a = [10, 100, 1000]
getcontext().prec = 1000

def function1(x):
    # evaluate (x*e^x - x^(-2)) / (x*e^x + x^(-2)) with 1000-digit Decimal arithmetic
    num = Decimal(x) * Decimal(e) ** Decimal(x)
    den = Decimal(x ** (-2))
    f = (num - den) / (num + den)
    return f

for i in range(len(a)):
    print("The result for x = %s is: " % str(a[i]))
    print(function1(a[i]))
from math import *

# here is the code for problem 2
c = 0.1
for x in range(15):
    # difference between forming the exponent as 2*x/10 and as 2*x*c with c = 0.1
    r = pow(10, (1 + 2 * x / 10)) - pow(10, (1 + 2 * x * c))
    print(r)
from math import *
import pprint

pp = pprint.PrettyPrinter(indent=4)

# starting value I_0 = 1 - e^(-1); recursion I_k = k * I_(k-1) - e^(-1)
io = 1 - exp(-1)

def function(k):
    if k == 0:
        return io
    else:
        return k * function(k - 1) - exp(-1)

A = []
for i in range(25):
    A.append(function(i))
pp.pprint(A)
import numpy as np
import math
import matplotlib.pyplot as plt

# definition of EscVel function: iterations of z -> z^2 + c before |z| exceeds 2 (capped at N)
def EscVel(z, c, N):
    n = 0
    while abs(z) <= 2 and n < N:
        n += 1
        z = z ** 2 + c
    return n

# definition of JuliaSet function
def JuliaSet(zmax, c, N):
    x = np.linspace(-zmax, zmax, 500)
    y = np.linspace(-zmax, zmax, 500)
    # matrix for storing complex numbers
    Z = []
    for i in x:
        for m in y:
            complex_nr = i + m * 1j
            Z.append(complex_nr)
    Z = np.array(Z).reshape(500, 500)
    # matrix for storing the escape velocities
    M = []
    for line in Z:
        for element in line:
            M.append(EscVel(element, c, N))
    M = np.array(M).reshape(500, 500)
    return M

# definition of plot function
def plot(M):
    im = plt.figure().gca()
    im.imshow(M, interpolation='nearest')
    plt.show()

# plotting the result
plot(JuliaSet(1, -0.297491 + 0.641051j, 100))