Initial commit for a3
This commit is contained in:
128
Aufgabe 3/aufgabe03.ipynb
Normal file
128
Aufgabe 3/aufgabe03.ipynb
Normal file
@ -0,0 +1,128 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
# NumPy is the only dependency; everything below (data generation, the
# activation functions, the network weights) is built on it.
import numpy as np

# Shared random Generator (NumPy's recommended modern RNG API), used both to
# sample the XOR training inputs and to initialise the network weights.
rng = np.random.default_rng()
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 46,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
# Aufgabe 1

# Activation functions as native NumPy ufunc expressions.  The original used
# np.vectorize(lambda ...), which is a Python-level loop AND infers its output
# dtype from the result on the first element: max(0, x) on a float array whose
# first entry is <= 0 returns the *int* 0, so the whole output array was cast
# to integers, silently truncating every activation value.
def relu(x):
    """Element-wise rectified linear unit max(0, x); preserves the input dtype."""
    return np.maximum(0, x)


def sigmoid(x):
    """Element-wise logistic sigmoid 1 / (1 + e^-x); accepts scalars or arrays."""
    return 1 / (1 + np.exp(-x))
|
||||
# 100 random XOR training samples: each input is a (2, 1) column vector of
# bits (endpoint=True makes the upper bound 1 inclusive, so values are 0 or 1).
inputs = [rng.integers(0, 1, size=(2,1), endpoint=True) for _ in range(100)]
# Unpacking a (2, 1) array yields its two rows; row[0] extracts the bit.
# The label of each sample is the XOR of its two input bits.
outputs = [a[0] ^ b[0] for a, b in inputs]
# (input, label) pairs consumed by the evaluation loop in the next cell.
data = list(zip(inputs, outputs))
|
||||
"\n",
|
||||
# Aufgabe 3

# Loss and derivatives as plain NumPy expressions instead of np.vectorize
# (which is just a Python-level loop and can silently change output dtypes).

def bcel(y, ŷ):
    """Element-wise binary cross-entropy of prediction ŷ against label y.

    Note: the log() calls are unguarded — ŷ of exactly 0 or 1 yields inf/nan,
    same as the original implementation.
    """
    return -(y * np.log(ŷ) + (1 - y) * np.log(1 - ŷ))


# Derivations
def bcel_derivation(y, ŷ):
    """d BCE / d ŷ for binary labels y in {0, 1}.

    NOTE: np.where evaluates both branches, so ŷ of exactly 0/1 may emit a
    divide-by-zero warning even for the branch that is discarded.
    """
    return np.where(y == 0, 1 / (1 - ŷ), -(1 / ŷ))


def sigmoid_derivation(x):
    """sigma'(x) = sigma(x) * (1 - sigma(x)); computes the sigmoid once
    instead of spelling the full expression out twice."""
    s = 1 / (1 + np.exp(-x))
    return s * (1 - s)


def relu_derivation(x):
    """ReLU subgradient: 0 for x < 0, else 1 (1 chosen at x == 0, matching
    the original)."""
    return np.where(x < 0, 0, 1)
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 51,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"0.7722866010132297\n",
|
||||
"0.7722866010132297\n",
|
||||
"0.9354266393448504\n",
|
||||
"0.7722866010132297\n",
|
||||
"0.7722866010132297\n",
|
||||
"0.7722866010132297\n",
|
||||
"0.8102957251424703\n",
|
||||
"0.7722866010132297\n",
|
||||
"0.5\n",
|
||||
"0.7722866010132297\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Aufgabe 2\n",
|
||||
"\n",
|
||||
"\n",
|
||||
class NeuralNet:
    """Fully-connected feed-forward network: ReLU hidden layers, one sigmoid
    output neuron.

    Relies on the module-level ``rng``, ``relu`` and ``sigmoid`` defined in
    the earlier cells.  Weights are initialised uniformly in [-1, 1); there
    are no bias terms.
    """

    def __init__(self, inputs: int = 2, hidden_layers: int = 1, hidden_layer_neurons: int = 4):
        # Expected shape of every input sample: a column vector.
        self.input_shape = (inputs, 1)

        # Construct weights for hidden layers: the first layer maps the raw
        # inputs (e.g. a 4x2 matrix), every later one maps the previous
        # layer's activations (4x4).
        self.weights = []
        for i in range(hidden_layers):
            num_inputs = inputs if i == 0 else hidden_layer_neurons  # First hidden layer only needs 2x4 weight matrix
            self.weights.append(rng.uniform(low=-1.0, high=1.0, size=(hidden_layer_neurons, num_inputs)))

        # Construct weights for output layer.  With hidden_layers=0 the
        # output layer consumes the raw inputs directly; the original
        # hard-coded hidden_layer_neurons here, which made forward_pass
        # shape-mismatch in that case.  Default behaviour is unchanged.
        out_inputs = hidden_layer_neurons if hidden_layers > 0 else inputs
        self.outweights = rng.uniform(low=-1.0, high=1.0, size=(1, out_inputs))

    def forward_pass(self, x) -> float:
        """Propagate one sample through the net.

        x must be array-like of shape ``self.input_shape``; returns the
        sigmoid output as a scalar in (0, 1).  Raises ValueError on a shape
        mismatch.
        """
        x = np.array(x)
        if x.shape != self.input_shape:
            raise ValueError(f"Input must be of shape {self.input_shape}.")

        # Hidden layers
        for layer in self.weights:
            x = relu(np.matmul(layer, x))

        # Output layer: (1, n) @ (n, 1) -> (1, 1); unwrap to a plain scalar.
        return sigmoid(np.matmul(self.outweights, x))[0][0]

    # Aufgabe 4
    def backward_pass(self, learning_rate: float):
        """Gradient-descent weight update — not implemented yet (Aufgabe 4)."""
        ...
|
||||
"\n",
|
||||
"\n",
|
||||
# Smoke-test the untrained network on the first ten XOR samples.
nn = NeuralNet(hidden_layers=1)

# ``expected`` (the XOR label) is deliberately unused for now — comparing the
# prediction against it / training belongs to Aufgabe 4 (backward_pass).
for input_values, expected in data[:10]:
    actual = nn.forward_pass(input_values)

    print(actual)
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.1"
|
||||
},
|
||||
"orig_nbformat": 4
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
Reference in New Issue
Block a user