# You complain about AI and say it won't replace jobs — here's data analysis with Python.
import requests
import torch
import torch.nn as nn
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
import matplotlib.pyplot as plt
# Fetch historical price data for Shiba Inu coin from CoinGecko API
def fetch_shiba_inu_data():
    """Download the full USD price history for Shiba Inu from CoinGecko.

    Returns:
        pd.DataFrame indexed by 'Timestamp' (datetime64) with a single
        'Price' column (USD).

    Raises:
        requests.HTTPError: if the API responds with an error status.
    """
    url = ('https://api.coingecko.com/api/v3/coins/shiba-inu/'
           'market_chart?vs_currency=usd&days=max')
    # Timeout so a stalled connection cannot hang the script forever.
    response = requests.get(url, timeout=30)
    # Fail loudly on HTTP errors instead of crashing later on bad JSON.
    response.raise_for_status()
    data = response.json()
    # 'prices' is a list of [unix_ms_timestamp, price] pairs.
    prices = data['prices']
    df = pd.DataFrame(prices, columns=['Timestamp', 'Price'])
    df['Timestamp'] = pd.to_datetime(df['Timestamp'], unit='ms')
    df.set_index('Timestamp', inplace=True)
    return df
# Fetch the data
data = fetch_shiba_inu_data()
# Preprocess the data and prepare it for training
# Scale prices into [-1, 1] for the network. NOTE(review): the scaler is fit
# on the FULL series (train + test), which leaks test statistics into
# training — acceptable for a demo, not for real evaluation.
scaler = MinMaxScaler(feature_range=(-1, 1))
# Reshape to (n, 1) because MinMaxScaler expects 2-D input.
prices = data['Price'].values.reshape(-1, 1)
scaled_prices = scaler.fit_transform(prices)
def create_dataset(dataset, look_back=1):
    """Build supervised-learning pairs from a univariate time series.

    Args:
        dataset: 2-D array of shape (n, 1), e.g. the scaled prices.
        look_back: number of past steps used as input features.

    Returns:
        (X, Y) where X has shape (n - look_back, look_back) and Y has
        shape (n - look_back,); Y[i] is the value immediately following
        the window X[i].
    """
    X, Y = [], []
    for i in range(len(dataset) - look_back):
        # Window of `look_back` consecutive values as the features...
        X.append(dataset[i:(i + look_back), 0])
        # ...and the single next value as the regression target.
        Y.append(dataset[i + look_back, 0])
    return np.array(X), np.array(Y)
# Window length: each sample uses the previous 10 prices to predict the next.
look_back = 10
X, Y = create_dataset(scaled_prices, look_back)
# Chronological 80/20 split — no shuffling, since this is a time series.
train_size = int(len(X) * 0.8)
X_train, X_test = X[:train_size], X[train_size:]
Y_train, Y_test = Y[:train_size], Y[train_size:]
# Hand everything to PyTorch as float32 tensors.
X_train, Y_train, X_test, Y_test = (
    torch.from_numpy(arr).float()
    for arr in (X_train, Y_train, X_test, Y_test)
)
# Define the neural network model using PyTorch
class RegressionModel(nn.Module):
    """Single-hidden-layer MLP regressor: Linear -> ReLU -> Linear."""

    def __init__(self, input_size, hidden_size, output_size):
        super(RegressionModel, self).__init__()
        self.hidden = nn.Linear(input_size, hidden_size)
        self.output = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        """Map a (batch, input_size) tensor to (batch, output_size)."""
        x = torch.relu(self.hidden(x))
        x = self.output(x)
        return x
input_size = look_back  # one input feature per past time step in the window
hidden_size = 64
output_size = 1  # single scalar: the predicted next (scaled) price
model = RegressionModel(input_size, hidden_size, output_size)
# Train the model using the prepared data
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
num_epochs = 100
for epoch in range(num_epochs):
    # Full-batch training: every epoch uses the entire training set at once.
    inputs = X_train
    targets = Y_train.view(-1, 1)  # reshape to (batch, 1) to match the output
    outputs = model(inputs)
    loss = criterion(outputs, targets)
    # Standard PyTorch step: clear old gradients, backprop, update weights.
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    # Progress report every 10 epochs.
    if (epoch+1) % 10 == 0:
        print(f'Epoch [{epoch+1}/{num_epochs}], Loss: {loss.item():.4f}')
# Evaluate the model's performance and make predictions
model.eval()
with torch.no_grad():
    inputs = X_test
    targets = Y_test.view(-1, 1)
    outputs = model(inputs)
    loss = criterion(outputs, targets)
    print(f'Test Loss: {loss.item():.4f}')
# Make predictions: undo the MinMax scaling to get prices back in USD.
predicted_prices = scaler.inverse_transform(outputs.numpy())
# Get the dates for the predicted prices
# NOTE(review): `outputs` are predictions for the historical TEST window,
# yet they are assigned to dates *after* the last observed date — the plot
# presents in-sample test predictions as if they were a future forecast.
# Confirm whether this relabeling is intentional.
last_date = data.index[-1]
future_dates = pd.date_range(start=last_date, periods=len(predicted_prices)+1, freq='D')[1:]
# Create a DataFrame for the predicted prices
predicted_df = pd.DataFrame({'Date': future_dates, 'Price': predicted_prices.flatten()})
predicted_df.set_index('Date', inplace=True)
# Plot the actual series alongside the model's (inverse-scaled) predictions.
plt.figure(figsize=(12, 6))
for frame, label in ((data, 'Actual Price'), (predicted_df, 'Predicted Price')):
    plt.plot(frame.index, frame['Price'], label=label)
plt.xlabel('Date')
plt.ylabel('Price (USD)')
plt.legend()
plt.show()