Webscraping

You've got to source your data one way or another.

Existing project: home prices scraped from SmartAsset
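
A minimal sketch of what that kind of scrape could look like, assuming a hypothetical URL and a plain HTML table of city/price rows (the real SmartAsset pages may load data with JavaScript, so the URL and selectors here are placeholders):

import requests
from bs4 import BeautifulSoup

# Placeholder URL -- swap in the actual page that lists the home prices
url = "https://example.com/home-prices"

# Fetch the page and stop on HTTP errors
response = requests.get(url, timeout=10)
response.raise_for_status()

soup = BeautifulSoup(response.text, "html.parser")

# Assumes the prices live in a simple <table>; adjust to the real markup
table = soup.find("table")
rows = []
if table is not None:
    for tr in table.find_all("tr")[1:]:  # skip the header row
        cells = [td.get_text(strip=True) for td in tr.find_all("td")]
        if cells:
            rows.append(cells)

for row in rows:
    print(row)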

Future projects

Scrape an Instagram artist's profile

import requests
from bs4 import BeautifulSoup

# The URL of the Instagram profile you want to scrape
instagram_url = "https://www.instagram.com/your_username/"

# Send a GET request to the URL
response = requests.get(instagram_url)
response.raise_for_status()

# Parse the response HTML using BeautifulSoup
# (note: this only sees images present in the static HTML, not ones loaded by JavaScript)
soup = BeautifulSoup(response.text, "html.parser")

# Find all of the images on the page
images = soup.find_all("img")

# Loop through the images and download them to your local machine
for i, image in enumerate(images):
    # Get the URL of the image
    img_src = image.get("src")
    if not img_src or not img_src.startswith("http"):
        continue  # skip missing, relative, or data: sources

    # Download the image
    img_response = requests.get(img_src)

    # Save each image under its own filename so they don't overwrite each other
    with open(f"instagram_image_{i}.jpg", "wb") as f:
        f.write(img_response.content)

Then classify and annotate their pictures.


import cv2
import numpy as np
from tensorflow.keras.applications.inception_v3 import InceptionV3, preprocess_input

# Load the pre-trained InceptionV3 model with ImageNet weights
model = InceptionV3(weights='imagenet')

# Define a function to classify an image as a portrait or a landscape
def classify_image(image):
  # Preprocess the image for the InceptionV3 model:
  # resize to 299x299, convert BGR (OpenCV) to RGB, scale with preprocess_input,
  # and add a batch dimension
  image = cv2.resize(image, (299, 299))
  image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
  image = preprocess_input(image.astype(np.float32))
  image = np.expand_dims(image, axis=0)

  # Use the InceptionV3 model to predict class probabilities for the image
  preds = model.predict(image)

  # Compare two placeholder class indices. ImageNet's 1000 classes don't actually
  # include 'portrait' or 'landscape', so for a real split you'd fine-tune the model
  # or train a small classifier on top of its features.
  if preds[0][101] > preds[0][177]:
    return 'portrait'
  else:
    return 'landscape'

# Load an image and classify it as a portrait or a landscape
image = cv2.imread('image.jpg')
label = classify_image(image)
print('This image is classified as a', label)
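
The annotation half isn't covered above; here's a minimal sketch, assuming you just want the predicted label drawn onto a saved copy of the image with OpenCV (the helper name and output filename are placeholders):

import cv2

# Hypothetical helper: draw the predicted label on the image and save a copy
def annotate_image(image, label, output_path='annotated_image.jpg'):
  annotated = image.copy()
  # Draw the label in the top-left corner in green
  cv2.putText(annotated, label, (10, 30), cv2.FONT_HERSHEY_SIMPLEX,
              1.0, (0, 255, 0), 2, cv2.LINE_AA)
  cv2.imwrite(output_path, annotated)
  return output_path

# Reuse the image and label from the classification code above
annotate_image(image, label)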